1 /* 2 * Device driver optimized for the Symbios/LSI 53C896/53C895A/53C1010 3 * PCI-SCSI controllers. 4 * 5 * Copyright (C) 1999-2000 Gerard Roudier <groudier@club-internet.fr> 6 * 7 * This driver also supports the following Symbios/LSI PCI-SCSI chips: 8 * 53C810A, 53C825A, 53C860, 53C875, 53C876, 53C885, 53C895, 9 * 53C810, 53C815, 53C825 and the 53C1510D is 53C8XX mode. 10 * 11 * 12 * This driver for FreeBSD-CAM is derived from the Linux sym53c8xx driver. 13 * Copyright (C) 1998-1999 Gerard Roudier 14 * 15 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been 16 * a port of the FreeBSD ncr driver to Linux-1.2.13. 17 * 18 * The original ncr driver has been written for 386bsd and FreeBSD by 19 * Wolfgang Stanglmeier <wolf@cologne.de> 20 * Stefan Esser <se@mi.Uni-Koeln.de> 21 * Copyright (C) 1994 Wolfgang Stanglmeier 22 * 23 * The initialisation code, and part of the code that addresses 24 * FreeBSD-CAM services is based on the aic7xxx driver for FreeBSD-CAM 25 * written by Justin T. Gibbs. 26 * 27 * Other major contributions: 28 * 29 * NVRAM detection and reading. 30 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk> 31 * 32 *----------------------------------------------------------------------------- 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 3. The name of the author may not be used to endorse or promote products 43 * derived from this software without specific prior written permission. 
44 * 45 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 49 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 55 * SUCH DAMAGE. 56 */ 57 58 /* $FreeBSD$ */ 59 60 #define SYM_DRIVER_NAME "sym-1.5.2-20000430" 61 62 #include <pci.h> 63 #include <stddef.h> /* For offsetof */ 64 65 #include <sys/param.h> 66 /* 67 * Only use the BUS stuff for PCI under FreeBSD 4 and later versions. 68 * Note that the old BUS stuff also works for FreeBSD 4 and spares 69 * about 1.5KB for the driver object file. 
70 */ 71 #if __FreeBSD_version >= 400000 72 #define FreeBSD_Bus_Io_Abstraction 73 #define FreeBSD_Bus_Dma_Abstraction 74 #endif 75 76 #include <sys/systm.h> 77 #include <sys/malloc.h> 78 #include <sys/kernel.h> 79 #ifdef FreeBSD_Bus_Io_Abstraction 80 #include <sys/module.h> 81 #include <sys/bus.h> 82 #endif 83 84 #include <sys/proc.h> 85 86 #include <pci/pcireg.h> 87 #include <pci/pcivar.h> 88 89 #include <machine/bus_memio.h> 90 #include <machine/bus_pio.h> 91 #include <machine/bus.h> 92 #ifdef FreeBSD_Bus_Io_Abstraction 93 #include <machine/resource.h> 94 #include <sys/rman.h> 95 #endif 96 #include <machine/clock.h> 97 98 #include <cam/cam.h> 99 #include <cam/cam_ccb.h> 100 #include <cam/cam_sim.h> 101 #include <cam/cam_xpt_sim.h> 102 #include <cam/cam_debug.h> 103 104 #include <cam/scsi/scsi_all.h> 105 #include <cam/scsi/scsi_message.h> 106 107 #include <vm/vm.h> 108 #include <vm/vm_param.h> 109 #include <vm/pmap.h> 110 111 #if 0 112 #include <sys/kernel.h> 113 #include <sys/sysctl.h> 114 #include <vm/vm_extern.h> 115 #endif 116 117 /* Short and quite clear integer types */ 118 typedef int8_t s8; 119 typedef int16_t s16; 120 typedef int32_t s32; 121 typedef u_int8_t u8; 122 typedef u_int16_t u16; 123 typedef u_int32_t u32; 124 125 /* Driver configuration and definitions */ 126 #if 1 127 #include "opt_sym.h" 128 #include <dev/sym/sym_conf.h> 129 #include <dev/sym/sym_defs.h> 130 #include <dev/sym/sym_fw.h> 131 #else 132 #include "ncr.h" /* To know if the ncr has been configured */ 133 #include <pci/sym_conf.h> 134 #include <pci/sym_defs.h> 135 #include <pci/sym_fw.h> 136 #endif 137 138 /* 139 * On x86 architecture, write buffers management does not 140 * reorder writes to memory. So, preventing compiler from 141 * optimizing the code is enough to guarantee some ordering 142 * when the CPU is writing data accessed by the PCI chip. 143 * On Alpha architecture, explicit barriers are to be used. 
144 * By the way, the *BSD semantic associates the barrier 145 * with some window on the BUS and the corresponding verbs 146 * are for now unused. What a strangeness. The driver must 147 * ensure that accesses from the CPU to the start and done 148 * queues are not reordered by either the compiler or the 149 * CPU and uses 'volatile' for this purpose. 150 */ 151 152 #ifdef __alpha__ 153 #define MEMORY_BARRIER() alpha_mb() 154 #else /*__i386__*/ 155 #define MEMORY_BARRIER() do { ; } while(0) 156 #endif 157 158 /* 159 * A la VMS/CAM-3 queue management. 160 */ 161 162 typedef struct sym_quehead { 163 struct sym_quehead *flink; /* Forward pointer */ 164 struct sym_quehead *blink; /* Backward pointer */ 165 } SYM_QUEHEAD; 166 167 #define sym_que_init(ptr) do { \ 168 (ptr)->flink = (ptr); (ptr)->blink = (ptr); \ 169 } while (0) 170 171 static __inline struct sym_quehead *sym_que_first(struct sym_quehead *head) 172 { 173 return (head->flink == head) ? 0 : head->flink; 174 } 175 176 static __inline struct sym_quehead *sym_que_last(struct sym_quehead *head) 177 { 178 return (head->blink == head) ? 
0 : head->blink; 179 } 180 181 static __inline void __sym_que_add(struct sym_quehead * new, 182 struct sym_quehead * blink, 183 struct sym_quehead * flink) 184 { 185 flink->blink = new; 186 new->flink = flink; 187 new->blink = blink; 188 blink->flink = new; 189 } 190 191 static __inline void __sym_que_del(struct sym_quehead * blink, 192 struct sym_quehead * flink) 193 { 194 flink->blink = blink; 195 blink->flink = flink; 196 } 197 198 static __inline int sym_que_empty(struct sym_quehead *head) 199 { 200 return head->flink == head; 201 } 202 203 static __inline void sym_que_splice(struct sym_quehead *list, 204 struct sym_quehead *head) 205 { 206 struct sym_quehead *first = list->flink; 207 208 if (first != list) { 209 struct sym_quehead *last = list->blink; 210 struct sym_quehead *at = head->flink; 211 212 first->blink = head; 213 head->flink = first; 214 215 last->flink = at; 216 at->blink = last; 217 } 218 } 219 220 #define sym_que_entry(ptr, type, member) \ 221 ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member))) 222 223 224 #define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink) 225 226 #define sym_remque(el) __sym_que_del((el)->blink, (el)->flink) 227 228 #define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink) 229 230 static __inline struct sym_quehead *sym_remque_head(struct sym_quehead *head) 231 { 232 struct sym_quehead *elem = head->flink; 233 234 if (elem != head) 235 __sym_que_del(head, elem->flink); 236 else 237 elem = 0; 238 return elem; 239 } 240 241 #define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head) 242 243 static __inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head) 244 { 245 struct sym_quehead *elem = head->blink; 246 247 if (elem != head) 248 __sym_que_del(elem->blink, head); 249 else 250 elem = 0; 251 return elem; 252 } 253 254 /* 255 * This one may be usefull. 
 */
#define FOR_EACH_QUEUED_ELEMENT(head, qp) \
	for (qp = (head)->flink; qp != (head); qp = qp->flink)
/*
 * FreeBSD does not offer our kind of queue in the CAM CCB.
 * So, we have to cast.
 */
#define sym_qptr(p)	((struct sym_quehead *) (p))

/*
 * Simple bitmap operations on arrays of u32 words
 * (bit n lives in word n/32, at position n%32).
 */
#define sym_set_bit(p, n)	(((u32 *)(p))[(n)>>5] |=  (1<<((n)&0x1f)))
#define sym_clr_bit(p, n)	(((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f)))
#define sym_is_bit(p, n)	(((u32 *)(p))[(n)>>5] &   (1<<((n)&0x1f)))

/*
 * Number of tasks per device we want to handle.
 * (SYM_CONF_MAX_TAG_ORDER comes from sym_conf.h / opt_sym.h.)
 */
#if	SYM_CONF_MAX_TAG_ORDER > 8
#error	"more than 256 tags per logical unit not allowed."
#endif
#define	SYM_CONF_MAX_TASK	(1<<SYM_CONF_MAX_TAG_ORDER)

/*
 * Do not use more tags than we can handle.
 */
#ifndef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif
#if	SYM_CONF_MAX_TAG > SYM_CONF_MAX_TASK
#undef	SYM_CONF_MAX_TAG
#define	SYM_CONF_MAX_TAG	SYM_CONF_MAX_TASK
#endif

/*
 * This one means 'NO TAG for this job'
 * (256 is one past the largest valid tag value).
 */
#define NO_TAG	(256)

/*
 * Number of SCSI targets.
 */
#if	SYM_CONF_MAX_TARGET > 16
#error	"more than 16 targets not allowed."
#endif

/*
 * Number of logical units per target.
 */
#if	SYM_CONF_MAX_LUN > 64
#error	"more than 64 logical units per target not allowed."
#endif

/*
 * Asynchronous pre-scaler (ns). Shall be 40 for
 * the SCSI timings to be compliant.
 */
#define	SYM_CONF_MIN_ASYNC (40)

/*
 * Number of entries in the START and DONE queues.
 *
 * We limit to 1 PAGE in order to succeed allocation of
 * these queues. Each entry is 8 bytes long (2 DWORDS).
 */
#ifdef	SYM_CONF_MAX_START
#define	SYM_CONF_MAX_QUEUE (SYM_CONF_MAX_START+2)
#else
#define	SYM_CONF_MAX_QUEUE (7*SYM_CONF_MAX_TASK+2)
#define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
#endif

/* Clamp the queue depth so both queues fit in one page. */
#if	SYM_CONF_MAX_QUEUE > PAGE_SIZE/8
#undef	SYM_CONF_MAX_QUEUE
#define	SYM_CONF_MAX_QUEUE PAGE_SIZE/8
#undef	SYM_CONF_MAX_START
#define	SYM_CONF_MAX_START (SYM_CONF_MAX_QUEUE-2)
#endif

/*
 * For this one, we want a short name :-)
 */
#define MAX_QUEUE	SYM_CONF_MAX_QUEUE

/*
 * These ones should have been already defined.
 */
#ifndef offsetof
#define offsetof(t, m)	((size_t) (&((t *)0)->m))
#endif
#ifndef MIN
#define MIN(a, b)	(((a) < (b)) ? (a) : (b))
#endif

/*
 * Active debugging tags and verbosity.
 * Or-able bits tested against DEBUG_FLAGS below.
 */
#define DEBUG_ALLOC	(0x0001)
#define DEBUG_PHASE	(0x0002)
#define DEBUG_POLL	(0x0004)
#define DEBUG_QUEUE	(0x0008)
#define DEBUG_RESULT	(0x0010)
#define DEBUG_SCATTER	(0x0020)
#define DEBUG_SCRIPT	(0x0040)
#define DEBUG_TINY	(0x0080)
#define DEBUG_TIMING	(0x0100)
#define DEBUG_NEGO	(0x0200)
#define DEBUG_TAGS	(0x0400)
#define DEBUG_POINTER	(0x0800)

#if 0
static int sym_debug = 0;
#define DEBUG_FLAGS sym_debug
#else
/* #define DEBUG_FLAGS (0x0631) */
#define DEBUG_FLAGS	(0x0000)

#endif
/* 'np' is the per-HBA softc pointer in scope at each use site. */
#define sym_verbose	(np->verbose)

/*
 * Copy from main memory to PCI memory space.
 */
#ifdef	__alpha__
#define memcpy_to_pci(d, s, n)	memcpy_toio((u32)(d), (void *)(s), (n))
#else /*__i386__*/
#define memcpy_to_pci(d, s, n)	bcopy((s), (void *)(d), (n))
#endif

/*
 * Insert a delay in micro-seconds and milli-seconds.
 */
static void UDELAY(long us) { DELAY(us); }
static void MDELAY(long ms) { while (ms--) UDELAY(1000); }

/*
 * Simple power of two buddy-like allocator.
 *
 * This simple code is not intended to be fast, but to
 * provide power of 2 aligned memory allocations.
 * Since the SCRIPTS processor only supplies 8 bit arithmetic,
 * this allocator allows simple and fast address calculations
 * from the SCRIPTS code. In addition, cache line alignment
 * is guaranteed for power of 2 cache line size.
 *
 * This allocator has been developed for the Linux sym53c8xx
 * driver, since this O/S does not provide naturally aligned
 * allocations.
 * It has the virtue to allow the driver to use private pages
 * of memory that will be useful if we ever need to deal with
 * IO MMU for PCI.
 */

#define MEMO_SHIFT	4	/* 16 bytes minimum memory chunk */
#define MEMO_PAGE_ORDER	0	/* 1 PAGE  maximum */
#if 0
#define MEMO_FREE_UNUSED	/* Free unused pages immediately */
#endif
#define MEMO_WARN	1
#define MEMO_CLUSTER_SHIFT	(PAGE_SHIFT+MEMO_PAGE_ORDER)
#define MEMO_CLUSTER_SIZE	(1UL << MEMO_CLUSTER_SHIFT)
#define MEMO_CLUSTER_MASK	(MEMO_CLUSTER_SIZE-1)

#define get_pages()		malloc(MEMO_CLUSTER_SIZE, M_DEVBUF, M_NOWAIT)
#define free_pages(p)		free((p), M_DEVBUF)

typedef u_long m_addr_t;	/* Enough bits to bit-hack addresses */

typedef struct m_link {		/* Link between free memory chunks */
	struct m_link *next;
} m_link_s;

#ifdef FreeBSD_Bus_Dma_Abstraction
typedef struct m_vtob {		/* Virtual to Bus address translation */
	struct m_vtob *next;
	bus_dmamap_t dmamap;	/* Map for this chunk */
	m_addr_t vaddr;		/* Virtual address */
	m_addr_t baddr;		/* Bus physical address */
} m_vtob_s;
/* Hash this stuff a bit to speed up translations */
#define VTOB_HASH_SHIFT		5
#define VTOB_HASH_SIZE		(1UL << VTOB_HASH_SHIFT)
#define VTOB_HASH_MASK		(VTOB_HASH_SIZE-1)
#define VTOB_HASH_CODE(m)	\
	((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
#endif

typedef struct m_pool {		/* Memory pool of a given kind */
#ifdef FreeBSD_Bus_Dma_Abstraction
	bus_dma_tag_t dev_dmat;	/* Identifies the pool */
	bus_dma_tag_t dmat;	/* Tag for our fixed allocations */
	m_addr_t (*getp)(struct m_pool *);	/* Get one cluster */
#ifdef MEMO_FREE_UNUSED
	void (*freep)(struct m_pool *, m_addr_t); /* Release one cluster */
#endif
#define M_GETP()	mp->getp(mp)
#define M_FREEP(p)	mp->freep(mp, p)
	int nump;		/* Number of clusters currently owned */
	m_vtob_s *(vtob[VTOB_HASH_SIZE]);	/* vaddr->baddr hash */
	struct m_pool *next;
#else
#define M_GETP()	get_pages()
#define M_FREEP(p)	free_pages(p)
#endif	/* FreeBSD_Bus_Dma_Abstraction */
	/* One free list per power-of-2 size, from 2^MEMO_SHIFT up
	 * to a full cluster. */
	struct m_link h[MEMO_CLUSTER_SHIFT - MEMO_SHIFT + 1];
} m_pool_s;

/*
 * Allocate a power-of-2 aligned chunk of at least 'size' bytes
 * from pool 'mp'.  Returns 0 on failure or if 'size' exceeds one
 * cluster.  Splits a larger free block (buddy style) when no block
 * of the right size is available, pulling a fresh cluster from the
 * pool's backend as a last resort.
 */
static void *___sym_malloc(m_pool_s *mp, int size)
{
	int i = 0;
	int s = (1 << MEMO_SHIFT);
	int j;
	m_addr_t a;
	m_link_s *h = mp->h;

	if (size > MEMO_CLUSTER_SIZE)
		return 0;

	/* Round the request up to the smallest power of 2 >= size;
	 * 'i' indexes the matching free list. */
	while (size > s) {
		s <<= 1;
		++i;
	}

	/* Find the first non-empty list of size >= s, refilling from
	 * the backend when even the cluster-sized list is empty. */
	j = i;
	while (!h[j].next) {
		if (s == MEMO_CLUSTER_SIZE) {
			h[j].next = (m_link_s *) M_GETP();
			if (h[j].next)
				h[j].next->next = 0;
			break;
		}
		++j;
		s <<= 1;
	}
	a = (m_addr_t) h[j].next;
	if (a) {
		h[j].next = h[j].next->next;
		/* Split the block down to the requested size, keeping
		 * the upper buddy of each split on its free list. */
		while (j > i) {
			j -= 1;
			s >>= 1;
			h[j].next = (m_link_s *) (a+s);
			h[j].next->next = 0;
		}
	}
#ifdef DEBUG
	printf("___sym_malloc(%d) = %p\n", size, (void *) a);
#endif
	return (void *) a;
}

/*
 * Return a chunk previously obtained from ___sym_malloc() to pool
 * 'mp'.  'size' must match the original request.  Coalesces with
 * the buddy block (address ^ size) whenever that buddy is free,
 * repeatedly, up to cluster size.
 */
static void ___sym_mfree(m_pool_s *mp, void *ptr, int size)
{
	int i = 0;
	int s = (1 << MEMO_SHIFT);
	m_link_s *q;
	m_addr_t a, b;
	m_link_s *h = mp->h;

#ifdef DEBUG
	printf("___sym_mfree(%p, %d)\n", ptr, size);
#endif

	if (size > MEMO_CLUSTER_SIZE)
		return;

	while (size > s) {
		s <<= 1;
		++i;
	}

	a = (m_addr_t) ptr;

	while (1) {
#ifdef MEMO_FREE_UNUSED
		if (s == MEMO_CLUSTER_SIZE) {
			M_FREEP(a);
			break;
		}
#endif
		/* The buddy of block 'a' at size 's' is at a ^ s. */
		b = a ^ s;
		q = &h[i];
		while (q->next && q->next != (m_link_s *) b) {
			q = q->next;
		}
		if (!q->next) {
			/* Buddy not free: link the block and stop. */
			((m_link_s *) a)->next = h[i].next;
			h[i].next = (m_link_s *) a;
			break;
		}
		/* Buddy free: unlink it and retry with the merged,
		 * twice-as-large block (lower of the two addresses). */
		q->next = q->next->next;
		a = a & b;
		s <<= 1;
		++i;
	}
}

/*
 * Allocate and zero a chunk from pool 'mp'.  'name' is only used
 * for diagnostics.  Prints a warning on failure when MEMO_WARN is
 * set in 'uflags'.
 */
static void *__sym_calloc2(m_pool_s *mp, int size, char *name, int uflags)
{
	void *p;

	p = ___sym_malloc(mp, size);

	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printf ("new %-10s[%4d] @%p.\n", name, size, p);

	if (p)
		bzero(p, size);
	else if (uflags & MEMO_WARN)
		printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size);

	return p;
}

#define __sym_calloc(mp, s, n)	__sym_calloc2(mp, s, n, MEMO_WARN)

/*
 * Free a chunk obtained from __sym_calloc2()/__sym_calloc().
 */
static void __sym_mfree(m_pool_s *mp, void *ptr, int size, char *name)
{
	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);

	___sym_mfree(mp, ptr, size);

}

/*
 * Default memory pool we do not need to involve in DMA.
 */
#ifndef	FreeBSD_Bus_Dma_Abstraction
/*
 * Without the `bus dma abstraction', all the memory is assumed
 * DMAable and a single pool is all what we need.
 */
static m_pool_s mp0;

#else
/*
 * With the `bus dma abstraction', we use a separate pool for
 * memory we do not need to involve in DMA.
 */
/* Backend: grab one cluster from the kernel malloc arena. */
static m_addr_t ___mp0_getp(m_pool_s *mp)
{
	m_addr_t m = (m_addr_t) get_pages();
	if (m)
		++mp->nump;
	return m;
}

#ifdef	MEMO_FREE_UNUSED
/* Backend: give one cluster back to the kernel malloc arena. */
static void ___mp0_freep(m_pool_s *mp, m_addr_t m)
{
	free_pages(m);
	--mp->nump;
}
#endif

#ifdef	MEMO_FREE_UNUSED
static m_pool_s mp0 = {0, 0, ___mp0_getp, ___mp0_freep};
#else
static m_pool_s mp0 = {0, 0, ___mp0_getp};
#endif

#endif /* FreeBSD_Bus_Dma_Abstraction */

/*
 * Actual memory allocation routine for non-DMAed memory.
622 */ 623 static void *sym_calloc(int size, char *name) 624 { 625 void *m; 626 /* Lock */ 627 m = __sym_calloc(&mp0, size, name); 628 /* Unlock */ 629 return m; 630 } 631 632 /* 633 * Actual memory allocation routine for non-DMAed memory. 634 */ 635 static void sym_mfree(void *ptr, int size, char *name) 636 { 637 /* Lock */ 638 __sym_mfree(&mp0, ptr, size, name); 639 /* Unlock */ 640 } 641 642 /* 643 * DMAable pools. 644 */ 645 #ifndef FreeBSD_Bus_Dma_Abstraction 646 /* 647 * Without `bus dma abstraction', all the memory is DMAable, and 648 * only a single pool is needed (vtophys() is our friend). 649 */ 650 #define __sym_calloc_dma(b, s, n) sym_calloc(s, n) 651 #define __sym_mfree_dma(b, p, s, n) sym_mfree(p, s, n) 652 #ifdef __alpha__ 653 #define __vtobus(b, p) alpha_XXX_dmamap((vm_offset_t)(p)) 654 #else /*__i386__*/ 655 #define __vtobus(b, p) vtophys(p) 656 #endif 657 658 #else 659 /* 660 * With `bus dma abstraction', we use a separate pool per parent 661 * BUS handle. A reverse table (hashed) is maintained for virtual 662 * to BUS address translation. 
663 */ 664 static void getbaddrcb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 665 { 666 bus_addr_t *baddr; 667 baddr = (bus_addr_t *)arg; 668 *baddr = segs->ds_addr; 669 } 670 671 static m_addr_t ___dma_getp(m_pool_s *mp) 672 { 673 m_vtob_s *vbp; 674 void *vaddr = 0; 675 bus_addr_t baddr = 0; 676 677 vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB"); 678 if (!vbp) 679 goto out_err; 680 681 if (bus_dmamem_alloc(mp->dmat, &vaddr, 682 BUS_DMA_NOWAIT, &vbp->dmamap)) 683 goto out_err; 684 bus_dmamap_load(mp->dmat, vbp->dmamap, vaddr, 685 MEMO_CLUSTER_SIZE, getbaddrcb, &baddr, 0); 686 if (baddr) { 687 int hc = VTOB_HASH_CODE(vaddr); 688 vbp->vaddr = (m_addr_t) vaddr; 689 vbp->baddr = (m_addr_t) baddr; 690 vbp->next = mp->vtob[hc]; 691 mp->vtob[hc] = vbp; 692 ++mp->nump; 693 return (m_addr_t) vaddr; 694 } 695 out_err: 696 if (baddr) 697 bus_dmamap_unload(mp->dmat, vbp->dmamap); 698 if (vaddr) 699 bus_dmamem_free(mp->dmat, vaddr, vbp->dmamap); 700 if (vbp->dmamap) 701 bus_dmamap_destroy(mp->dmat, vbp->dmamap); 702 if (vbp) 703 __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB"); 704 return 0; 705 } 706 707 #ifdef MEMO_FREE_UNUSED 708 static void ___dma_freep(m_pool_s *mp, m_addr_t m) 709 { 710 m_vtob_s **vbpp, *vbp; 711 int hc = VTOB_HASH_CODE(m); 712 713 vbpp = &mp->vtob[hc]; 714 while (*vbpp && (*vbpp)->vaddr != m) 715 vbpp = &(*vbpp)->next; 716 if (*vbpp) { 717 vbp = *vbpp; 718 *vbpp = (*vbpp)->next; 719 bus_dmamap_unload(mp->dmat, vbp->dmamap); 720 bus_dmamem_free(mp->dmat, (void *) vbp->vaddr, vbp->dmamap); 721 bus_dmamap_destroy(mp->dmat, vbp->dmamap); 722 __sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB"); 723 --mp->nump; 724 } 725 } 726 #endif 727 728 static __inline__ m_pool_s *___get_dma_pool(bus_dma_tag_t dev_dmat) 729 { 730 m_pool_s *mp; 731 for (mp = mp0.next; mp && mp->dev_dmat != dev_dmat; mp = mp->next); 732 return mp; 733 } 734 735 static m_pool_s *___cre_dma_pool(bus_dma_tag_t dev_dmat) 736 { 737 m_pool_s *mp = 0; 738 739 mp = __sym_calloc(&mp0, sizeof(*mp), 
"MPOOL"); 740 if (mp) { 741 mp->dev_dmat = dev_dmat; 742 if (!bus_dma_tag_create(dev_dmat, 1, MEMO_CLUSTER_SIZE, 743 BUS_SPACE_MAXADDR_32BIT, 744 BUS_SPACE_MAXADDR_32BIT, 745 NULL, NULL, MEMO_CLUSTER_SIZE, 1, 746 MEMO_CLUSTER_SIZE, 0, &mp->dmat)) { 747 mp->getp = ___dma_getp; 748 #ifdef MEMO_FREE_UNUSED 749 mp->freep = ___dma_freep; 750 #endif 751 mp->next = mp0.next; 752 mp0.next = mp; 753 return mp; 754 } 755 } 756 if (mp) 757 __sym_mfree(&mp0, mp, sizeof(*mp), "MPOOL"); 758 return 0; 759 } 760 761 #ifdef MEMO_FREE_UNUSED 762 static void ___del_dma_pool(m_pool_s *p) 763 { 764 struct m_pool **pp = &mp0.next; 765 766 while (*pp && *pp != p) 767 pp = &(*pp)->next; 768 if (*pp) { 769 *pp = (*pp)->next; 770 bus_dma_tag_destroy(p->dmat); 771 __sym_mfree(&mp0, p, sizeof(*p), "MPOOL"); 772 } 773 } 774 #endif 775 776 static void *__sym_calloc_dma(bus_dma_tag_t dev_dmat, int size, char *name) 777 { 778 struct m_pool *mp; 779 void *m = 0; 780 781 /* Lock */ 782 mp = ___get_dma_pool(dev_dmat); 783 if (!mp) 784 mp = ___cre_dma_pool(dev_dmat); 785 if (mp) 786 m = __sym_calloc(mp, size, name); 787 #ifdef MEMO_FREE_UNUSED 788 if (mp && !mp->nump) 789 ___del_dma_pool(mp); 790 #endif 791 /* Unlock */ 792 793 return m; 794 } 795 796 static void 797 __sym_mfree_dma(bus_dma_tag_t dev_dmat, void *m, int size, char *name) 798 { 799 struct m_pool *mp; 800 801 /* Lock */ 802 mp = ___get_dma_pool(dev_dmat); 803 if (mp) 804 __sym_mfree(mp, m, size, name); 805 #ifdef MEMO_FREE_UNUSED 806 if (mp && !mp->nump) 807 ___del_dma_pool(mp); 808 #endif 809 /* Unlock */ 810 } 811 812 static m_addr_t __vtobus(bus_dma_tag_t dev_dmat, void *m) 813 { 814 m_pool_s *mp; 815 int hc = VTOB_HASH_CODE(m); 816 m_vtob_s *vp = 0; 817 m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK; 818 819 /* Lock */ 820 mp = ___get_dma_pool(dev_dmat); 821 if (mp) { 822 vp = mp->vtob[hc]; 823 while (vp && (m_addr_t) vp->vaddr != a) 824 vp = vp->next; 825 } 826 /* Unlock */ 827 if (!vp) 828 panic("sym: VTOBUS FAILED!\n"); 829 
return vp ? vp->baddr + (((m_addr_t) m) - a) : 0; 830 } 831 832 #endif /* FreeBSD_Bus_Dma_Abstraction */ 833 834 /* 835 * Verbs for DMAable memory handling. 836 * The _uvptv_ macro avoids a nasty warning about pointer to volatile 837 * being discarded. 838 */ 839 #define _uvptv_(p) ((void *)((vm_offset_t)(p))) 840 #define _sym_calloc_dma(np, s, n) __sym_calloc_dma(np->bus_dmat, s, n) 841 #define _sym_mfree_dma(np, p, s, n) \ 842 __sym_mfree_dma(np->bus_dmat, _uvptv_(p), s, n) 843 #define sym_calloc_dma(s, n) _sym_calloc_dma(np, s, n) 844 #define sym_mfree_dma(p, s, n) _sym_mfree_dma(np, p, s, n) 845 #define _vtobus(np, p) __vtobus(np->bus_dmat, _uvptv_(p)) 846 #define vtobus(p) _vtobus(np, p) 847 848 849 /* 850 * Print a buffer in hexadecimal format. 851 */ 852 static void sym_printb_hex (u_char *p, int n) 853 { 854 while (n-- > 0) 855 printf (" %x", *p++); 856 } 857 858 /* 859 * Same with a label at beginning and .\n at end. 860 */ 861 static void sym_printl_hex (char *label, u_char *p, int n) 862 { 863 printf ("%s", label); 864 sym_printb_hex (p, n); 865 printf (".\n"); 866 } 867 868 /* 869 * Return a string for SCSI BUS mode. 870 */ 871 static char *sym_scsi_bus_mode(int mode) 872 { 873 switch(mode) { 874 case SMODE_HVD: return "HVD"; 875 case SMODE_SE: return "SE"; 876 case SMODE_LVD: return "LVD"; 877 } 878 return "??"; 879 } 880 881 /* 882 * Some poor sync table that refers to Tekram NVRAM layout. 883 */ 884 #ifdef SYM_CONF_NVRAM_SUPPORT 885 static u_char Tekram_sync[16] = 886 {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10}; 887 #endif 888 889 /* 890 * Union of supported NVRAM formats. 891 */ 892 struct sym_nvram { 893 int type; 894 #define SYM_SYMBIOS_NVRAM (1) 895 #define SYM_TEKRAM_NVRAM (2) 896 #ifdef SYM_CONF_NVRAM_SUPPORT 897 union { 898 Symbios_nvram Symbios; 899 Tekram_nvram Tekram; 900 } data; 901 #endif 902 }; 903 904 /* 905 * This one is hopefully useless, but actually useful. 
 */
#ifndef assert
#define	assert(expression) { \
	if (!(expression)) { \
		(void)panic( \
			"assertion \"%s\" failed: file \"%s\", line %d\n", \
			#expression, \
			__FILE__, __LINE__); \
	} \
}
#endif

/*
 * Some provision for a possible big endian support.
 * By the way some Symbios chips also may support some kind
 * of big endian byte ordering.
 * For now, this stuff does not deserve any comments. :)
 */

#define sym_offb(o)	(o)
#define sym_offw(o)	(o)

#define cpu_to_scr(dw)	(dw)
#define scr_to_cpu(dw)	(dw)

/*
 * Access to the controller chip.
 *
 * If SYM_CONF_IOMAPPED is defined, the driver will use
 * normal IOs instead of the MEMORY MAPPED IO method
 * recommended by PCI specifications.
 */

/*
 * Define some understandable verbs so we will not suffer of
 * having to deal with the stupid PC tokens for IO.
 */
#define io_read8(p)	 scr_to_cpu(inb((p)))
#define io_read16(p)	 scr_to_cpu(inw((p)))
#define io_read32(p)	 scr_to_cpu(inl((p)))
#define io_write8(p, v)	 outb((p), cpu_to_scr(v))
#define io_write16(p, v) outw((p), cpu_to_scr(v))
#define io_write32(p, v) outl((p), cpu_to_scr(v))

#ifdef	__alpha__

#define mmio_read8(a)	     readb(a)
#define mmio_read16(a)	     readw(a)
#define mmio_read32(a)	     readl(a)
#define mmio_write8(a, b)    writeb(a, b)
#define mmio_write16(a, b)   writew(a, b)
#define mmio_write32(a, b)   writel(a, b)

#else /*__i386__*/

/* On x86, plain volatile accesses are enough for MMIO. */
#define mmio_read8(a)	     scr_to_cpu((*(volatile unsigned char *) (a)))
#define mmio_read16(a)	     scr_to_cpu((*(volatile unsigned short *) (a)))
#define mmio_read32(a)	     scr_to_cpu((*(volatile unsigned int *) (a)))
#define mmio_write8(a, b)    (*(volatile unsigned char *) (a)) = cpu_to_scr(b)
#define mmio_write16(a, b)   (*(volatile unsigned short *) (a)) = cpu_to_scr(b)
#define mmio_write32(a, b)   (*(volatile unsigned int *) (a)) = cpu_to_scr(b)

#endif

/*
 * Normal IO
 * (these macros expect the per-HBA softc pointer 'np' in scope)
 */
#if defined(SYM_CONF_IOMAPPED)

#define INB_OFF(o)	io_read8(np->io_port + sym_offb(o))
#define OUTB_OFF(o, v)	io_write8(np->io_port + sym_offb(o), (v))

#define INW_OFF(o)	io_read16(np->io_port + sym_offw(o))
#define OUTW_OFF(o, v)	io_write16(np->io_port + sym_offw(o), (v))

#define INL_OFF(o)	io_read32(np->io_port + (o))
#define OUTL_OFF(o, v)	io_write32(np->io_port + (o), (v))

#else	/* Memory mapped IO */

#define INB_OFF(o)	mmio_read8(np->mmio_va + sym_offb(o))
#define OUTB_OFF(o, v)	mmio_write8(np->mmio_va + sym_offb(o), (v))

#define INW_OFF(o)	mmio_read16(np->mmio_va + sym_offw(o))
#define OUTW_OFF(o, v)	mmio_write16(np->mmio_va + sym_offw(o), (v))

#define INL_OFF(o)	mmio_read32(np->mmio_va + (o))
#define OUTL_OFF(o, v)	mmio_write32(np->mmio_va + (o), (v))

#endif

/*
 * Common to both normal IO and MMIO:
 * access chip registers by field name of struct sym_reg.
 */
#define INB(r)		INB_OFF(offsetof(struct sym_reg,r))
#define INW(r)		INW_OFF(offsetof(struct sym_reg,r))
#define INL(r)		INL_OFF(offsetof(struct sym_reg,r))

#define OUTB(r, v)	OUTB_OFF(offsetof(struct sym_reg,r), (v))
#define OUTW(r, v)	OUTW_OFF(offsetof(struct sym_reg,r), (v))
#define OUTL(r, v)	OUTL_OFF(offsetof(struct sym_reg,r), (v))

/* Read-modify-write helpers to set/clear bits in a register. */
#define OUTONB(r, m)	OUTB(r, INB(r) | (m))
#define OUTOFFB(r, m)	OUTB(r, INB(r) & ~(m))
#define OUTONW(r, m)	OUTW(r, INW(r) | (m))
#define OUTOFFW(r, m)	OUTW(r, INW(r) & ~(m))
#define OUTONL(r, m)	OUTL(r, INL(r) | (m))
#define OUTOFFL(r, m)	OUTL(r, INL(r) & ~(m))

/*
 * Command control block states.
 */
#define HS_IDLE		(0)
#define HS_BUSY		(1)
#define HS_NEGOTIATE	(2)	/* sync/wide data transfer*/
#define HS_DISCONNECT	(3)	/* Disconnected by target */
#define HS_WAIT		(4)	/* waiting for resource	  */

#define HS_DONEMASK	(0x80)
#define HS_COMPLETE	(4|HS_DONEMASK)
#define HS_SEL_TIMEOUT	(5|HS_DONEMASK)	/* Selection timeout      */
#define HS_UNEXPECTED	(6|HS_DONEMASK)	/* Unexpected disconnect  */
#define HS_COMP_ERR	(7|HS_DONEMASK)	/* Completed with error	  */

/*
 * Software Interrupt Codes
 * (raised by the SCRIPTS to get C code involved)
 */
#define	SIR_BAD_SCSI_STATUS	(1)
#define	SIR_SEL_ATN_NO_MSG_OUT	(2)
#define	SIR_MSG_RECEIVED	(3)
#define	SIR_MSG_WEIRD		(4)
#define	SIR_NEGO_FAILED		(5)
#define	SIR_NEGO_PROTO		(6)
#define	SIR_SCRIPT_STOPPED	(7)
#define	SIR_REJECT_TO_SEND	(8)
#define	SIR_SWIDE_OVERRUN	(9)
#define	SIR_SODL_UNDERRUN	(10)
#define	SIR_RESEL_NO_MSG_IN	(11)
#define	SIR_RESEL_NO_IDENTIFY	(12)
#define	SIR_RESEL_BAD_LUN	(13)
#define	SIR_TARGET_SELECTED	(14)
#define	SIR_RESEL_BAD_I_T_L	(15)
#define	SIR_RESEL_BAD_I_T_L_Q	(16)
#define	SIR_ABORT_SENT		(17)
#define	SIR_RESEL_ABORTED	(18)
#define	SIR_MSG_OUT_DONE	(19)
#define	SIR_COMPLETE_ERROR	(20)
#define	SIR_DATA_OVERRUN	(21)
#define	SIR_BAD_PHASE		(22)
#define	SIR_MAX			(22)

/*
 * Extended error bit codes.
 * xerr_status field of struct sym_ccb.
 */
#define	XE_EXTRA_DATA	(1)	/* unexpected data phase	 */
#define	XE_BAD_PHASE	(1<<1)	/* illegal phase (4/5)		 */
#define	XE_PARITY_ERR	(1<<2)	/* unrecovered SCSI parity error */
#define	XE_SODL_UNRUN	(1<<3)	/* ODD transfer in DATA OUT phase */
#define	XE_SWIDE_OVRUN	(1<<4)	/* ODD transfer in DATA IN phase */

/*
 * Negotiation status.
 * nego_status field of struct sym_ccb.
 */
#define NS_SYNC		(1)
#define NS_WIDE		(2)
#define NS_PPR		(3)

/*
 * A CCB hashed table is used to retrieve CCB address
 * from DSA value.
 */
#define CCB_HASH_SHIFT		8
#define CCB_HASH_SIZE		(1UL << CCB_HASH_SHIFT)
#define CCB_HASH_MASK		(CCB_HASH_SIZE-1)
#define CCB_HASH_CODE(dsa)	(((dsa) >> 9) & CCB_HASH_MASK)

/*
 * Device flags.
 */
#define SYM_DISC_ENABLED	(1)
#define SYM_TAGS_ENABLED	(1<<1)
#define SYM_SCAN_BOOT_DISABLED	(1<<2)
#define SYM_SCAN_LUNS_DISABLED	(1<<3)

/*
 * Host adapter miscellaneous flags.
 */
#define SYM_AVOID_BUS_RESET	(1)
#define SYM_SCAN_TARGETS_HILO	(1<<1)

/*
 * Device quirks.
 * Some devices, for example the CHEETAH 2 LVD, disconnects without
 * saving the DATA POINTER then reconnect and terminates the IO.
 * On reselection, the automatic RESTORE DATA POINTER makes the
 * CURRENT DATA POINTER not point at the end of the IO.
 * This behaviour just breaks our calculation of the residual.
 * For now, we just force an AUTO SAVE on disconnection and will
 * fix that in a further driver version.
 */
#define SYM_QUIRK_AUTOSAVE 1

/*
 * Misc.
 */
#define SYM_SNOOP_TIMEOUT (10000000)
/* PCI BAR offsets of the chip's IO, MMIO and on-chip RAM windows. */
#define SYM_PCI_IO	PCIR_MAPS
#define SYM_PCI_MMIO	(PCIR_MAPS + 4)
#define SYM_PCI_RAM	(PCIR_MAPS + 8)
#define SYM_PCI_RAM64	(PCIR_MAPS + 12)

/*
 * Back-pointer from the CAM CCB to our data structures.
 */
#define sym_hcb_ptr	spriv_ptr0
/* #define sym_ccb_ptr	spriv_ptr1 */

/*
 * We mostly have to deal with pointers.
 * Thus these typedef's.
 */
typedef struct sym_tcb *tcb_p;
typedef struct sym_lcb *lcb_p;
typedef struct sym_ccb *ccb_p;
typedef struct sym_hcb *hcb_p;

/*
 * Gather negotiable parameters value
 */
struct sym_trans {
	u8 period;	/* Sync period factor */
	u8 offset;	/* Sync offset (0 = async) */
	u8 width;	/* Bus width (BUS_8_BIT/BUS_16_BIT) */
	u8 options;	/* PPR options */
};

/*
 * Per-target transfer settings: what is currently in effect,
 * what we want to negotiate, and what the user configured.
 */
struct sym_tinfo {
	struct sym_trans current;
	struct sym_trans goal;
	struct sym_trans user;
};

#define BUS_8_BIT	MSG_EXT_WDTR_BUS_8_BIT
#define BUS_16_BIT	MSG_EXT_WDTR_BUS_16_BIT

/*
 * Global TCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the TCB to a global
 * address after selection.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_tcbh {
	/*
	 * Scripts bus addresses of LUN table accessed from scripts.
	 * LUN #0 is a special case, since multi-lun devices are rare,
	 * and we want to speed-up the general case and not waste
	 * resources.
	 */
	u32	luntbl_sa;	/* bus address of this table */
	u32	lun0_sa;	/* bus address of LCB #0 */
	/*
	 * Actual SYNC/WIDE IO registers value for this target.
	 * 'sval', 'wval' and 'uval' are read from SCRIPTS and
	 * so have alignment constraints (offsets noted below).
	 */
/*0*/	u_char	uval;		/* -> SCNTL4 register */
/*1*/	u_char	sval;		/* -> SXFER  io register */
/*2*/	u_char	filler1;
/*3*/	u_char	wval;		/* -> SCNTL3 io register */
};

/*
 * Target Control Block
 */
struct sym_tcb {
	/*
	 * TCB header.
	 * Assumed at offset 0.
	 */
/*0*/	struct sym_tcbh head;

	/*
	 * LUN table used by the SCRIPTS processor.
	 * An array of bus addresses is used on reselection.
	 */
	u32	*luntbl;	/* LCBs bus address table */

	/*
	 * LUN table used by the C code.
	 */
	lcb_p	lun0p;		/* LCB of LUN #0 (usual case) */
#if SYM_CONF_MAX_LUN > 1
	lcb_p	*lunmp;		/* Other LCBs [1..MAX_LUN] */
#endif

	/*
	 * Bitmap that tells about LUNs that succeeded at least
	 * 1 IO and therefore assumed to be a real device.
	 * Avoid useless allocation of the LCB structure.
	 */
	u32	lun_map[(SYM_CONF_MAX_LUN+31)/32];

	/*
	 * Bitmap that tells about LUNs that haven't yet an LCB
	 * allocated (not discovered or LCB allocation failed).
	 */
	u32	busy0_map[(SYM_CONF_MAX_LUN+31)/32];

	/*
	 * Transfer capabilities (SIP)
	 */
	struct sym_tinfo tinfo;

	/*
	 * Keep track of the CCB used for the negotiation in order
	 * to ensure that only 1 negotiation is queued at a time.
	 */
	ccb_p   nego_cp;	/* CCB used for the nego */

	/*
	 * Set when we want to reset the device.
	 */
	u_char	to_reset;

	/*
	 * Other user settable limits and options.
	 * These limits are read from the NVRAM if present.
	 */
	u_char	usrflags;
	u_short	usrtags;
};

/*
 * Global LCB HEADER.
 *
 * Due to lack of indirect addressing on earlier NCR chips,
 * this substructure is copied from the LCB to a global
 * address after selection.
 * For SYMBIOS chips that support LOAD/STORE this copy is
 * not needed and thus not performed.
 */
struct sym_lcbh {
	/*
	 * SCRIPTS address jumped by SCRIPTS on reselection.
	 * For not probed logical units, this address points to
	 * SCRIPTS that deal with bad LU handling (must be at
	 * offset zero of the LCB for that reason).
	 */
/*0*/	u32	resel_sa;

	/*
	 * Task (bus address of a CCB) read from SCRIPTS that points
	 * to the unique ITL nexus allowed to be disconnected.
	 */
	u32	itl_task_sa;

	/*
	 * Task table bus address (read from SCRIPTS).
1269 */ 1270 u32 itlq_tbl_sa; 1271 }; 1272 1273 /* 1274 * Logical Unit Control Block 1275 */ 1276 struct sym_lcb { 1277 /* 1278 * TCB header. 1279 * Assumed at offset 0. 1280 */ 1281 /*0*/ struct sym_lcbh head; 1282 1283 /* 1284 * Task table read from SCRIPTS that contains pointers to 1285 * ITLQ nexuses. The bus address read from SCRIPTS is 1286 * inside the header. 1287 */ 1288 u32 *itlq_tbl; /* Kernel virtual address */ 1289 1290 /* 1291 * Busy CCBs management. 1292 */ 1293 u_short busy_itlq; /* Number of busy tagged CCBs */ 1294 u_short busy_itl; /* Number of busy untagged CCBs */ 1295 1296 /* 1297 * Circular tag allocation buffer. 1298 */ 1299 u_short ia_tag; /* Tag allocation index */ 1300 u_short if_tag; /* Tag release index */ 1301 u_char *cb_tags; /* Circular tags buffer */ 1302 1303 /* 1304 * Set when we want to clear all tasks. 1305 */ 1306 u_char to_clear; 1307 1308 /* 1309 * Capabilities. 1310 */ 1311 u_char user_flags; 1312 u_char current_flags; 1313 }; 1314 1315 /* 1316 * Action from SCRIPTS on a task. 1317 * Is part of the CCB, but is also used separately to plug 1318 * error handling action to perform from SCRIPTS. 1319 */ 1320 struct sym_actscr { 1321 u32 start; /* Jumped by SCRIPTS after selection */ 1322 u32 restart; /* Jumped by SCRIPTS on relection */ 1323 }; 1324 1325 /* 1326 * Phase mismatch context. 1327 * 1328 * It is part of the CCB and is used as parameters for the 1329 * DATA pointer. We need two contexts to handle correctly the 1330 * SAVED DATA POINTER. 1331 */ 1332 struct sym_pmc { 1333 struct sym_tblmove sg; /* Updated interrupted SG block */ 1334 u32 ret; /* SCRIPT return address */ 1335 }; 1336 1337 /* 1338 * LUN control block lookup. 1339 * We use a direct pointer for LUN #0, and a table of 1340 * pointers which is only allocated for devices that support 1341 * LUN(s) > 0. 1342 */ 1343 #if SYM_CONF_MAX_LUN <= 1 1344 #define sym_lp(np, tp, lun) (!lun) ? (tp)->lun0p : 0 1345 #else 1346 #define sym_lp(np, tp, lun) \ 1347 (!lun) ? 
(tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : 0 1348 #endif 1349 1350 /* 1351 * Status are used by the host and the script processor. 1352 * 1353 * The last four bytes (status[4]) are copied to the 1354 * scratchb register (declared as scr0..scr3) just after the 1355 * select/reselect, and copied back just after disconnecting. 1356 * Inside the script the XX_REG are used. 1357 */ 1358 1359 /* 1360 * Last four bytes (script) 1361 */ 1362 #define QU_REG scr0 1363 #define HS_REG scr1 1364 #define HS_PRT nc_scr1 1365 #define SS_REG scr2 1366 #define SS_PRT nc_scr2 1367 #define HF_REG scr3 1368 #define HF_PRT nc_scr3 1369 1370 /* 1371 * Last four bytes (host) 1372 */ 1373 #define actualquirks phys.head.status[0] 1374 #define host_status phys.head.status[1] 1375 #define ssss_status phys.head.status[2] 1376 #define host_flags phys.head.status[3] 1377 1378 /* 1379 * Host flags 1380 */ 1381 #define HF_IN_PM0 1u 1382 #define HF_IN_PM1 (1u<<1) 1383 #define HF_ACT_PM (1u<<2) 1384 #define HF_DP_SAVED (1u<<3) 1385 #define HF_SENSE (1u<<4) 1386 #define HF_EXT_ERR (1u<<5) 1387 #define HF_DATA_IN (1u<<6) 1388 #ifdef SYM_CONF_IARB_SUPPORT 1389 #define HF_HINT_IARB (1u<<7) 1390 #endif 1391 1392 /* 1393 * Global CCB HEADER. 1394 * 1395 * Due to lack of indirect addressing on earlier NCR chips, 1396 * this substructure is copied from the ccb to a global 1397 * address after selection (or reselection) and copied back 1398 * before disconnect. 1399 * For SYMBIOS chips that support LOAD/STORE this copy is 1400 * not needed and thus not performed. 1401 */ 1402 1403 struct sym_ccbh { 1404 /* 1405 * Start and restart SCRIPTS addresses (must be at 0). 1406 */ 1407 /*0*/ struct sym_actscr go; 1408 1409 /* 1410 * SCRIPTS jump address that deal with data pointers. 1411 * 'savep' points to the position in the script responsible 1412 * for the actual transfer of data. 1413 * It's written on reception of a SAVE_DATA_POINTER message. 
 */
	u32	savep;		/* Jump address to saved data pointer */
	u32	lastp;		/* SCRIPTS address at end of data */
	u32	goalp;		/* Not accessed for now from SCRIPTS */

	/*
	 * Status fields.
	 */
	u8	status[4];
};

/*
 * Data Structure Block
 *
 * During execution of a ccb by the script processor, the
 * DSA (data structure address) register points to this
 * substructure of the ccb.
 */
struct sym_dsb {
	/*
	 * CCB header.
	 * Also Assumed at offset 0 of the sym_ccb structure.
	 */
/*0*/	struct sym_ccbh head;

	/*
	 * Phase mismatch contexts.
	 * We need two to handle correctly the SAVED DATA POINTER.
	 * MUST BOTH BE AT OFFSET < 256, due to using 8 bit arithmetic
	 * for address calculation from SCRIPTS.
	 */
	struct sym_pmc pm0;
	struct sym_pmc pm1;

	/*
	 * Table data for Script
	 */
	struct sym_tblsel  select;
	struct sym_tblmove smsg;
	struct sym_tblmove smsg_ext;
	struct sym_tblmove cmd;
	struct sym_tblmove sense;
	struct sym_tblmove wresid;
	struct sym_tblmove data [SYM_CONF_MAX_SG];
};

/*
 * Our Command Control Block
 */
struct sym_ccb {
	/*
	 * This is the data structure which is pointed by the DSA
	 * register when it is executed by the script processor.
	 * It must be the first entry.
	 */
	struct sym_dsb phys;

	/*
	 * Pointer to CAM ccb and related stuff.
	 */
	union ccb *cam_ccb;	/* CAM scsiio ccb */
	u8	cdb_buf[16];	/* Copy of CDB */
	u8	*sns_bbuf;	/* Bounce buffer for sense data */
#define SYM_SNS_BBUF_LEN sizeof(struct scsi_sense_data)
	int	data_len;	/* Total data length */
	int	segments;	/* Number of SG segments */

	/*
	 * Miscellaneous status'.
	 */
	u_char	nego_status;	/* Negotiation status */
	u_char	xerr_status;	/* Extended error flags */
	u32	extra_bytes;	/* Extraneous bytes transferred */

	/*
	 * Message areas.
	 * We prepare a message to be sent after selection.
	 * We may use a second one if the command is rescheduled
	 * due to CHECK_CONDITION or COMMAND TERMINATED.
	 * Contents are IDENTIFY and SIMPLE_TAG.
	 * While negotiating sync or wide transfer,
	 * a SDTR or WDTR message is appended.
	 */
	u_char	scsi_smsg [12];
	u_char	scsi_smsg2[12];

	/*
	 * Auto request sense related fields.
	 */
	u_char	sensecmd[6];	/* Request Sense command */
	u_char	sv_scsi_status;	/* Saved SCSI status */
	u_char	sv_xerr_status;	/* Saved extended status */
	int	sv_resid;	/* Saved residual */

	/*
	 * Map for the DMA of user data.
	 */
#ifdef	FreeBSD_Bus_Dma_Abstraction
	void		*arg;	/* Argument for some callback */
	bus_dmamap_t	dmamap;	/* DMA map for user data */
	u_char		dmamapped;
#define SYM_DMA_NONE	0
#define SYM_DMA_READ	1
#define SYM_DMA_WRITE	2
#endif
	/*
	 * Other fields.
	 */
	u_long	ccb_ba;		/* BUS address of this CCB */
	u_short	tag;		/* Tag for this transfer */
				/*       NO_TAG means no tag */
	u_char	target;
	u_char	lun;
	ccb_p	link_ccbh;	/* Host adapter CCB hash chain */
	SYM_QUEHEAD
		link_ccbq;	/* Link to free/busy CCB queue */
	u32	startp;		/* Initial data pointer */
	int	ext_sg;		/* Extreme data pointer, used */
	int	ext_ofs;	/*  to calculate the residual. */
	u_char	to_abort;	/* Want this IO to be aborted */
};

/* Bus address of field 'lbl' inside a given CCB. */
#define CCB_BA(cp,lbl)	(cp->ccb_ba + offsetof(struct sym_ccb, lbl))

/*
 * Host Control Block
 */
struct sym_hcb {
	/*
	 * Global headers.
	 * Due to poorness of addressing capabilities, earlier
	 * chips (810, 815, 825) copy part of the data structures
	 * (CCB, TCB and LCB) in fixed areas.
	 */
#ifdef	SYM_CONF_GENERIC_SUPPORT
	struct sym_ccbh	ccb_head;
	struct sym_tcbh	tcb_head;
	struct sym_lcbh	lcb_head;
#endif
	/*
	 * Idle task and invalid task actions and
	 * their bus addresses.
	 */
	struct sym_actscr idletask, notask, bad_itl, bad_itlq;
	vm_offset_t idletask_ba, notask_ba, bad_itl_ba, bad_itlq_ba;

	/*
	 * Dummy lun table to protect us against target
	 * returning bad lun number on reselection.
	 */
	u32	*badluntbl;	/* Table physical address */
	u32	badlun_sa;	/* SCRIPT handler BUS address */

	/*
	 * Bus address of this host control block.
	 */
	u32	hcb_ba;

	/*
	 * Bit 32-63 of the on-chip RAM bus address in LE format.
	 * The START_RAM64 script loads the MMRS and MMWS from this
	 * field.
	 */
	u32	scr_ram_seg;

	/*
	 * Chip and controller identification.
	 */
#ifdef FreeBSD_Bus_Io_Abstraction
	device_t device;
#else
	pcici_t	pci_tag;
#endif
	int	unit;
	char	inst_name[8];

	/*
	 * Initial value of some IO register bits.
	 * These values are assumed to have been set by BIOS, and may
	 * be used to probe adapter implementation differences.
	 */
	u_char	sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
		sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_scntl4,
		sv_stest1;

	/*
	 * Actual initial value of IO register bits used by the
	 * driver. They are loaded at initialisation according to
	 * features that are to be enabled/disabled.
	 */
	u_char	rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
		rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4;

	/*
	 * Target data used by the CPU.
	 */
	struct sym_tcb	target[SYM_CONF_MAX_TARGET];

	/*
	 * Target control block bus address array used by the SCRIPT
	 * on reselection.
	 */
	u32		*targtbl;
	u32		targtbl_ba;

	/*
	 * CAM SIM information for this instance.
	 */
	struct cam_sim  *sim;
	struct cam_path *path;

	/*
	 * Allocated hardware resources.
	 */
#ifdef FreeBSD_Bus_Io_Abstraction
	struct resource	*irq_res;
	struct resource	*io_res;
	struct resource	*mmio_res;
	struct resource	*ram_res;
	int		ram_id;
	void		*intr;
#endif

	/*
	 * Bus stuff.
	 *
	 * My understanding of PCI is that all agents must share the
	 * same addressing range and model.
	 * But some hardware architecture guys provide complex and
	 * broken implementations.
	 * This driver only support PCI compliant implementations and
	 * deals with part of the BUS stuff complexity only to fit O/S
	 * requirements.
	 */
#ifdef FreeBSD_Bus_Io_Abstraction
	bus_space_handle_t	io_bsh;
	bus_space_tag_t		io_tag;
	bus_space_handle_t	mmio_bsh;
	bus_space_tag_t		mmio_tag;
	bus_space_handle_t	ram_bsh;
	bus_space_tag_t		ram_tag;
#endif

	/*
	 * DMA stuff.
	 */
#ifdef	FreeBSD_Bus_Dma_Abstraction
	bus_dma_tag_t	bus_dmat;	/* DMA tag from parent BUS */
	bus_dma_tag_t	data_dmat;	/* DMA tag for user data */
#endif
	/*
	 * Virtual and physical bus addresses of the chip.
	 */
	vm_offset_t	mmio_va;	/* MMIO kernel virtual address */
	vm_offset_t	mmio_pa;	/* MMIO CPU physical address */
	vm_offset_t	mmio_ba;	/* MMIO BUS address */
	int		mmio_ws;	/* MMIO Window size */

	vm_offset_t	ram_va;		/* RAM kernel virtual address */
	vm_offset_t	ram_pa;		/* RAM CPU physical address */
	vm_offset_t	ram_ba;		/* RAM BUS address */
	int		ram_ws;		/* RAM window size */
	u32		io_port;	/* IO port address */

	/*
	 * SCRIPTS virtual and physical bus addresses.
	 * 'script'  is loaded in the on-chip RAM if present.
	 * 'scripth' stays in main memory for all chips except the
	 * 53C895A, 53C896 and 53C1010 that provide 8K on-chip RAM.
	 */
	u_char		*scripta0;	/* Copies of script and scripth */
	u_char		*scriptb0;	/* Copies of script and scripth */
	vm_offset_t	scripta_ba;	/* Actual script and scripth */
	vm_offset_t	scriptb_ba;	/*  bus addresses. */
	vm_offset_t	scriptb0_ba;
	u_short		scripta_sz;	/* Actual size of script A */
	u_short		scriptb_sz;	/* Actual size of script B */

	/*
	 * Bus addresses, setup and patch methods for
	 * the selected firmware.
	 */
	struct sym_fwa_ba fwa_bas;	/* Useful SCRIPTA bus addresses */
	struct sym_fwb_ba fwb_bas;	/* Useful SCRIPTB bus addresses */
	void		(*fw_setup)(hcb_p np, struct sym_fw *fw);
	void		(*fw_patch)(hcb_p np);
	char		*fw_name;

	/*
	 * General controller parameters and configuration.
	 */
	u_short	device_id;	/* PCI device id */
	u_char	revision_id;	/* PCI device revision id */
	u_int	features;	/* Chip features map */
	u_char	myaddr;		/* SCSI id of the adapter */
	u_char	maxburst;	/* log base 2 of dwords burst */
	u_char	maxwide;	/* Maximum transfer width */
	u_char	minsync;	/* Min sync period factor (ST) */
	u_char	maxsync;	/* Max sync period factor (ST) */
	u_char	minsync_dt;	/* Min sync period factor (DT) */
	u_char	maxsync_dt;	/* Max sync period factor (DT) */
	u_char	maxoffs;	/* Max scsi offset */
	u_char	multiplier;	/* Clock multiplier (1,2,4) */
	u_char	clock_divn;	/* Number of clock divisors */
	u_long	clock_khz;	/* SCSI clock frequency in KHz */

	/*
	 * Start queue management.
	 * It is filled up by the host processor and accessed by the
	 * SCRIPTS processor in order to start SCSI commands.
	 */
	volatile		/* Prevent code optimizations */
	u32	*squeue;	/* Start queue virtual address */
	u32	squeue_ba;	/* Start queue BUS address */
	u_short	squeueput;	/* Next free slot of the queue */
	u_short	actccbs;	/* Number of allocated CCBs */

	/*
	 * Command completion queue.
	 * It is the same size as the start queue to avoid overflow.
	 */
	u_short	dqueueget;	/* Next position to scan */
	volatile		/* Prevent code optimizations */
	u32	*dqueue;	/* Completion (done) queue */
	u32	dqueue_ba;	/* Done queue BUS address */

	/*
	 * Miscellaneous buffers accessed by the scripts-processor.
	 * They shall be DWORD aligned, because they may be read or
	 * written with a script command.
	 */
	u_char	msgout[8];	/* Buffer for MESSAGE OUT */
	u_char	msgin [8];	/* Buffer for MESSAGE IN */
	u32	lastmsg;	/* Last SCSI message sent */
	u_char	scratch;	/* Scratch for SCSI receive */

	/*
	 * Miscellaneous configuration and status parameters.
	 */
	u_char	usrflags;	/* Miscellaneous user flags */
	u_char	scsi_mode;	/* Current SCSI BUS mode */
	u_char	verbose;	/* Verbosity for this controller*/
	u32	cache;		/* Used for cache test at init. */

	/*
	 * CCB lists and queue.
	 */
	ccb_p ccbh[CCB_HASH_SIZE];	/* CCB hashed by DSA value */
	SYM_QUEHEAD	free_ccbq;	/* Queue of available CCBs */
	SYM_QUEHEAD	busy_ccbq;	/* Queue of busy CCBs */

	/*
	 * During error handling and/or recovery,
	 * active CCBs that are to be completed with
	 * error or requeued are moved from the busy_ccbq
	 * to the comp_ccbq prior to completion.
	 */
	SYM_QUEHEAD	comp_ccbq;

	/*
	 * CAM CCB pending queue.
	 */
	SYM_QUEHEAD	cam_ccbq;

	/*
	 * IMMEDIATE ARBITRATION (IARB) control.
	 *
	 * We keep track in 'last_cp' of the last CCB that has been
	 * queued to the SCRIPTS processor and clear 'last_cp' when
	 * this CCB completes. If last_cp is not zero at the moment
	 * we queue a new CCB, we set a flag in 'last_cp' that is
	 * used by the SCRIPTS as a hint for setting IARB.
	 * We do not set more than 'iarb_max' consecutive hints for
	 * IARB in order to leave devices a chance to reselect.
	 * By the way, any non zero value of 'iarb_max' is unfair. :)
	 */
#ifdef SYM_CONF_IARB_SUPPORT
	u_short		iarb_max;	/* Max. # consecutive IARB hints*/
	u_short		iarb_count;	/* Actual # of these hints */
	ccb_p		last_cp;
#endif

	/*
	 * Command abort handling.
	 * We need to synchronize tightly with the SCRIPTS
	 * processor in order to handle things correctly.
	 */
	u_char	abrt_msg[4];	/* Message to send buffer */
	struct sym_tblmove abrt_tbl;	/* Table for the MOV of it */
	struct sym_tblsel  abrt_sel;	/* Sync params for selection */
	u_char	istat_sem;	/* Tells the chip to stop (SEM) */
};

/* Bus address of field 'lbl' inside the host control block. */
#define HCB_BA(np, lbl)	(np->hcb_ba + offsetof(struct sym_hcb, lbl))

/*
 * Return the name of the controller.
 */
static __inline char *sym_name(hcb_p np)
{
	return np->inst_name;
}

/*--------------------------------------------------------------------------*/
/*------------------------------ FIRMWARES ---------------------------------*/
/*--------------------------------------------------------------------------*/

/*
 * This stuff will be moved to a separate source file when
 * the driver will be broken into several source modules.
 */

/*
 * Macros used for all firmwares.
 * SYM_GEN_A/B record the offset of a SCRIPTS label inside the
 * script structure; PADDR_A/B resolve a label to its bus address.
 */
#define	SYM_GEN_A(s, label)	((short) offsetof(s, label)),
#define	SYM_GEN_B(s, label)	((short) offsetof(s, label)),
#define	PADDR_A(label)		SYM_GEN_PADDR_A(struct SYM_FWA_SCR, label)
#define	PADDR_B(label)		SYM_GEN_PADDR_B(struct SYM_FWB_SCR, label)


#ifdef	SYM_CONF_GENERIC_SUPPORT
/*
 * Allocate firmware #1 script area.
 */
#define	SYM_FWA_SCR		sym_fw1a_scr
#define	SYM_FWB_SCR		sym_fw1b_scr
#include <dev/sym/sym_fw1.h>
struct sym_fwa_ofs sym_fw1a_ofs = {
	SYM_GEN_FW_A(struct SYM_FWA_SCR)
};
struct sym_fwb_ofs sym_fw1b_ofs = {
	SYM_GEN_FW_B(struct SYM_FWB_SCR)
};
#undef	SYM_FWA_SCR
#undef	SYM_FWB_SCR
#endif	/* SYM_CONF_GENERIC_SUPPORT */

/*
 * Allocate firmware #2 script area.
 */
#define	SYM_FWA_SCR		sym_fw2a_scr
#define	SYM_FWB_SCR		sym_fw2b_scr
#include <dev/sym/sym_fw2.h>
struct sym_fwa_ofs sym_fw2a_ofs = {
	SYM_GEN_FW_A(struct SYM_FWA_SCR)
};
struct sym_fwb_ofs sym_fw2b_ofs = {
	SYM_GEN_FW_B(struct SYM_FWB_SCR)
	SYM_GEN_B(struct SYM_FWB_SCR, start64)
	SYM_GEN_B(struct SYM_FWB_SCR, pm_handle)
};
#undef	SYM_FWA_SCR
#undef	SYM_FWB_SCR

#undef	SYM_GEN_A
#undef	SYM_GEN_B
#undef	PADDR_A
#undef	PADDR_B

#ifdef	SYM_CONF_GENERIC_SUPPORT
/*
 * Patch routine for firmware #1.
 * Rewrites chip-dependent or configuration-dependent SCRIPTS
 * instructions in the in-memory copies before they are bound
 * and downloaded.
 */
static void
sym_fw1_patch(hcb_p np)
{
	struct sym_fw1a_scr *scripta0;
	struct sym_fw1b_scr *scriptb0;

	scripta0 = (struct sym_fw1a_scr *) np->scripta0;
	scriptb0 = (struct sym_fw1b_scr *) np->scriptb0;

	/*
	 * Remove LED support if not needed.
	 */
	if (!(np->features & FE_LED0)) {
		scripta0->idle[0]	= cpu_to_scr(SCR_NO_OP);
		scripta0->reselected[0]	= cpu_to_scr(SCR_NO_OP);
		scripta0->start[0]	= cpu_to_scr(SCR_NO_OP);
	}

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If user does not want to use IMMEDIATE ARBITRATION
	 * when we are reselected while attempting to arbitrate,
	 * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
	 */
	if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
		scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
#endif
	/*
	 * Patch some data in SCRIPTS.
	 * - start and done queue initial bus address.
	 * - target bus address table bus address.
	 */
	scriptb0->startpos[0]	= cpu_to_scr(np->squeue_ba);
	scriptb0->done_pos[0]	= cpu_to_scr(np->dqueue_ba);
	scriptb0->targtbl[0]	= cpu_to_scr(np->targtbl_ba);
}
#endif	/* SYM_CONF_GENERIC_SUPPORT */

/*
 * Patch routine for firmware 2.
 */
static void
sym_fw2_patch(hcb_p np)
{
	struct sym_fw2a_scr *scripta0;
	struct sym_fw2b_scr *scriptb0;

	scripta0 = (struct sym_fw2a_scr *) np->scripta0;
	scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;

	/*
	 * Remove LED support if not needed.
	 */
	if (!(np->features & FE_LED0)) {
		scripta0->idle[0]	= cpu_to_scr(SCR_NO_OP);
		scripta0->reselected[0]	= cpu_to_scr(SCR_NO_OP);
		scripta0->start[0]	= cpu_to_scr(SCR_NO_OP);
	}

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If user does not want to use IMMEDIATE ARBITRATION
	 * when we are reselected while attempting to arbitrate,
	 * patch the SCRIPTS accordingly with a SCRIPT NO_OP.
	 */
	if (!SYM_CONF_SET_IARB_ON_ARB_LOST)
		scripta0->ungetjob[0] = cpu_to_scr(SCR_NO_OP);
#endif
	/*
	 * Patch some variable in SCRIPTS.
	 * - start and done queue initial bus address.
	 * - target bus address table bus address.
	 */
	scriptb0->startpos[0]	= cpu_to_scr(np->squeue_ba);
	scriptb0->done_pos[0]	= cpu_to_scr(np->dqueue_ba);
	scriptb0->targtbl[0]	= cpu_to_scr(np->targtbl_ba);

	/*
	 * Remove the load of SCNTL4 on reselection if not a C10.
	 */
	if (!(np->features & FE_C10)) {
		scripta0->resel_scntl4[0] = cpu_to_scr(SCR_NO_OP);
		scripta0->resel_scntl4[1] = cpu_to_scr(0);
	}

	/*
	 * Patch some other variables in SCRIPTS.
	 * These ones are loaded by the SCRIPTS processor.
	 */
	scriptb0->pm0_data_addr[0] =
		cpu_to_scr(np->scripta_ba +
			   offsetof(struct sym_fw2a_scr, pm0_data));
	scriptb0->pm1_data_addr[0] =
		cpu_to_scr(np->scripta_ba +
			   offsetof(struct sym_fw2a_scr, pm1_data));
}

/*
 * Fill the data area in scripts.
 * To be done for all firmwares.
 */
1978 */ 1979 static void 1980 sym_fw_fill_data (u32 *in, u32 *out) 1981 { 1982 int i; 1983 1984 for (i = 0; i < SYM_CONF_MAX_SG; i++) { 1985 *in++ = SCR_CHMOV_TBL ^ SCR_DATA_IN; 1986 *in++ = offsetof (struct sym_dsb, data[i]); 1987 *out++ = SCR_CHMOV_TBL ^ SCR_DATA_OUT; 1988 *out++ = offsetof (struct sym_dsb, data[i]); 1989 } 1990 } 1991 1992 /* 1993 * Setup useful script bus addresses. 1994 * To be done for all firmwares. 1995 */ 1996 static void 1997 sym_fw_setup_bus_addresses(hcb_p np, struct sym_fw *fw) 1998 { 1999 u32 *pa; 2000 u_short *po; 2001 int i; 2002 2003 /* 2004 * Build the bus address table for script A 2005 * from the script A offset table. 2006 */ 2007 po = (u_short *) fw->a_ofs; 2008 pa = (u32 *) &np->fwa_bas; 2009 for (i = 0 ; i < sizeof(np->fwa_bas)/sizeof(u32) ; i++) 2010 pa[i] = np->scripta_ba + po[i]; 2011 2012 /* 2013 * Same for script B. 2014 */ 2015 po = (u_short *) fw->b_ofs; 2016 pa = (u32 *) &np->fwb_bas; 2017 for (i = 0 ; i < sizeof(np->fwb_bas)/sizeof(u32) ; i++) 2018 pa[i] = np->scriptb_ba + po[i]; 2019 } 2020 2021 #ifdef SYM_CONF_GENERIC_SUPPORT 2022 /* 2023 * Setup routine for firmware #1. 2024 */ 2025 static void 2026 sym_fw1_setup(hcb_p np, struct sym_fw *fw) 2027 { 2028 struct sym_fw1a_scr *scripta0; 2029 struct sym_fw1b_scr *scriptb0; 2030 2031 scripta0 = (struct sym_fw1a_scr *) np->scripta0; 2032 scriptb0 = (struct sym_fw1b_scr *) np->scriptb0; 2033 2034 /* 2035 * Fill variable parts in scripts. 2036 */ 2037 sym_fw_fill_data(scripta0->data_in, scripta0->data_out); 2038 2039 /* 2040 * Setup bus addresses used from the C code.. 2041 */ 2042 sym_fw_setup_bus_addresses(np, fw); 2043 } 2044 #endif /* SYM_CONF_GENERIC_SUPPORT */ 2045 2046 /* 2047 * Setup routine for firmware 2. 
static void
sym_fw2_setup(hcb_p np, struct sym_fw *fw)
{
	struct sym_fw2a_scr *scripta0;
	struct sym_fw2b_scr *scriptb0;

	scripta0 = (struct sym_fw2a_scr *) np->scripta0;
	scriptb0 = (struct sym_fw2b_scr *) np->scriptb0;

	/*
	 * Fill variable parts in scripts.
	 */
	sym_fw_fill_data(scripta0->data_in, scripta0->data_out);

	/*
	 * Setup bus addresses used from the C code.
	 */
	sym_fw_setup_bus_addresses(np, fw);
}

/*
 * Allocate firmware descriptors.
 */
#ifdef	SYM_CONF_GENERIC_SUPPORT
static struct sym_fw sym_fw1 = SYM_FW_ENTRY(sym_fw1, "NCR-generic");
#endif	/* SYM_CONF_GENERIC_SUPPORT */
static struct sym_fw sym_fw2 = SYM_FW_ENTRY(sym_fw2, "LOAD/STORE-based");

/*
 * Find the most appropriate firmware for a chip.
 * Returns 0 (NULL) if the chip supports neither firmware.
 */
static struct sym_fw *
sym_find_firmware(struct sym_pci_chip *chip)
{
	if (chip->features & FE_LDSTR)
		return &sym_fw2;
#ifdef	SYM_CONF_GENERIC_SUPPORT
	else if (!(chip->features & (FE_PFEN|FE_NOPM|FE_64BIT)))
		return &sym_fw1;
#endif
	else
		return 0;
}

/*
 * Bind a script to physical addresses.
 * Walks the SCRIPTS image, patches feature-dependent opcodes,
 * relocates label/register/softc references to bus addresses,
 * and byte-swaps each word to SCRIPTS endianness (cpu_to_scr).
 */
static void sym_fw_bind_script (hcb_p np, u32 *start, int len)
{
	u32 opcode, new, old, tmp1, tmp2;
	u32 *end, *cur;
	int relocs;

	cur = start;
	end = start + len/4;

	while (cur < end) {

		opcode = *cur;

		/*
		 * If we forget to change the length
		 * in scripts, a field will be
		 * padded with 0. This is an illegal
		 * command.
		 */
		if (opcode == 0) {
			printf ("%s: ERROR0 IN SCRIPT at %d.\n",
				sym_name(np), (int) (cur-start));
			MDELAY (10000);
			++cur;
			continue;
		};

		/*
		 * We use the bogus value 0xf00ff00f ;-)
		 * to reserve data area in SCRIPTS.
		 */
		if (opcode == SCR_DATA_ZERO) {
			*cur++ = 0;
			continue;
		}

		/*
		 * NOTE(review): 'cur-start' is a ptrdiff_t passed to %x;
		 * should be cast to (int)/(unsigned) as done in the error
		 * printfs above — confirm and fix in a code change.
		 */
		if (DEBUG_FLAGS & DEBUG_SCRIPT)
			printf ("%x:  <%x>\n", cur-start, (unsigned)opcode);

		/*
		 * We don't have to decode ALL commands
		 */
		switch (opcode >> 28) {
		case 0xf:
			/*
			 * LOAD / STORE DSA relative, don't relocate.
			 */
			relocs = 0;
			break;
		case 0xe:
			/*
			 * LOAD / STORE absolute.
			 */
			relocs = 1;
			break;
		case 0xc:
			/*
			 * COPY has TWO arguments.
			 */
			relocs = 2;
			tmp1 = cur[1];
			tmp2 = cur[2];
			if ((tmp1 ^ tmp2) & 3) {
				printf ("%s: ERROR1 IN SCRIPT at %d.\n",
					sym_name(np), (int) (cur-start));
				MDELAY (10000);
			}
			/*
			 * If PREFETCH feature not enabled, remove
			 * the NO FLUSH bit if present.
			 */
			if ((opcode & SCR_NO_FLUSH) &&
			    !(np->features & FE_PFEN)) {
				opcode = (opcode & ~SCR_NO_FLUSH);
			}
			break;
		case 0x0:
			/*
			 * MOVE/CHMOV (absolute address)
			 */
			if (!(np->features & FE_WIDE))
				opcode = (opcode | OPC_MOVE);
			relocs = 1;
			break;
		case 0x1:
			/*
			 * MOVE/CHMOV (table indirect)
			 */
			if (!(np->features & FE_WIDE))
				opcode = (opcode | OPC_MOVE);
			relocs = 0;
			break;
		case 0x8:
			/*
			 * JUMP / CALL
			 * don't relocate if relative :-)
			 */
			if (opcode & 0x00800000)
				relocs = 0;
			else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/
				relocs = 2;
			else
				relocs = 1;
			break;
		case 0x4:
		case 0x5:
		case 0x6:
		case 0x7:
			relocs = 1;
			break;
		default:
			relocs = 0;
			break;
		};

		/*
		 * Scriptify:) the opcode.
		 */
		*cur++ = cpu_to_scr(opcode);

		/*
		 * If no relocation, assume 1 argument
		 * and just scriptize:) it.
		 */
		if (!relocs) {
			*cur = cpu_to_scr(*cur);
			++cur;
			continue;
		}

		/*
		 * Otherwise performs all needed relocations.
		 */
		while (relocs--) {
			old = *cur;

			switch (old & RELOC_MASK) {
			case RELOC_REGISTER:
				new = (old & ~RELOC_MASK) + np->mmio_ba;
				break;
			case RELOC_LABEL_A:
				new = (old & ~RELOC_MASK) + np->scripta_ba;
				break;
			case RELOC_LABEL_B:
				new = (old & ~RELOC_MASK) + np->scriptb_ba;
				break;
			case RELOC_SOFTC:
				new = (old & ~RELOC_MASK) + np->hcb_ba;
				break;
			case 0:
				/*
				 * Don't relocate a 0 address.
				 * They are mostly used for patched or
				 * script self-modified areas.
				 */
				if (old == 0) {
					new = old;
					break;
				}
				/* fall through */
			default:
				new = 0;
				panic("sym_fw_bind_script: "
				      "weird relocation %x\n", old);
				break;
			}

			*cur++ = cpu_to_scr(new);
		}
	};
}

/*--------------------------------------------------------------------------*/
/*--------------------------- END OF FIRMWARES  ----------------------------*/
/*--------------------------------------------------------------------------*/

/*
 * Function prototypes.
2274 */ 2275 static void sym_save_initial_setting (hcb_p np); 2276 static int sym_prepare_setting (hcb_p np, struct sym_nvram *nvram); 2277 static int sym_prepare_nego (hcb_p np, ccb_p cp, int nego, u_char *msgptr); 2278 static void sym_put_start_queue (hcb_p np, ccb_p cp); 2279 static void sym_chip_reset (hcb_p np); 2280 static void sym_soft_reset (hcb_p np); 2281 static void sym_start_reset (hcb_p np); 2282 static int sym_reset_scsi_bus (hcb_p np, int enab_int); 2283 static int sym_wakeup_done (hcb_p np); 2284 static void sym_flush_busy_queue (hcb_p np, int cam_status); 2285 static void sym_flush_comp_queue (hcb_p np, int cam_status); 2286 static void sym_init (hcb_p np, int reason); 2287 static int sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, 2288 u_char *fakp); 2289 static void sym_setsync (hcb_p np, ccb_p cp, u_char ofs, u_char per, 2290 u_char div, u_char fak); 2291 static void sym_setwide (hcb_p np, ccb_p cp, u_char wide); 2292 static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs, 2293 u_char per, u_char wide, u_char div, u_char fak); 2294 static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs, 2295 u_char per, u_char wide, u_char div, u_char fak); 2296 static void sym_log_hard_error (hcb_p np, u_short sist, u_char dstat); 2297 static void sym_intr (void *arg); 2298 static void sym_poll (struct cam_sim *sim); 2299 static void sym_recover_scsi_int (hcb_p np, u_char hsts); 2300 static void sym_int_sto (hcb_p np); 2301 static void sym_int_udc (hcb_p np); 2302 static void sym_int_sbmc (hcb_p np); 2303 static void sym_int_par (hcb_p np, u_short sist); 2304 static void sym_int_ma (hcb_p np); 2305 static int sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, 2306 int task); 2307 static void sym_sir_bad_scsi_status (hcb_p np, int num, ccb_p cp); 2308 static int sym_clear_tasks (hcb_p np, int status, int targ, int lun, int task); 2309 static void sym_sir_task_recovery (hcb_p np, int num); 2310 static int 
sym_evaluate_dp (hcb_p np, ccb_p cp, u32 scr, int *ofs); 2311 static void sym_modify_dp (hcb_p np, tcb_p tp, ccb_p cp, int ofs); 2312 static int sym_compute_residual (hcb_p np, ccb_p cp); 2313 static int sym_show_msg (u_char * msg); 2314 static void sym_print_msg (ccb_p cp, char *label, u_char *msg); 2315 static void sym_sync_nego (hcb_p np, tcb_p tp, ccb_p cp); 2316 static void sym_ppr_nego (hcb_p np, tcb_p tp, ccb_p cp); 2317 static void sym_wide_nego (hcb_p np, tcb_p tp, ccb_p cp); 2318 static void sym_nego_default (hcb_p np, tcb_p tp, ccb_p cp); 2319 static void sym_nego_rejected (hcb_p np, tcb_p tp, ccb_p cp); 2320 static void sym_int_sir (hcb_p np); 2321 static void sym_free_ccb (hcb_p np, ccb_p cp); 2322 static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order); 2323 static ccb_p sym_alloc_ccb (hcb_p np); 2324 static ccb_p sym_ccb_from_dsa (hcb_p np, u_long dsa); 2325 static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln); 2326 static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln); 2327 static int sym_snooptest (hcb_p np); 2328 static void sym_selectclock(hcb_p np, u_char scntl3); 2329 static void sym_getclock (hcb_p np, int mult); 2330 static int sym_getpciclock (hcb_p np); 2331 static void sym_complete_ok (hcb_p np, ccb_p cp); 2332 static void sym_complete_error (hcb_p np, ccb_p cp); 2333 static void sym_timeout (void *arg); 2334 static int sym_abort_scsiio (hcb_p np, union ccb *ccb, int timed_out); 2335 static void sym_reset_dev (hcb_p np, union ccb *ccb); 2336 static void sym_action (struct cam_sim *sim, union ccb *ccb); 2337 static void sym_action1 (struct cam_sim *sim, union ccb *ccb); 2338 static int sym_setup_cdb (hcb_p np, struct ccb_scsiio *csio, ccb_p cp); 2339 static void sym_setup_data_and_start (hcb_p np, struct ccb_scsiio *csio, 2340 ccb_p cp); 2341 #ifdef FreeBSD_Bus_Dma_Abstraction 2342 static int sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp, 2343 bus_dma_segment_t *psegs, int nsegs); 2344 #else 2345 
static int sym_scatter_virtual (hcb_p np, ccb_p cp, vm_offset_t vaddr, 2346 vm_size_t len); 2347 static int sym_scatter_sg_virtual (hcb_p np, ccb_p cp, 2348 bus_dma_segment_t *psegs, int nsegs); 2349 static int sym_scatter_physical (hcb_p np, ccb_p cp, vm_offset_t paddr, 2350 vm_size_t len); 2351 #endif 2352 static int sym_scatter_sg_physical (hcb_p np, ccb_p cp, 2353 bus_dma_segment_t *psegs, int nsegs); 2354 static void sym_action2 (struct cam_sim *sim, union ccb *ccb); 2355 static void sym_update_trans (hcb_p np, tcb_p tp, struct sym_trans *tip, 2356 struct ccb_trans_settings *cts); 2357 static void sym_update_dflags(hcb_p np, u_char *flags, 2358 struct ccb_trans_settings *cts); 2359 2360 #ifdef FreeBSD_Bus_Io_Abstraction 2361 static struct sym_pci_chip *sym_find_pci_chip (device_t dev); 2362 static int sym_pci_probe (device_t dev); 2363 static int sym_pci_attach (device_t dev); 2364 #else 2365 static struct sym_pci_chip *sym_find_pci_chip (pcici_t tag); 2366 static const char *sym_pci_probe (pcici_t tag, pcidi_t type); 2367 static void sym_pci_attach (pcici_t tag, int unit); 2368 static int sym_pci_attach2 (pcici_t tag, int unit); 2369 #endif 2370 2371 static void sym_pci_free (hcb_p np); 2372 static int sym_cam_attach (hcb_p np); 2373 static void sym_cam_free (hcb_p np); 2374 2375 static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram); 2376 static void sym_nvram_setup_target (hcb_p np, int targ, struct sym_nvram *nvp); 2377 static int sym_read_nvram (hcb_p np, struct sym_nvram *nvp); 2378 2379 /* 2380 * Print something which allows to retrieve the controler type, 2381 * unit, target, lun concerned by a kernel message. 
2382 */ 2383 static void PRINT_TARGET (hcb_p np, int target) 2384 { 2385 printf ("%s:%d:", sym_name(np), target); 2386 } 2387 2388 static void PRINT_LUN(hcb_p np, int target, int lun) 2389 { 2390 printf ("%s:%d:%d:", sym_name(np), target, lun); 2391 } 2392 2393 static void PRINT_ADDR (ccb_p cp) 2394 { 2395 if (cp && cp->cam_ccb) 2396 xpt_print_path(cp->cam_ccb->ccb_h.path); 2397 } 2398 2399 /* 2400 * Take into account this ccb in the freeze count. 2401 * The flag that tells user about avoids doing that 2402 * more than once for a ccb. 2403 */ 2404 static void sym_freeze_cam_ccb(union ccb *ccb) 2405 { 2406 if (!(ccb->ccb_h.flags & CAM_DEV_QFRZDIS)) { 2407 if (!(ccb->ccb_h.status & CAM_DEV_QFRZN)) { 2408 ccb->ccb_h.status |= CAM_DEV_QFRZN; 2409 xpt_freeze_devq(ccb->ccb_h.path, 1); 2410 } 2411 } 2412 } 2413 2414 /* 2415 * Set the status field of a CAM CCB. 2416 */ 2417 static __inline void sym_set_cam_status(union ccb *ccb, cam_status status) 2418 { 2419 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 2420 ccb->ccb_h.status |= status; 2421 } 2422 2423 /* 2424 * Get the status field of a CAM CCB. 2425 */ 2426 static __inline int sym_get_cam_status(union ccb *ccb) 2427 { 2428 return ccb->ccb_h.status & CAM_STATUS_MASK; 2429 } 2430 2431 /* 2432 * Enqueue a CAM CCB. 2433 */ 2434 static void sym_enqueue_cam_ccb(hcb_p np, union ccb *ccb) 2435 { 2436 assert(!(ccb->ccb_h.status & CAM_SIM_QUEUED)); 2437 ccb->ccb_h.status = CAM_REQ_INPROG; 2438 2439 ccb->ccb_h.timeout_ch = timeout(sym_timeout, (caddr_t) ccb, 2440 ccb->ccb_h.timeout*hz/1000); 2441 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2442 ccb->ccb_h.sym_hcb_ptr = np; 2443 2444 sym_insque_tail(sym_qptr(&ccb->ccb_h.sim_links), &np->cam_ccbq); 2445 } 2446 2447 /* 2448 * Complete a pending CAM CCB. 
2449 */ 2450 static void sym_xpt_done(hcb_p np, union ccb *ccb) 2451 { 2452 if (ccb->ccb_h.status & CAM_SIM_QUEUED) { 2453 untimeout(sym_timeout, (caddr_t) ccb, ccb->ccb_h.timeout_ch); 2454 sym_remque(sym_qptr(&ccb->ccb_h.sim_links)); 2455 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2456 ccb->ccb_h.sym_hcb_ptr = 0; 2457 } 2458 if (ccb->ccb_h.flags & CAM_DEV_QFREEZE) 2459 sym_freeze_cam_ccb(ccb); 2460 xpt_done(ccb); 2461 } 2462 2463 static void sym_xpt_done2(hcb_p np, union ccb *ccb, int cam_status) 2464 { 2465 sym_set_cam_status(ccb, cam_status); 2466 sym_xpt_done(np, ccb); 2467 } 2468 2469 /* 2470 * SYMBIOS chip clock divisor table. 2471 * 2472 * Divisors are multiplied by 10,000,000 in order to make 2473 * calculations more simple. 2474 */ 2475 #define _5M 5000000 2476 static u_long div_10M[] = 2477 {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; 2478 2479 /* 2480 * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, 2481 * 128 transfers. All chips support at least 16 transfers 2482 * bursts. The 825A, 875 and 895 chips support bursts of up 2483 * to 128 transfers and the 895A and 896 support bursts of up 2484 * to 64 transfers. All other chips support up to 16 2485 * transfers bursts. 2486 * 2487 * For PCI 32 bit data transfers each transfer is a DWORD. 2488 * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. 2489 * Only the 896 is able to perform 64 bit data transfers. 2490 * 2491 * We use log base 2 (burst length) as internal code, with 2492 * value 0 meaning "burst disabled". 2493 */ 2494 2495 /* 2496 * Burst length from burst code. 2497 */ 2498 #define burst_length(bc) (!(bc))? 0 : 1 << (bc) 2499 2500 /* 2501 * Burst code from io register bits. 2502 */ 2503 #define burst_code(dmode, ctest4, ctest5) \ 2504 (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 2505 2506 /* 2507 * Set initial io register bits from burst code. 
2508 */ 2509 static __inline void sym_init_burst(hcb_p np, u_char bc) 2510 { 2511 np->rv_ctest4 &= ~0x80; 2512 np->rv_dmode &= ~(0x3 << 6); 2513 np->rv_ctest5 &= ~0x4; 2514 2515 if (!bc) { 2516 np->rv_ctest4 |= 0x80; 2517 } 2518 else { 2519 --bc; 2520 np->rv_dmode |= ((bc & 0x3) << 6); 2521 np->rv_ctest5 |= (bc & 0x4); 2522 } 2523 } 2524 2525 2526 /* 2527 * Print out the list of targets that have some flag disabled by user. 2528 */ 2529 static void sym_print_targets_flag(hcb_p np, int mask, char *msg) 2530 { 2531 int cnt; 2532 int i; 2533 2534 for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { 2535 if (i == np->myaddr) 2536 continue; 2537 if (np->target[i].usrflags & mask) { 2538 if (!cnt++) 2539 printf("%s: %s disabled for targets", 2540 sym_name(np), msg); 2541 printf(" %d", i); 2542 } 2543 } 2544 if (cnt) 2545 printf(".\n"); 2546 } 2547 2548 /* 2549 * Save initial settings of some IO registers. 2550 * Assumed to have been set by BIOS. 2551 * We cannot reset the chip prior to reading the 2552 * IO registers, since informations will be lost. 2553 * Since the SCRIPTS processor may be running, this 2554 * is not safe on paper, but it seems to work quite 2555 * well. 
:) 2556 */ 2557 static void sym_save_initial_setting (hcb_p np) 2558 { 2559 np->sv_scntl0 = INB(nc_scntl0) & 0x0a; 2560 np->sv_scntl3 = INB(nc_scntl3) & 0x07; 2561 np->sv_dmode = INB(nc_dmode) & 0xce; 2562 np->sv_dcntl = INB(nc_dcntl) & 0xa8; 2563 np->sv_ctest3 = INB(nc_ctest3) & 0x01; 2564 np->sv_ctest4 = INB(nc_ctest4) & 0x80; 2565 np->sv_gpcntl = INB(nc_gpcntl); 2566 np->sv_stest1 = INB(nc_stest1); 2567 np->sv_stest2 = INB(nc_stest2) & 0x20; 2568 np->sv_stest4 = INB(nc_stest4); 2569 if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */ 2570 np->sv_scntl4 = INB(nc_scntl4); 2571 np->sv_ctest5 = INB(nc_ctest5) & 0x04; 2572 } 2573 else 2574 np->sv_ctest5 = INB(nc_ctest5) & 0x24; 2575 } 2576 2577 /* 2578 * Prepare io register values used by sym_init() according 2579 * to selected and supported features. 2580 */ 2581 static int sym_prepare_setting(hcb_p np, struct sym_nvram *nvram) 2582 { 2583 u_char burst_max; 2584 u_long period; 2585 int i; 2586 2587 /* 2588 * Wide ? 2589 */ 2590 np->maxwide = (np->features & FE_WIDE)? 1 : 0; 2591 2592 /* 2593 * Get the frequency of the chip's clock. 2594 */ 2595 if (np->features & FE_QUAD) 2596 np->multiplier = 4; 2597 else if (np->features & FE_DBLR) 2598 np->multiplier = 2; 2599 else 2600 np->multiplier = 1; 2601 2602 np->clock_khz = (np->features & FE_CLK80)? 80000 : 40000; 2603 np->clock_khz *= np->multiplier; 2604 2605 if (np->clock_khz != 40000) 2606 sym_getclock(np, np->multiplier); 2607 2608 /* 2609 * Divisor to be used for async (timer pre-scaler). 2610 */ 2611 i = np->clock_divn - 1; 2612 while (--i >= 0) { 2613 if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) { 2614 ++i; 2615 break; 2616 } 2617 } 2618 np->rv_scntl3 = i+1; 2619 2620 /* 2621 * The C1010 uses hardwired divisors for async. 2622 * So, we just throw away, the async. divisor.:-) 2623 */ 2624 if (np->features & FE_C10) 2625 np->rv_scntl3 = 0; 2626 2627 /* 2628 * Minimum synchronous period factor supported by the chip. 
2629 * Btw, 'period' is in tenths of nanoseconds. 2630 */ 2631 period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; 2632 if (period <= 250) np->minsync = 10; 2633 else if (period <= 303) np->minsync = 11; 2634 else if (period <= 500) np->minsync = 12; 2635 else np->minsync = (period + 40 - 1) / 40; 2636 2637 /* 2638 * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). 2639 */ 2640 if (np->minsync < 25 && 2641 !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3))) 2642 np->minsync = 25; 2643 else if (np->minsync < 12 && 2644 !(np->features & (FE_ULTRA2|FE_ULTRA3))) 2645 np->minsync = 12; 2646 2647 /* 2648 * Maximum synchronous period factor supported by the chip. 2649 */ 2650 period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); 2651 np->maxsync = period > 2540 ? 254 : period / 10; 2652 2653 /* 2654 * If chip is a C1010, guess the sync limits in DT mode. 2655 */ 2656 if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) { 2657 if (np->clock_khz == 160000) { 2658 np->minsync_dt = 9; 2659 np->maxsync_dt = 50; 2660 } 2661 } 2662 2663 /* 2664 * 64 bit (53C895A or 53C896) ? 2665 */ 2666 if (np->features & FE_64BIT) 2667 #if BITS_PER_LONG > 32 2668 np->rv_ccntl1 |= (XTIMOD | EXTIBMV); 2669 #else 2670 np->rv_ccntl1 |= (DDAC); 2671 #endif 2672 2673 /* 2674 * Phase mismatch handled by SCRIPTS (895A/896/1010) ? 2675 */ 2676 if (np->features & FE_NOPM) 2677 np->rv_ccntl0 |= (ENPMJ); 2678 2679 /* 2680 * C1010 Errata. 2681 * In dual channel mode, contention occurs if internal cycles 2682 * are used. Disable internal cycles. 
2683 */ 2684 if (np->device_id == PCI_ID_LSI53C1010 && np->revision_id < 0x45) 2685 np->rv_ccntl0 |= DILS; 2686 2687 /* 2688 * Select burst length (dwords) 2689 */ 2690 burst_max = SYM_SETUP_BURST_ORDER; 2691 if (burst_max == 255) 2692 burst_max = burst_code(np->sv_dmode, np->sv_ctest4, 2693 np->sv_ctest5); 2694 if (burst_max > 7) 2695 burst_max = 7; 2696 if (burst_max > np->maxburst) 2697 burst_max = np->maxburst; 2698 2699 /* 2700 * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. 2701 * This chip and the 860 Rev 1 may wrongly use PCI cache line 2702 * based transactions on LOAD/STORE instructions. So we have 2703 * to prevent these chips from using such PCI transactions in 2704 * this driver. The generic ncr driver that does not use 2705 * LOAD/STORE instructions does not need this work-around. 2706 */ 2707 if ((np->device_id == PCI_ID_SYM53C810 && 2708 np->revision_id >= 0x10 && np->revision_id <= 0x11) || 2709 (np->device_id == PCI_ID_SYM53C860 && 2710 np->revision_id <= 0x1)) 2711 np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP); 2712 2713 /* 2714 * Select all supported special features. 2715 * If we are using on-board RAM for scripts, prefetch (PFEN) 2716 * does not help, but burst op fetch (BOF) does. 2717 * Disabling PFEN makes sure BOF will be used. 
2718 */ 2719 if (np->features & FE_ERL) 2720 np->rv_dmode |= ERL; /* Enable Read Line */ 2721 if (np->features & FE_BOF) 2722 np->rv_dmode |= BOF; /* Burst Opcode Fetch */ 2723 if (np->features & FE_ERMP) 2724 np->rv_dmode |= ERMP; /* Enable Read Multiple */ 2725 #if 1 2726 if ((np->features & FE_PFEN) && !np->ram_ba) 2727 #else 2728 if (np->features & FE_PFEN) 2729 #endif 2730 np->rv_dcntl |= PFEN; /* Prefetch Enable */ 2731 if (np->features & FE_CLSE) 2732 np->rv_dcntl |= CLSE; /* Cache Line Size Enable */ 2733 if (np->features & FE_WRIE) 2734 np->rv_ctest3 |= WRIE; /* Write and Invalidate */ 2735 if (np->features & FE_DFS) 2736 np->rv_ctest5 |= DFS; /* Dma Fifo Size */ 2737 2738 /* 2739 * Select some other 2740 */ 2741 if (SYM_SETUP_PCI_PARITY) 2742 np->rv_ctest4 |= MPEE; /* Master parity checking */ 2743 if (SYM_SETUP_SCSI_PARITY) 2744 np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ 2745 2746 /* 2747 * Get parity checking, host ID and verbose mode from NVRAM 2748 */ 2749 np->myaddr = 255; 2750 sym_nvram_setup_host (np, nvram); 2751 2752 /* 2753 * Get SCSI addr of host adapter (set by bios?). 2754 */ 2755 if (np->myaddr == 255) { 2756 np->myaddr = INB(nc_scid) & 0x07; 2757 if (!np->myaddr) 2758 np->myaddr = SYM_SETUP_HOST_ID; 2759 } 2760 2761 /* 2762 * Prepare initial io register bits for burst length 2763 */ 2764 sym_init_burst(np, burst_max); 2765 2766 /* 2767 * Set SCSI BUS mode. 2768 * - LVD capable chips (895/895A/896/1010) report the 2769 * current BUS mode through the STEST4 IO register. 2770 * - For previous generation chips (825/825A/875), 2771 * user has to tell us how to check against HVD, 2772 * since a 100% safe algorithm is not possible. 
2773 */ 2774 np->scsi_mode = SMODE_SE; 2775 if (np->features & (FE_ULTRA2|FE_ULTRA3)) 2776 np->scsi_mode = (np->sv_stest4 & SMODE); 2777 else if (np->features & FE_DIFF) { 2778 if (SYM_SETUP_SCSI_DIFF == 1) { 2779 if (np->sv_scntl3) { 2780 if (np->sv_stest2 & 0x20) 2781 np->scsi_mode = SMODE_HVD; 2782 } 2783 else if (nvram->type == SYM_SYMBIOS_NVRAM) { 2784 if (INB(nc_gpreg) & 0x08) 2785 np->scsi_mode = SMODE_HVD; 2786 } 2787 } 2788 else if (SYM_SETUP_SCSI_DIFF == 2) 2789 np->scsi_mode = SMODE_HVD; 2790 } 2791 if (np->scsi_mode == SMODE_HVD) 2792 np->rv_stest2 |= 0x20; 2793 2794 /* 2795 * Set LED support from SCRIPTS. 2796 * Ignore this feature for boards known to use a 2797 * specific GPIO wiring and for the 895A or 896 2798 * that drive the LED directly. 2799 */ 2800 if ((SYM_SETUP_SCSI_LED || nvram->type == SYM_SYMBIOS_NVRAM) && 2801 !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01)) 2802 np->features |= FE_LED0; 2803 2804 /* 2805 * Set irq mode. 2806 */ 2807 switch(SYM_SETUP_IRQ_MODE & 3) { 2808 case 2: 2809 np->rv_dcntl |= IRQM; 2810 break; 2811 case 1: 2812 np->rv_dcntl |= (np->sv_dcntl & IRQM); 2813 break; 2814 default: 2815 break; 2816 } 2817 2818 /* 2819 * Configure targets according to driver setup. 2820 * If NVRAM present get targets setup from NVRAM. 2821 */ 2822 for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { 2823 tcb_p tp = &np->target[i]; 2824 2825 tp->tinfo.user.period = np->minsync; 2826 tp->tinfo.user.offset = np->maxoffs; 2827 tp->tinfo.user.width = np->maxwide ? BUS_16_BIT : BUS_8_BIT; 2828 tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); 2829 tp->usrtags = SYM_SETUP_MAX_TAG; 2830 2831 sym_nvram_setup_target (np, i, nvram); 2832 2833 if (!tp->usrtags) 2834 tp->usrflags &= ~SYM_TAGS_ENABLED; 2835 } 2836 2837 /* 2838 * Let user know about the settings. 2839 */ 2840 i = nvram->type; 2841 printf("%s: %s NVRAM, ID %d, Fast-%d, %s, %s\n", sym_name(np), 2842 i == SYM_SYMBIOS_NVRAM ? "Symbios" : 2843 (i == SYM_TEKRAM_NVRAM ? 
"Tekram" : "No"), 2844 np->myaddr, 2845 (np->features & FE_ULTRA3) ? 80 : 2846 (np->features & FE_ULTRA2) ? 40 : 2847 (np->features & FE_ULTRA) ? 20 : 10, 2848 sym_scsi_bus_mode(np->scsi_mode), 2849 (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity"); 2850 /* 2851 * Tell him more on demand. 2852 */ 2853 if (sym_verbose) { 2854 printf("%s: %s IRQ line driver%s\n", 2855 sym_name(np), 2856 np->rv_dcntl & IRQM ? "totem pole" : "open drain", 2857 np->ram_ba ? ", using on-chip SRAM" : ""); 2858 printf("%s: using %s firmware.\n", sym_name(np), np->fw_name); 2859 if (np->features & FE_NOPM) 2860 printf("%s: handling phase mismatch from SCRIPTS.\n", 2861 sym_name(np)); 2862 } 2863 /* 2864 * And still more. 2865 */ 2866 if (sym_verbose > 1) { 2867 printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " 2868 "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", 2869 sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, 2870 np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); 2871 2872 printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " 2873 "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", 2874 sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, 2875 np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); 2876 } 2877 /* 2878 * Let user be aware of targets that have some disable flags set. 2879 */ 2880 sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT"); 2881 if (sym_verbose) 2882 sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED, 2883 "SCAN FOR LUNS"); 2884 2885 return 0; 2886 } 2887 2888 /* 2889 * Prepare the next negotiation message if needed. 2890 * 2891 * Fill in the part of message buffer that contains the 2892 * negotiation and the nego_status field of the CCB. 2893 * Returns the size of the message in bytes. 2894 */ 2895 2896 static int sym_prepare_nego(hcb_p np, ccb_p cp, int nego, u_char *msgptr) 2897 { 2898 tcb_p tp = &np->target[cp->target]; 2899 int msglen = 0; 2900 2901 #if 1 2902 /* 2903 * For now, only use PPR with DT option if period factor = 9. 
2904 */ 2905 if (tp->tinfo.goal.period == 9) { 2906 tp->tinfo.goal.width = BUS_16_BIT; 2907 tp->tinfo.goal.options |= PPR_OPT_DT; 2908 } 2909 else 2910 tp->tinfo.goal.options &= ~PPR_OPT_DT; 2911 #endif 2912 /* 2913 * Early C1010 chips need a work-around for DT 2914 * data transfer to work. 2915 */ 2916 if (!(np->features & FE_U3EN)) 2917 tp->tinfo.goal.options = 0; 2918 /* 2919 * negotiate using PPR ? 2920 */ 2921 if (tp->tinfo.goal.options & PPR_OPT_MASK) 2922 nego = NS_PPR; 2923 /* 2924 * negotiate wide transfers ? 2925 */ 2926 else if (tp->tinfo.current.width != tp->tinfo.goal.width) 2927 nego = NS_WIDE; 2928 /* 2929 * negotiate synchronous transfers? 2930 */ 2931 else if (tp->tinfo.current.period != tp->tinfo.goal.period || 2932 tp->tinfo.current.offset != tp->tinfo.goal.offset) 2933 nego = NS_SYNC; 2934 2935 switch (nego) { 2936 case NS_SYNC: 2937 msgptr[msglen++] = M_EXTENDED; 2938 msgptr[msglen++] = 3; 2939 msgptr[msglen++] = M_X_SYNC_REQ; 2940 msgptr[msglen++] = tp->tinfo.goal.period; 2941 msgptr[msglen++] = tp->tinfo.goal.offset; 2942 break; 2943 case NS_WIDE: 2944 msgptr[msglen++] = M_EXTENDED; 2945 msgptr[msglen++] = 2; 2946 msgptr[msglen++] = M_X_WIDE_REQ; 2947 msgptr[msglen++] = tp->tinfo.goal.width; 2948 break; 2949 case NS_PPR: 2950 msgptr[msglen++] = M_EXTENDED; 2951 msgptr[msglen++] = 6; 2952 msgptr[msglen++] = M_X_PPR_REQ; 2953 msgptr[msglen++] = tp->tinfo.goal.period; 2954 msgptr[msglen++] = 0; 2955 msgptr[msglen++] = tp->tinfo.goal.offset; 2956 msgptr[msglen++] = tp->tinfo.goal.width; 2957 msgptr[msglen++] = tp->tinfo.goal.options & PPR_OPT_DT; 2958 break; 2959 }; 2960 2961 cp->nego_status = nego; 2962 2963 if (nego) { 2964 tp->nego_cp = cp; /* Keep track a nego will be performed */ 2965 if (DEBUG_FLAGS & DEBUG_NEGO) { 2966 sym_print_msg(cp, nego == NS_SYNC ? "sync msgout" : 2967 nego == NS_WIDE ? "wide msgout" : 2968 "ppr msgout", msgptr); 2969 }; 2970 }; 2971 2972 return msglen; 2973 } 2974 2975 /* 2976 * Insert a job into the start queue. 
2977 */ 2978 static void sym_put_start_queue(hcb_p np, ccb_p cp) 2979 { 2980 u_short qidx; 2981 2982 #ifdef SYM_CONF_IARB_SUPPORT 2983 /* 2984 * If the previously queued CCB is not yet done, 2985 * set the IARB hint. The SCRIPTS will go with IARB 2986 * for this job when starting the previous one. 2987 * We leave devices a chance to win arbitration by 2988 * not using more than 'iarb_max' consecutive 2989 * immediate arbitrations. 2990 */ 2991 if (np->last_cp && np->iarb_count < np->iarb_max) { 2992 np->last_cp->host_flags |= HF_HINT_IARB; 2993 ++np->iarb_count; 2994 } 2995 else 2996 np->iarb_count = 0; 2997 np->last_cp = cp; 2998 #endif 2999 3000 /* 3001 * Insert first the idle task and then our job. 3002 * The MB should ensure proper ordering. 3003 */ 3004 qidx = np->squeueput + 2; 3005 if (qidx >= MAX_QUEUE*2) qidx = 0; 3006 3007 np->squeue [qidx] = cpu_to_scr(np->idletask_ba); 3008 MEMORY_BARRIER(); 3009 np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba); 3010 3011 np->squeueput = qidx; 3012 3013 if (DEBUG_FLAGS & DEBUG_QUEUE) 3014 printf ("%s: queuepos=%d.\n", sym_name (np), np->squeueput); 3015 3016 /* 3017 * Script processor may be waiting for reselect. 3018 * Wake it up. 3019 */ 3020 MEMORY_BARRIER(); 3021 OUTB (nc_istat, SIGP|np->istat_sem); 3022 } 3023 3024 3025 /* 3026 * Soft reset the chip. 3027 * 3028 * Raising SRST when the chip is running may cause 3029 * problems on dual function chips (see below). 3030 * On the other hand, LVD devices need some delay 3031 * to settle and report actual BUS mode in STEST4. 3032 */ 3033 static void sym_chip_reset (hcb_p np) 3034 { 3035 OUTB (nc_istat, SRST); 3036 UDELAY (10); 3037 OUTB (nc_istat, 0); 3038 UDELAY(2000); /* For BUS MODE to settle */ 3039 } 3040 3041 /* 3042 * Soft reset the chip. 3043 * 3044 * Some 896 and 876 chip revisions may hang-up if we set 3045 * the SRST (soft reset) bit at the wrong time when SCRIPTS 3046 * are running. 
3047 * So, we need to abort the current operation prior to 3048 * soft resetting the chip. 3049 */ 3050 static void sym_soft_reset (hcb_p np) 3051 { 3052 u_char istat; 3053 int i; 3054 3055 OUTB (nc_istat, CABRT); 3056 for (i = 1000000 ; i ; --i) { 3057 istat = INB (nc_istat); 3058 if (istat & SIP) { 3059 INW (nc_sist); 3060 continue; 3061 } 3062 if (istat & DIP) { 3063 OUTB (nc_istat, 0); 3064 INB (nc_dstat); 3065 break; 3066 } 3067 } 3068 if (!i) 3069 printf("%s: unable to abort current chip operation.\n", 3070 sym_name(np)); 3071 sym_chip_reset (np); 3072 } 3073 3074 /* 3075 * Start reset process. 3076 * 3077 * The interrupt handler will reinitialize the chip. 3078 */ 3079 static void sym_start_reset(hcb_p np) 3080 { 3081 (void) sym_reset_scsi_bus(np, 1); 3082 } 3083 3084 static int sym_reset_scsi_bus(hcb_p np, int enab_int) 3085 { 3086 u32 term; 3087 int retv = 0; 3088 3089 sym_soft_reset(np); /* Soft reset the chip */ 3090 if (enab_int) 3091 OUTW (nc_sien, RST); 3092 /* 3093 * Enable Tolerant, reset IRQD if present and 3094 * properly set IRQ mode, prior to resetting the bus. 3095 */ 3096 OUTB (nc_stest3, TE); 3097 OUTB (nc_dcntl, (np->rv_dcntl & IRQM)); 3098 OUTB (nc_scntl1, CRST); 3099 UDELAY (200); 3100 3101 if (!SYM_SETUP_SCSI_BUS_CHECK) 3102 goto out; 3103 /* 3104 * Check for no terminators or SCSI bus shorts to ground. 3105 * Read SCSI data bus, data parity bits and control signals. 3106 * We are expecting RESET to be TRUE and other signals to be 3107 * FALSE. 
3108 */ 3109 term = INB(nc_sstat0); 3110 term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */ 3111 term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */ 3112 ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */ 3113 ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */ 3114 INB(nc_sbcl); /* req ack bsy sel atn msg cd io */ 3115 3116 if (!(np->features & FE_WIDE)) 3117 term &= 0x3ffff; 3118 3119 if (term != (2<<7)) { 3120 printf("%s: suspicious SCSI data while resetting the BUS.\n", 3121 sym_name(np)); 3122 printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " 3123 "0x%lx, expecting 0x%lx\n", 3124 sym_name(np), 3125 (np->features & FE_WIDE) ? "dp1,d15-8," : "", 3126 (u_long)term, (u_long)(2<<7)); 3127 if (SYM_SETUP_SCSI_BUS_CHECK == 1) 3128 retv = 1; 3129 } 3130 out: 3131 OUTB (nc_scntl1, 0); 3132 /* MDELAY(100); */ 3133 return retv; 3134 } 3135 3136 /* 3137 * The chip may have completed jobs. Look at the DONE QUEUE. 3138 */ 3139 static int sym_wakeup_done (hcb_p np) 3140 { 3141 ccb_p cp; 3142 int i, n; 3143 u_long dsa; 3144 3145 n = 0; 3146 i = np->dqueueget; 3147 while (1) { 3148 dsa = scr_to_cpu(np->dqueue[i]); 3149 if (!dsa) 3150 break; 3151 np->dqueue[i] = 0; 3152 if ((i = i+2) >= MAX_QUEUE*2) 3153 i = 0; 3154 3155 cp = sym_ccb_from_dsa(np, dsa); 3156 if (cp) { 3157 sym_complete_ok (np, cp); 3158 ++n; 3159 } 3160 else 3161 printf ("%s: bad DSA (%lx) in done queue.\n", 3162 sym_name(np), dsa); 3163 } 3164 np->dqueueget = i; 3165 3166 return n; 3167 } 3168 3169 /* 3170 * Complete all active CCBs with error. 3171 * Used on CHIP/SCSI RESET. 3172 */ 3173 static void sym_flush_busy_queue (hcb_p np, int cam_status) 3174 { 3175 /* 3176 * Move all active CCBs to the COMP queue 3177 * and flush this queue. 3178 */ 3179 sym_que_splice(&np->busy_ccbq, &np->comp_ccbq); 3180 sym_que_init(&np->busy_ccbq); 3181 sym_flush_comp_queue(np, cam_status); 3182 } 3183 3184 /* 3185 * Start chip. 3186 * 3187 * 'reason' means: 3188 * 0: initialisation. 
3189 * 1: SCSI BUS RESET delivered or received. 3190 * 2: SCSI BUS MODE changed. 3191 */ 3192 static void sym_init (hcb_p np, int reason) 3193 { 3194 int i; 3195 u_long phys; 3196 3197 /* 3198 * Reset chip if asked, otherwise just clear fifos. 3199 */ 3200 if (reason == 1) 3201 sym_soft_reset(np); 3202 else { 3203 OUTB (nc_stest3, TE|CSF); 3204 OUTONB (nc_ctest3, CLF); 3205 } 3206 3207 /* 3208 * Clear Start Queue 3209 */ 3210 phys = np->squeue_ba; 3211 for (i = 0; i < MAX_QUEUE*2; i += 2) { 3212 np->squeue[i] = cpu_to_scr(np->idletask_ba); 3213 np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4); 3214 } 3215 np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys); 3216 3217 /* 3218 * Start at first entry. 3219 */ 3220 np->squeueput = 0; 3221 3222 /* 3223 * Clear Done Queue 3224 */ 3225 phys = np->dqueue_ba; 3226 for (i = 0; i < MAX_QUEUE*2; i += 2) { 3227 np->dqueue[i] = 0; 3228 np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4); 3229 } 3230 np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys); 3231 3232 /* 3233 * Start at first entry. 3234 */ 3235 np->dqueueget = 0; 3236 3237 /* 3238 * Install patches in scripts. 3239 * This also let point to first position the start 3240 * and done queue pointers used from SCRIPTS. 3241 */ 3242 np->fw_patch(np); 3243 3244 /* 3245 * Wakeup all pending jobs. 3246 */ 3247 sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET); 3248 3249 /* 3250 * Init chip. 3251 */ 3252 OUTB (nc_istat, 0x00 ); /* Remove Reset, abort */ 3253 UDELAY (2000); /* The 895 needs time for the bus mode to settle */ 3254 3255 OUTB (nc_scntl0, np->rv_scntl0 | 0xc0); 3256 /* full arb., ena parity, par->ATN */ 3257 OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! 
*/ 3258 3259 sym_selectclock(np, np->rv_scntl3); /* Select SCSI clock */ 3260 3261 OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */ 3262 OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */ 3263 OUTB (nc_istat , SIGP ); /* Signal Process */ 3264 OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */ 3265 OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */ 3266 3267 OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */ 3268 OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */ 3269 OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */ 3270 3271 /* Extended Sreq/Sack filtering not supported on the C10 */ 3272 if (np->features & FE_C10) 3273 OUTB (nc_stest2, np->rv_stest2); 3274 else 3275 OUTB (nc_stest2, EXT|np->rv_stest2); 3276 3277 OUTB (nc_stest3, TE); /* TolerANT enable */ 3278 OUTB (nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */ 3279 3280 /* 3281 * C10101 Errata. 3282 * Errant SGE's when in narrow. Write bits 4 & 5 of 3283 * STEST1 register to disable SGE. We probably should do 3284 * that from SCRIPTS for each selection/reselection, but 3285 * I just don't want. :) 3286 */ 3287 if (np->device_id == PCI_ID_LSI53C1010 && np->revision_id < 0x45) 3288 OUTB (nc_stest1, INB(nc_stest1) | 0x30); 3289 3290 /* 3291 * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2. 3292 * Disable overlapped arbitration for some dual function devices, 3293 * regardless revision id (kind of post-chip-design feature. ;-)) 3294 */ 3295 if (np->device_id == PCI_ID_SYM53C875) 3296 OUTB (nc_ctest0, (1<<5)); 3297 else if (np->device_id == PCI_ID_SYM53C896) 3298 np->rv_ccntl0 |= DPR; 3299 3300 /* 3301 * If 64 bit (895A/896/1010) write CCNTL1 to enable 40 bit 3302 * address table indirect addressing for MOVE. 3303 * Also write CCNTL0 if 64 bit chip, since this register seems 3304 * to only be used by 64 bit cores. 
3305 */ 3306 if (np->features & FE_64BIT) { 3307 OUTB (nc_ccntl0, np->rv_ccntl0); 3308 OUTB (nc_ccntl1, np->rv_ccntl1); 3309 } 3310 3311 /* 3312 * If phase mismatch handled by scripts (895A/896/1010), 3313 * set PM jump addresses. 3314 */ 3315 if (np->features & FE_NOPM) { 3316 OUTL (nc_pmjad1, SCRIPTB_BA (np, pm_handle)); 3317 OUTL (nc_pmjad2, SCRIPTB_BA (np, pm_handle)); 3318 } 3319 3320 /* 3321 * Enable GPIO0 pin for writing if LED support from SCRIPTS. 3322 * Also set GPIO5 and clear GPIO6 if hardware LED control. 3323 */ 3324 if (np->features & FE_LED0) 3325 OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01); 3326 else if (np->features & FE_LEDC) 3327 OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20); 3328 3329 /* 3330 * enable ints 3331 */ 3332 OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR); 3333 OUTB (nc_dien , MDPE|BF|SSI|SIR|IID); 3334 3335 /* 3336 * For 895/6 enable SBMC interrupt and save current SCSI bus mode. 3337 * Try to eat the spurious SBMC interrupt that may occur when 3338 * we reset the chip but not the SCSI BUS (at initialization). 3339 */ 3340 if (np->features & (FE_ULTRA2|FE_ULTRA3)) { 3341 OUTONW (nc_sien, SBMC); 3342 if (reason == 0) { 3343 MDELAY(100); 3344 INW (nc_sist); 3345 } 3346 np->scsi_mode = INB (nc_stest4) & SMODE; 3347 } 3348 3349 /* 3350 * Fill in target structure. 3351 * Reinitialize usrsync. 3352 * Reinitialize usrwide. 3353 * Prepare sync negotiation according to actual SCSI bus mode. 3354 */ 3355 for (i=0;i<SYM_CONF_MAX_TARGET;i++) { 3356 tcb_p tp = &np->target[i]; 3357 3358 tp->to_reset = 0; 3359 tp->head.sval = 0; 3360 tp->head.wval = np->rv_scntl3; 3361 tp->head.uval = 0; 3362 3363 tp->tinfo.current.period = 0; 3364 tp->tinfo.current.offset = 0; 3365 tp->tinfo.current.width = BUS_8_BIT; 3366 tp->tinfo.current.options = 0; 3367 } 3368 3369 /* 3370 * Download SCSI SCRIPTS to on-chip RAM if present, 3371 * and start script processor. 
3372 */ 3373 if (np->ram_ba) { 3374 if (sym_verbose > 1) 3375 printf ("%s: Downloading SCSI SCRIPTS.\n", 3376 sym_name(np)); 3377 if (np->ram_ws == 8192) { 3378 memcpy_to_pci(np->ram_va + 4096, 3379 np->scriptb0, np->scriptb_sz); 3380 OUTL (nc_mmws, np->scr_ram_seg); 3381 OUTL (nc_mmrs, np->scr_ram_seg); 3382 OUTL (nc_sfs, np->scr_ram_seg); 3383 phys = SCRIPTB_BA (np, start64); 3384 } 3385 else 3386 phys = SCRIPTA_BA (np, init); 3387 memcpy_to_pci(np->ram_va, np->scripta0, np->scripta_sz); 3388 } 3389 else 3390 phys = SCRIPTA_BA (np, init); 3391 3392 np->istat_sem = 0; 3393 3394 MEMORY_BARRIER(); 3395 OUTL (nc_dsa, np->hcb_ba); 3396 OUTL (nc_dsp, phys); 3397 3398 /* 3399 * Notify the XPT about the RESET condition. 3400 */ 3401 if (reason != 0) 3402 xpt_async(AC_BUS_RESET, np->path, NULL); 3403 } 3404 3405 /* 3406 * Get clock factor and sync divisor for a given 3407 * synchronous factor period. 3408 */ 3409 static int 3410 sym_getsync(hcb_p np, u_char dt, u_char sfac, u_char *divp, u_char *fakp) 3411 { 3412 u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */ 3413 int div = np->clock_divn; /* Number of divisors supported */ 3414 u32 fak; /* Sync factor in sxfer */ 3415 u32 per; /* Period in tenths of ns */ 3416 u32 kpc; /* (per * clk) */ 3417 int ret; 3418 3419 /* 3420 * Compute the synchronous period in tenths of nano-seconds 3421 */ 3422 if (dt && sfac <= 9) per = 125; 3423 else if (sfac <= 10) per = 250; 3424 else if (sfac == 11) per = 303; 3425 else if (sfac == 12) per = 500; 3426 else per = 40 * sfac; 3427 ret = per; 3428 3429 kpc = per * clk; 3430 if (dt) 3431 kpc <<= 1; 3432 3433 /* 3434 * For earliest C10, the extra clocks does not apply 3435 * to CRC cycles, so it may be safe not to use them. 3436 * Note that this limits the lowest sync data transfer 3437 * to 5 Mega-transfers per second and may result in 3438 * using higher clock divisors. 
3439 */ 3440 #if 1 3441 if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) { 3442 /* 3443 * Look for the lowest clock divisor that allows an 3444 * output speed not faster than the period. 3445 */ 3446 while (div > 0) { 3447 --div; 3448 if (kpc > (div_10M[div] << 2)) { 3449 ++div; 3450 break; 3451 } 3452 } 3453 fak = 0; /* No extra clocks */ 3454 if (div == np->clock_divn) { /* Are we too fast ? */ 3455 ret = -1; 3456 } 3457 *divp = div; 3458 *fakp = fak; 3459 return ret; 3460 } 3461 #endif 3462 3463 /* 3464 * Look for the greatest clock divisor that allows an 3465 * input speed faster than the period. 3466 */ 3467 while (div-- > 0) 3468 if (kpc >= (div_10M[div] << 2)) break; 3469 3470 /* 3471 * Calculate the lowest clock factor that allows an output 3472 * speed not faster than the period, and the max output speed. 3473 * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT. 3474 * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT. 3475 */ 3476 if (dt) { 3477 fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2; 3478 /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */ 3479 } 3480 else { 3481 fak = (kpc - 1) / div_10M[div] + 1 - 4; 3482 /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */ 3483 } 3484 3485 /* 3486 * Check against our hardware limits, or bugs :). 3487 */ 3488 if (fak < 0) {fak = 0; ret = -1;} 3489 if (fak > 2) {fak = 2; ret = -1;} 3490 3491 /* 3492 * Compute and return sync parameters. 3493 */ 3494 *divp = div; 3495 *fakp = fak; 3496 3497 return ret; 3498 } 3499 3500 /* 3501 * We received a WDTR. 3502 * Let everything be aware of the changes. 3503 */ 3504 static void sym_setwide(hcb_p np, ccb_p cp, u_char wide) 3505 { 3506 struct ccb_trans_settings neg; 3507 union ccb *ccb = cp->cam_ccb; 3508 tcb_p tp = &np->target[cp->target]; 3509 3510 sym_settrans(np, cp, 0, 0, 0, wide, 0, 0); 3511 3512 /* 3513 * Tell the SCSI layer about the new transfer parameters. 
3514 */ 3515 tp->tinfo.goal.width = tp->tinfo.current.width = wide; 3516 tp->tinfo.current.offset = 0; 3517 tp->tinfo.current.period = 0; 3518 tp->tinfo.current.options = 0; 3519 neg.bus_width = wide ? BUS_16_BIT : BUS_8_BIT; 3520 neg.sync_period = tp->tinfo.current.period; 3521 neg.sync_offset = tp->tinfo.current.offset; 3522 neg.valid = CCB_TRANS_BUS_WIDTH_VALID 3523 | CCB_TRANS_SYNC_RATE_VALID 3524 | CCB_TRANS_SYNC_OFFSET_VALID; 3525 xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); 3526 xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); 3527 } 3528 3529 /* 3530 * We received a SDTR. 3531 * Let everything be aware of the changes. 3532 */ 3533 static void 3534 sym_setsync(hcb_p np, ccb_p cp, u_char ofs, u_char per, u_char div, u_char fak) 3535 { 3536 struct ccb_trans_settings neg; 3537 union ccb *ccb = cp->cam_ccb; 3538 tcb_p tp = &np->target[cp->target]; 3539 u_char wide = (cp->phys.select.sel_scntl3 & EWS) ? 1 : 0; 3540 3541 sym_settrans(np, cp, 0, ofs, per, wide, div, fak); 3542 3543 /* 3544 * Tell the SCSI layer about the new transfer parameters. 3545 */ 3546 tp->tinfo.goal.period = tp->tinfo.current.period = per; 3547 tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs; 3548 tp->tinfo.goal.options = tp->tinfo.current.options = 0; 3549 neg.sync_period = tp->tinfo.current.period; 3550 neg.sync_offset = tp->tinfo.current.offset; 3551 neg.valid = CCB_TRANS_SYNC_RATE_VALID 3552 | CCB_TRANS_SYNC_OFFSET_VALID; 3553 xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); 3554 xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); 3555 } 3556 3557 /* 3558 * We received a PPR. 3559 * Let everything be aware of the changes. 
3560 */ 3561 static void sym_setpprot(hcb_p np, ccb_p cp, u_char dt, u_char ofs, 3562 u_char per, u_char wide, u_char div, u_char fak) 3563 { 3564 struct ccb_trans_settings neg; 3565 union ccb *ccb = cp->cam_ccb; 3566 tcb_p tp = &np->target[cp->target]; 3567 3568 sym_settrans(np, cp, dt, ofs, per, wide, div, fak); 3569 3570 /* 3571 * Tell the SCSI layer about the new transfer parameters. 3572 */ 3573 tp->tinfo.goal.width = tp->tinfo.current.width = wide; 3574 tp->tinfo.goal.period = tp->tinfo.current.period = per; 3575 tp->tinfo.goal.offset = tp->tinfo.current.offset = ofs; 3576 tp->tinfo.goal.options = tp->tinfo.current.options = dt; 3577 neg.sync_period = tp->tinfo.current.period; 3578 neg.sync_offset = tp->tinfo.current.offset; 3579 neg.bus_width = wide ? BUS_16_BIT : BUS_8_BIT; 3580 neg.valid = CCB_TRANS_BUS_WIDTH_VALID 3581 | CCB_TRANS_SYNC_RATE_VALID 3582 | CCB_TRANS_SYNC_OFFSET_VALID; 3583 xpt_setup_ccb(&neg.ccb_h, ccb->ccb_h.path, /*priority*/1); 3584 xpt_async(AC_TRANSFER_NEG, ccb->ccb_h.path, &neg); 3585 } 3586 3587 /* 3588 * Switch trans mode for current job and it's target. 3589 */ 3590 static void sym_settrans(hcb_p np, ccb_p cp, u_char dt, u_char ofs, 3591 u_char per, u_char wide, u_char div, u_char fak) 3592 { 3593 SYM_QUEHEAD *qp; 3594 union ccb *ccb; 3595 tcb_p tp; 3596 u_char target = INB (nc_sdid) & 0x0f; 3597 u_char sval, wval, uval; 3598 3599 assert (cp); 3600 if (!cp) return; 3601 ccb = cp->cam_ccb; 3602 assert (ccb); 3603 if (!ccb) return; 3604 assert (target == (cp->target & 0xf)); 3605 tp = &np->target[target]; 3606 3607 sval = tp->head.sval; 3608 wval = tp->head.wval; 3609 uval = tp->head.uval; 3610 3611 #if 0 3612 printf("XXXX sval=%x wval=%x uval=%x (%x)\n", 3613 sval, wval, uval, np->rv_scntl3); 3614 #endif 3615 /* 3616 * Set the offset. 3617 */ 3618 if (!(np->features & FE_C10)) 3619 sval = (sval & ~0x1f) | ofs; 3620 else 3621 sval = (sval & ~0x3f) | ofs; 3622 3623 /* 3624 * Set the sync divisor and extra clock factor. 
3625 */ 3626 if (ofs != 0) { 3627 wval = (wval & ~0x70) | ((div+1) << 4); 3628 if (!(np->features & FE_C10)) 3629 sval = (sval & ~0xe0) | (fak << 5); 3630 else { 3631 uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT); 3632 if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT); 3633 if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT); 3634 } 3635 } 3636 3637 /* 3638 * Set the bus width. 3639 */ 3640 wval = wval & ~EWS; 3641 if (wide != 0) 3642 wval |= EWS; 3643 3644 /* 3645 * Set misc. ultra enable bits. 3646 */ 3647 if (np->features & FE_C10) { 3648 uval = uval & ~U3EN; 3649 if (dt) { 3650 assert(np->features & FE_U3EN); 3651 uval |= U3EN; 3652 } 3653 } 3654 else { 3655 wval = wval & ~ULTRA; 3656 if (per <= 12) wval |= ULTRA; 3657 } 3658 3659 /* 3660 * Stop there if sync parameters are unchanged. 3661 */ 3662 if (tp->head.sval == sval && 3663 tp->head.wval == wval && 3664 tp->head.uval == uval) 3665 return; 3666 tp->head.sval = sval; 3667 tp->head.wval = wval; 3668 tp->head.uval = uval; 3669 3670 /* 3671 * Disable extended Sreq/Sack filtering if per < 50. 3672 * Not supported on the C1010. 3673 */ 3674 if (per < 50 && !(np->features & FE_C10)) 3675 OUTOFFB (nc_stest2, EXT); 3676 3677 /* 3678 * set actual value and sync_status 3679 */ 3680 OUTB (nc_sxfer, tp->head.sval); 3681 OUTB (nc_scntl3, tp->head.wval); 3682 3683 if (np->features & FE_C10) { 3684 OUTB (nc_scntl4, tp->head.uval); 3685 } 3686 3687 /* 3688 * patch ALL busy ccbs of this target. 3689 */ 3690 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { 3691 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 3692 if (cp->target != target) 3693 continue; 3694 cp->phys.select.sel_scntl3 = tp->head.wval; 3695 cp->phys.select.sel_sxfer = tp->head.sval; 3696 if (np->features & FE_C10) { 3697 cp->phys.select.sel_scntl4 = tp->head.uval; 3698 } 3699 } 3700 } 3701 3702 /* 3703 * log message for real hard errors 3704 * 3705 * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc). 3706 * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf. 
 *
 * exception register:
 * 	ds:	dstat
 * 	si:	sist
 *
 * SCSI bus lines:
 * 	so:	control lines as driven by chip.
 * 	si:	control lines as seen by chip.
 * 	sd:	scsi data lines as seen by chip.
 *
 * wide/fastmode:
 * 	sxfer:	(see the manual)
 * 	scntl3:	(see the manual)
 *
 * current script command:
 * 	dsp:	script address (relative to start of script).
 * 	dbc:	first word of script command.
 *
 * First 24 registers of the chip:
 * 	r0..rf
 */
static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
{
	u32	dsp;
	int	script_ofs;
	int	script_size;
	char	*script_name;
	u_char	*script_base;
	int	i;

	dsp	= INL (nc_dsp);

	/*
	 * Identify which SCRIPTS area (if any) the DSP points into,
	 * so the failing instruction can be located and printed.
	 */
	if (dsp > np->scripta_ba &&
	    dsp <= np->scripta_ba + np->scripta_sz) {
		script_ofs	= dsp - np->scripta_ba;
		script_size	= np->scripta_sz;
		script_base	= (u_char *) np->scripta0;
		script_name	= "scripta";
	}
	else if (np->scriptb_ba < dsp &&
		 dsp <= np->scriptb_ba + np->scriptb_sz) {
		script_ofs	= dsp - np->scriptb_ba;
		script_size	= np->scriptb_sz;
		script_base	= (u_char *) np->scriptb0;
		script_name	= "scriptb";
	} else {
		/* DSP outside both scripts: report the raw address. */
		script_ofs	= dsp;
		script_size	= 0;
		script_base	= 0;
		script_name	= "mem";
	}

	printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n",
		sym_name (np), (unsigned)INB (nc_sdid)&0x0f, dstat, sist,
		(unsigned)INB (nc_socl), (unsigned)INB (nc_sbcl),
		(unsigned)INB (nc_sbdl), (unsigned)INB (nc_sxfer),
		(unsigned)INB (nc_scntl3), script_name, script_ofs,
		(unsigned)INL (nc_dbc));

	/* Print the interrupted script word when the offset is sane. */
	if (((script_ofs & 3) == 0) &&
	    (unsigned)script_ofs < script_size) {
		printf ("%s: script cmd = %08x\n", sym_name(np),
			scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
	}

	/* Dump the first 24 chip registers. */
	printf ("%s: regdump:", sym_name(np));
	for (i=0; i<24;i++)
		printf (" %02x",
		(unsigned)INB_OFF(i));
	printf (".\n");

	/*
	 * PCI BUS error, read the PCI status register.
	 * Error bits in PCI status are write-one-to-clear: writing
	 * back the value read acknowledges them.
	 */
	if (dstat & (MDPE|BF)) {
		u_short pci_sts;
#ifdef FreeBSD_Bus_Io_Abstraction
		pci_sts = pci_read_config(np->device, PCIR_STATUS, 2);
#else
		pci_sts = pci_cfgread(np->pci_tag, PCIR_STATUS, 2);
#endif
		if (pci_sts & 0xf900) {
#ifdef FreeBSD_Bus_Io_Abstraction
			pci_write_config(np->device, PCIR_STATUS, pci_sts, 2);
#else
			pci_cfgwrite(np->pci_tag, PCIR_STATUS, pci_sts, 2);
#endif
			printf("%s: PCI STATUS = 0x%04x\n",
				sym_name(np), pci_sts & 0xf900);
		}
	}
}

/*
 * chip interrupt handler
 *
 * In normal situations, interrupt conditions occur one at
 * a time. But when something bad happens on the SCSI BUS,
 * the chip may raise several interrupt flags before
 * stopping and interrupting the CPU. The additional
 * interrupt flags are stacked in some extra registers
 * after the SIP and/or DIP flag has been raised in the
 * ISTAT. After the CPU has read the interrupt condition
 * flag from SIST or DSTAT, the chip unstacks the other
 * interrupt flags and sets the corresponding bits in
 * SIST or DSTAT. Since the chip starts stacking once the
 * SIP or DIP flag is set, there is a small window of time
 * where the stacking does not occur.
 *
 * Typically, multiple interrupt conditions may happen in
 * the following situations:
 *
 * - SCSI parity error + Phase mismatch  (PAR|MA)
 *   When a parity error is detected in input phase
 *   and the device switches to msg-in phase inside a
 *   block MOV.
 * - SCSI parity error + Unexpected disconnect (PAR|UDC)
 *   When a stupid device does not want to handle the
 *   recovery of an SCSI parity error.
 * - Some combinations of STO, PAR, UDC, ...
 *   When using non compliant SCSI stuff, when user is
 *   doing non compliant hot tampering on the BUS, when
 *   something really bad happens to a device, etc ...
 *
 * The heuristic suggested by SYMBIOS to handle
 * multiple interrupts is to try unstacking all
 * interrupts conditions and to handle them on some
 * priority based on error severity.
 * This will work when the unstacking has been
 * successful, but we cannot be 100 % sure of that,
 * since the CPU may have been faster to unstack than
 * the chip is able to stack. Hmmm ... But it seems that
 * such a situation is very unlikely to happen.
 *
 * If this happens, for example STO caught by the CPU
 * then UDC happening before the CPU has restarted
 * the SCRIPTS, the driver may wrongly complete the
 * same command on UDC, since the SCRIPTS didn't restart
 * and the DSA still points to the same command.
 * We avoid this situation by setting the DSA to an
 * invalid value when the CCB is completed and before
 * restarting the SCRIPTS.
 *
 * Another issue is that we need some section of our
 * recovery procedures to be somehow uninterruptible but
 * the SCRIPTS processor does not provide such a
 * feature. For this reason, we handle recovery preferently
 * from the C code and check against some SCRIPTS critical
 * sections from the C code.
 *
 * Hopefully, the interrupt handling of the driver is now
 * able to resist to weird BUS error conditions, but do not
 * ask me for any guarantee that it will never fail. :-)
 * Use at your own decision and risk.
 */

static void sym_intr1 (hcb_p np)
{
	u_char	istat, istatc;
	u_char	dstat;
	u_short	sist;

	/*
	 * interrupt on the fly ?
	 * The INTF flag signals completed commands without stopping
	 * the SCRIPTS; it is acknowledged by writing it back (the
	 * write also preserves SIGP and the driver's semaphore bit).
	 */
	istat = INB (nc_istat);
	if (istat & INTF) {
		OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem);
#if 1
		istat = INB (nc_istat);		/* DUMMY READ */
#endif
		if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
		(void)sym_wakeup_done (np);
	};

	if (!(istat & (SIP|DIP)))
		return;

#if 0	/* We should never get this one */
	if (istat & CABRT)
		OUTB (nc_istat, CABRT);
#endif

	/*
	 * PAR and MA interrupts may occur at the same time,
	 * and we need to know of both in order to handle
	 * this situation properly. We try to unstack SCSI
	 * interrupts for that reason. BTW, I dislike a LOT
	 * such a loop inside the interrupt routine.
	 * Even if DMA interrupt stacking is very unlikely to
	 * happen, we also try unstacking these ones, since
	 * this has no performance impact.
	 */
	sist	= 0;
	dstat	= 0;
	istatc	= istat;
	do {
		/* Reading SIST/DSTAT unstacks the next pending flags. */
		if (istatc & SIP)
			sist  |= INW (nc_sist);
		if (istatc & DIP)
			dstat |= INB (nc_dstat);
		istatc = INB (nc_istat);
		istat |= istatc;
	} while (istatc & (SIP|DIP));

	if (DEBUG_FLAGS & DEBUG_TINY)
		printf ("<%d|%x:%x|%x:%x>",
			(int)INB(nc_scr0),
			dstat,sist,
			(unsigned)INL(nc_dsp),
			(unsigned)INL(nc_dbc));
	/*
	 * First, interrupts we want to service cleanly.
	 *
	 * Phase mismatch (MA) is the most frequent interrupt
	 * for chips earlier than the 896 and so we have to service
	 * it as quickly as possible.
	 * A SCSI parity error (PAR) may be combined with a phase
	 * mismatch condition (MA).
	 * Programmed interrupts (SIR) are used to call the C code
	 * from SCRIPTS.
	 * The single step interrupt (SSI) is not used in this
	 * driver.
	 */
	if (!(sist  & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
	    !(dstat & (MDPE|BF|ABRT|IID))) {
		if	(sist & PAR)	sym_int_par (np, sist);
		else if (sist & MA)	sym_int_ma (np);
		else if (dstat & SIR)	sym_int_sir (np);
		else if	(dstat & SSI)	OUTONB (nc_dcntl, (STD|NOCOM));
		else			goto unknown_int;
		return;
	};

	/*
	 * Now, interrupts that do not happen in normal
	 * situations and that we may need to recover from.
	 *
	 * On SCSI RESET (RST), we reset everything.
	 * On SCSI BUS MODE CHANGE (SBMC), we complete all
	 * active CCBs with RESET status, prepare all devices
	 * for negotiating again and restart the SCRIPTS.
	 * On STO and UDC, we complete the CCB with the corres-
	 * ponding status and restart the SCRIPTS.
	 */
	if (sist & RST) {
		xpt_print_path(np->path);
		printf("SCSI BUS reset detected.\n");
		sym_init (np, 1);
		return;
	};

	OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* clear dma fifo  */
	OUTB (nc_stest3, TE|CSF);		/* clear scsi fifo */

	if (!(sist  & (GEN|HTH|SGE)) &&
	    !(dstat & (MDPE|BF|ABRT|IID))) {
		if	(sist & SBMC)	sym_int_sbmc (np);
		else if (sist & STO)	sym_int_sto (np);
		else if (sist & UDC)	sym_int_udc (np);
		else			goto unknown_int;
		return;
	};

	/*
	 * Now, interrupts we are not able to recover cleanly.
	 *
	 * Log message for hard errors.
	 * Reset everything.
	 */

	sym_log_hard_error(np, sist, dstat);

	if ((sist & (GEN|HTH|SGE)) ||
	    (dstat & (MDPE|BF|ABRT|IID))) {
		sym_start_reset(np);
		return;
	};

unknown_int:
	/*
	 * We just missed the cause of the interrupt. :(
	 * Print a message. The timeout will do the real work.
	 */
	printf(	"%s: unknown interrupt(s) ignored, "
		"ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
		sym_name(np), istat, dstat, sist);
}

/*
 * Interrupt entry point: thin wrapper around sym_intr1() that
 * brackets the real work with DEBUG_TINY trace markers.
 */
static void sym_intr(void *arg)
{
	if (DEBUG_FLAGS & DEBUG_TINY) printf ("[");
	sym_intr1((hcb_p) arg);
	if (DEBUG_FLAGS & DEBUG_TINY) printf ("]");
	return;
}

/*
 * CAM poll entry point: run the interrupt handler with CAM
 * interrupts blocked (splcam).
 */
static void sym_poll(struct cam_sim *sim)
{
	int s = splcam();
	sym_intr(cam_sim_softc(sim));
	splx(s);
}


/*
 * generic recovery from scsi interrupt
 *
 * The doc says that when the chip gets an SCSI interrupt,
 * it tries to stop in an orderly fashion, by completing
 * an instruction fetch that had started or by flushing
 * the DMA fifo for a write to memory that was executing.
 * Such a fashion is not enough to know if the instruction
 * that was just before the current DSP value has been
 * executed or not.
 *
 * There are some small SCRIPTS sections that deal with
 * the start queue and the done queue that may break any
 * assumption from the C code if we are interrupted
 * inside, so we reset if this happens. Btw, since these
 * SCRIPTS sections are executed while the SCRIPTS hasn't
 * started SCSI operations, it is very unlikely to happen.
 *
 * All the driver data structures are supposed to be
 * allocated from the same 4 GB memory window, so there
 * is a 1 to 1 relationship between DSA and driver data
 * structures. Since we are careful :) to invalidate the
 * DSA when we complete a command or when the SCRIPTS
 * pushes a DSA into a queue, we can trust it when it
 * points to a CCB.
4036 */ 4037 static void sym_recover_scsi_int (hcb_p np, u_char hsts) 4038 { 4039 u32 dsp = INL (nc_dsp); 4040 u32 dsa = INL (nc_dsa); 4041 ccb_p cp = sym_ccb_from_dsa(np, dsa); 4042 4043 /* 4044 * If we haven't been interrupted inside the SCRIPTS 4045 * critical pathes, we can safely restart the SCRIPTS 4046 * and trust the DSA value if it matches a CCB. 4047 */ 4048 if ((!(dsp > SCRIPTA_BA (np, getjob_begin) && 4049 dsp < SCRIPTA_BA (np, getjob_end) + 1)) && 4050 (!(dsp > SCRIPTA_BA (np, ungetjob) && 4051 dsp < SCRIPTA_BA (np, reselect) + 1)) && 4052 (!(dsp > SCRIPTB_BA (np, sel_for_abort) && 4053 dsp < SCRIPTB_BA (np, sel_for_abort_1) + 1)) && 4054 (!(dsp > SCRIPTA_BA (np, done) && 4055 dsp < SCRIPTA_BA (np, done_end) + 1))) { 4056 OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */ 4057 OUTB (nc_stest3, TE|CSF); /* clear scsi fifo */ 4058 /* 4059 * If we have a CCB, let the SCRIPTS call us back for 4060 * the handling of the error with SCRATCHA filled with 4061 * STARTPOS. This way, we will be able to freeze the 4062 * device queue and requeue awaiting IOs. 4063 */ 4064 if (cp) { 4065 cp->host_status = hsts; 4066 OUTL (nc_dsp, SCRIPTA_BA (np, complete_error)); 4067 } 4068 /* 4069 * Otherwise just restart the SCRIPTS. 
4070 */ 4071 else { 4072 OUTL (nc_dsa, 0xffffff); 4073 OUTL (nc_dsp, SCRIPTA_BA (np, start)); 4074 } 4075 } 4076 else 4077 goto reset_all; 4078 4079 return; 4080 4081 reset_all: 4082 sym_start_reset(np); 4083 } 4084 4085 /* 4086 * chip exception handler for selection timeout 4087 */ 4088 void sym_int_sto (hcb_p np) 4089 { 4090 u32 dsp = INL (nc_dsp); 4091 4092 if (DEBUG_FLAGS & DEBUG_TINY) printf ("T"); 4093 4094 if (dsp == SCRIPTA_BA (np, wf_sel_done) + 8) 4095 sym_recover_scsi_int(np, HS_SEL_TIMEOUT); 4096 else 4097 sym_start_reset(np); 4098 } 4099 4100 /* 4101 * chip exception handler for unexpected disconnect 4102 */ 4103 void sym_int_udc (hcb_p np) 4104 { 4105 printf ("%s: unexpected disconnect\n", sym_name(np)); 4106 sym_recover_scsi_int(np, HS_UNEXPECTED); 4107 } 4108 4109 /* 4110 * chip exception handler for SCSI bus mode change 4111 * 4112 * spi2-r12 11.2.3 says a transceiver mode change must 4113 * generate a reset event and a device that detects a reset 4114 * event shall initiate a hard reset. It says also that a 4115 * device that detects a mode change shall set data transfer 4116 * mode to eight bit asynchronous, etc... 4117 * So, just reinitializing all except chip should be enough. 4118 */ 4119 static void sym_int_sbmc (hcb_p np) 4120 { 4121 u_char scsi_mode = INB (nc_stest4) & SMODE; 4122 4123 /* 4124 * Notify user. 4125 */ 4126 xpt_print_path(np->path); 4127 printf("SCSI BUS mode change from %s to %s.\n", 4128 sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode)); 4129 4130 /* 4131 * Should suspend command processing for a few seconds and 4132 * reinitialize all except the chip. 4133 */ 4134 sym_init (np, 2); 4135 } 4136 4137 /* 4138 * chip exception handler for SCSI parity error. 4139 * 4140 * When the chip detects a SCSI parity error and is 4141 * currently executing a (CH)MOV instruction, it does 4142 * not interrupt immediately, but tries to finish the 4143 * transfer of the current scatter entry before 4144 * interrupting. 
 * The following situations may occur:
 *
 * - The complete scatter entry has been transferred
 *   without the device having changed phase.
 *   The chip will then interrupt with the DSP pointing
 *   to the instruction that follows the MOV.
 *
 * - A phase mismatch occurs before the MOV finished
 *   and phase errors are to be handled by the C code.
 *   The chip will then interrupt with both PAR and MA
 *   conditions set.
 *
 * - A phase mismatch occurs before the MOV finished and
 *   phase errors are to be handled by SCRIPTS.
 *   The chip will load the DSP with the phase mismatch
 *   JUMP address and interrupt the host processor.
 */
static void sym_int_par (hcb_p np, u_short sist)
{
	u_char	hsts	= INB (HS_PRT);
	u32	dsp	= INL (nc_dsp);
	u32	dbc	= INL (nc_dbc);
	u32	dsa	= INL (nc_dsa);
	u_char	sbcl	= INB (nc_sbcl);
	u_char	cmd	= dbc >> 24;
	/* SCSI phase encoded in the low 3 bits of the script opcode. */
	int phase	= cmd & 7;
	ccb_p	cp	= sym_ccb_from_dsa(np, dsa);

	printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
		sym_name(np), hsts, dbc, sbcl);

	/*
	 * Check that the chip is connected to the SCSI BUS.
	 */
	if (!(INB (nc_scntl1) & ISCON)) {
		sym_recover_scsi_int(np, HS_UNEXPECTED);
		return;
	}

	/*
	 * If the nexus is not clearly identified, reset the bus.
	 * We will try to do better later.
	 */
	if (!cp)
		goto reset_all;

	/*
	 * Check instruction was a MOV, direction was INPUT and
	 * ATN is asserted.
	 */
	if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
		goto reset_all;

	/*
	 * Keep track of the parity error.
	 */
	OUTONB (HF_PRT, HF_EXT_ERR);
	cp->xerr_status |= XE_PARITY_ERR;

	/*
	 * Prepare the message to send to the device.
	 * Phase 7 is MSG IN: a parity error there uses the
	 * MESSAGE PARITY message instead of INITIATOR DETECTED ERROR.
	 */
	np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;

	/*
	 * If the old phase was DATA IN phase, we have to deal with
	 * the 3 situations described above.
	 * For other input phases (MSG IN and STATUS), the device
	 * must resend the whole thing that failed parity checking
	 * or signal error. So, jumping to dispatcher should be OK.
	 */
	if (phase == 1) {
		/* Phase mismatch handled by SCRIPTS */
		if (dsp == SCRIPTB_BA (np, pm_handle))
			OUTL (nc_dsp, dsp);
		/* Phase mismatch handled by the C code */
		else if (sist & MA)
			sym_int_ma (np);
		/* No phase mismatch occurred */
		else {
			OUTL (nc_temp, dsp);
			OUTL (nc_dsp, SCRIPTA_BA (np, dispatch));
		}
	}
	else
		OUTL (nc_dsp, SCRIPTA_BA (np, clrack));
	return;

reset_all:
	sym_start_reset(np);
	return;
}

/*
 * chip exception handler for phase errors.
 *
 * We have to construct a new transfer descriptor,
 * to transfer the rest of the current block.
 */
static void sym_int_ma (hcb_p np)
{
	u32	dbc;
	u32	rest;
	u32	dsp;
	u32	dsa;
	u32	nxtdsp;
	u32	*vdsp;
	u32	oadr, olen;
	u32	*tblp;
	u32	newcmd;
	u_int	delta;
	u_char	cmd;
	u_char	hflags, hflags0;
	struct	sym_pmc *pm;
	ccb_p	cp;

	dsp	= INL (nc_dsp);
	dbc	= INL (nc_dbc);
	dsa	= INL (nc_dsa);

	/* DBC holds the interrupted opcode (high byte) and residual. */
	cmd	= dbc >> 24;
	rest	= dbc & 0xffffff;
	delta	= 0;

	/*
	 * locate matching cp if any.
	 */
	cp = sym_ccb_from_dsa(np, dsa);

	/*
	 * Do not take into account dma fifo and various buffers in
	 * INPUT phase since the chip flushes everything before
	 * raising the MA interrupt for interrupted INPUT phases.
	 * For DATA IN phase, we will check for the SWIDE later.
	 */
	if ((cmd & 7) != 1) {
		u_char ss0, ss2;

		if (np->features & FE_DFBC)
			delta = INW (nc_dfbc);
		else {
			u32 dfifo;

			/*
			 * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
			 */
			dfifo = INL(nc_dfifo);

			/*
			 * Calculate remaining bytes in DMA fifo.
			 * (CTEST5 = dfifo >> 16)
			 */
			if (dfifo & (DFS << 16))
				delta = ((((dfifo >> 8) & 0x300) |
				          (dfifo & 0xff)) - rest) & 0x3ff;
			else
				delta = ((dfifo & 0xff) - rest) & 0x7f;
		}

		/*
		 * The data in the dma fifo has not been transferred to
		 * the target -> add the amount to the rest
		 * and clear the data.
		 * Check the sstat2 register in case of wide transfer.
		 */
		rest += delta;
		ss0  = INB (nc_sstat0);
		if (ss0 & OLF) rest++;
		if (!(np->features & FE_C10))
			if (ss0 & ORF) rest++;
		if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
			ss2 = INB (nc_sstat2);
			if (ss2 & OLF1) rest++;
			if (!(np->features & FE_C10))
				if (ss2 & ORF1) rest++;
		};

		/*
		 * Clear fifos.
		 */
		OUTB (nc_ctest3, np->rv_ctest3 | CLF);	/* dma fifo  */
		OUTB (nc_stest3, TE|CSF);		/* scsi fifo */
	}

	/*
	 * log the information
	 */
	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
		printf ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7,
			(unsigned) rest, (unsigned) delta);

	/*
	 * try to find the interrupted script command,
	 * and the address at which to continue.
	 * The '-8' points at the interrupted instruction itself
	 * (DSP has already advanced past it).
	 */
	vdsp	= 0;
	nxtdsp	= 0;
	if	(dsp >  np->scripta_ba &&
		 dsp <= np->scripta_ba + np->scripta_sz) {
		vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8));
		nxtdsp = dsp;
	}
	else if (dsp >  np->scriptb_ba &&
		 dsp <= np->scriptb_ba + np->scriptb_sz) {
		vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8));
		nxtdsp = dsp;
	}

	/*
	 * log the information
	 */
	if (DEBUG_FLAGS & DEBUG_PHASE) {
		printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
			cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
	};

	if (!vdsp) {
		printf ("%s: interrupted SCRIPT address not found.\n",
			sym_name (np));
		goto reset_all;
	}

	if (!cp) {
		printf ("%s: SCSI phase error fixup: CCB already dequeued.\n",
			sym_name (np));
		goto reset_all;
	}

	/*
	 * get old startaddress and old length.
	 */
	oadr = scr_to_cpu(vdsp[1]);

	if (cmd & 0x10) {	/* Table indirect */
		tblp = (u32 *) ((char*) &cp->phys + oadr);
		olen = scr_to_cpu(tblp[0]);
		oadr = scr_to_cpu(tblp[1]);
	} else {
		tblp = (u32 *) 0;
		olen = scr_to_cpu(vdsp[0]) & 0xffffff;
	};

	if (DEBUG_FLAGS & DEBUG_PHASE) {
		printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
			(unsigned) (scr_to_cpu(vdsp[0]) >> 24),
			tblp,
			(unsigned) olen,
			(unsigned) oadr);
	};

	/*
	 * check cmd against assumed interrupted script command.
	 */
	if (cmd != (scr_to_cpu(vdsp[0]) >> 24)) {
		PRINT_ADDR(cp);
		printf ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
			(unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);

		goto reset_all;
	};

	/*
	 * if old phase not dataphase, leave here.
	 * (cmd & 5) != (cmd & 7) is true for any phase other than
	 * DATA OUT (0) and DATA IN (1).
	 */
	if ((cmd & 5) != (cmd & 7)) {
		PRINT_ADDR(cp);
		printf ("phase change %x-%x %d@%08x resid=%d.\n",
			cmd&7, INB(nc_sbcl)&7, (unsigned)olen,
			(unsigned)oadr, (unsigned)rest);
		goto unexpected_phase;
	};

	/*
	 * Choose the correct PM save area.
	 *
	 * Look at the PM_SAVE SCRIPT if you want to understand
	 * this stuff. The equivalent code is implemented in
	 * SCRIPTS for the 895A and 896 that are able to handle
	 * PM from the SCRIPTS processor.
	 */
	hflags0 = INB (HF_PRT);
	hflags = hflags0;

	if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
		if (hflags & HF_IN_PM0)
			nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
		else if	(hflags & HF_IN_PM1)
			nxtdsp = scr_to_cpu(cp->phys.pm1.ret);

		if (hflags & HF_DP_SAVED)
			hflags ^= HF_ACT_PM;
	}

	if (!(hflags & HF_ACT_PM)) {
		pm = &cp->phys.pm0;
		newcmd = SCRIPTA_BA (np, pm0_data);
	}
	else {
		pm = &cp->phys.pm1;
		newcmd = SCRIPTA_BA (np, pm1_data);
	}

	hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
	if (hflags != hflags0)
		OUTB (HF_PRT, hflags);

	/*
	 * fill in the phase mismatch context
	 */
	pm->sg.addr = cpu_to_scr(oadr + olen - rest);
	pm->sg.size = cpu_to_scr(rest);
	pm->ret     = cpu_to_scr(nxtdsp);

	/*
	 * If we have a SWIDE,
	 * - prepare the address to write the SWIDE from SCRIPTS,
	 * - compute the SCRIPTS address to restart from,
	 * - move current data pointer context by one byte.
	 */
	nxtdsp = SCRIPTA_BA (np, dispatch);
	if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
	    (INB (nc_scntl2) & WSR)) {
		u32 tmp;

		/*
		 * Set up the table indirect for the MOVE
		 * of the residual byte and adjust the data
		 * pointer context.
		 */
		tmp = scr_to_cpu(pm->sg.addr);
		cp->phys.wresid.addr = cpu_to_scr(tmp);
		pm->sg.addr = cpu_to_scr(tmp + 1);
		tmp = scr_to_cpu(pm->sg.size);
		/* Keep the size descriptor's high byte, length = 1. */
		cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
		pm->sg.size = cpu_to_scr(tmp - 1);

		/*
		 * If only the residual byte is to be moved,
		 * no PM context is needed.
		 */
		if ((tmp&0xffffff) == 1)
			newcmd = pm->ret;

		/*
		 * Prepare the address of SCRIPTS that will
		 * move the residual byte to memory.
		 */
		nxtdsp = SCRIPTB_BA (np, wsr_ma_helper);
	}

	if (DEBUG_FLAGS & DEBUG_PHASE) {
		PRINT_ADDR(cp);
		printf ("PM %x %x %x / %x %x %x.\n",
			hflags0, hflags, newcmd,
			(unsigned)scr_to_cpu(pm->sg.addr),
			(unsigned)scr_to_cpu(pm->sg.size),
			(unsigned)scr_to_cpu(pm->ret));
	}

	/*
	 * Restart the SCRIPTS processor.
	 */
	OUTL (nc_temp, newcmd);
	OUTL (nc_dsp,  nxtdsp);
	return;

	/*
	 * Unexpected phase changes that occur when the current phase
	 * is not a DATA IN or DATA OUT phase are due to error conditions.
	 * Such event may only happen when the SCRIPTS is using a
	 * multibyte SCSI MOVE.
	 *
	 * Phase change		Some possible cause
	 *
	 * COMMAND  --> MSG IN	SCSI parity error detected by target.
	 * COMMAND  --> STATUS	Bad command or refused by target.
	 * MSG OUT  --> MSG IN	Message rejected by target.
	 * MSG OUT  --> COMMAND	Bogus target that discards extended
	 *			negotiation messages.
	 *
	 * The code below does not care of the new phase and so
	 * trusts the target. Why to annoy it ?
	 * If the interrupted phase is COMMAND phase, we restart at
	 * dispatcher.
	 * If a target does not get all the messages after selection,
	 * the code assumes blindly that the target discards extended
	 * messages and clears the negotiation status.
	 * If the target does not want all our response to negotiation,
	 * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
	 * bloat for such a should_not_happen situation).
	 * In all other situation, we reset the BUS.
	 * Are these assumptions reasonable ? (Wait and see ...)
	 */
unexpected_phase:
	dsp -= 8;
	nxtdsp = 0;

	switch (cmd & 7) {
	case 2:	/* COMMAND phase */
		nxtdsp = SCRIPTA_BA (np, dispatch);
		break;
#if 0
	case 3:	/* STATUS phase */
		nxtdsp = SCRIPTA_BA (np, dispatch);
		break;
#endif
	case 6:	/* MSG OUT phase */
		/*
		 * If the device may want to use untagged when we want
		 * tagged, we prepare an IDENTIFY without disc. granted,
		 * since we will not be able to handle reselect.
		 * Otherwise, we just don't care.
		 */
		if	(dsp == SCRIPTA_BA (np, send_ident)) {
			if (cp->tag != NO_TAG && olen - rest <= 3) {
				cp->host_status = HS_BUSY;
				np->msgout[0] = M_IDENTIFY | cp->lun;
				nxtdsp = SCRIPTB_BA (np, ident_break_atn);
			}
			else
				nxtdsp = SCRIPTB_BA (np, ident_break);
		}
		else if	(dsp == SCRIPTB_BA (np, send_wdtr) ||
			 dsp == SCRIPTB_BA (np, send_sdtr) ||
			 dsp == SCRIPTB_BA (np, send_ppr)) {
			nxtdsp = SCRIPTB_BA (np, nego_bad_phase);
		}
		break;
#if 0
	case 7:	/* MSG IN  phase */
		nxtdsp = SCRIPTA_BA (np, clrack);
		break;
#endif
	}

	if (nxtdsp) {
		OUTL (nc_dsp, nxtdsp);
		return;
	}

reset_all:
	sym_start_reset(np);
}

/*
 * Dequeue from the START queue all CCBs that match
 * a given target/lun/task condition (-1 means all),
 * and move them from the BUSY queue to the COMP queue
 * with CAM_REQUEUE_REQ status condition.
 * This function is used during error handling/recovery.
 * It is called with SCRIPTS not running.
4595 */ 4596 static int 4597 sym_dequeue_from_squeue(hcb_p np, int i, int target, int lun, int task) 4598 { 4599 int j; 4600 ccb_p cp; 4601 4602 /* 4603 * Make sure the starting index is within range. 4604 */ 4605 assert((i >= 0) && (i < 2*MAX_QUEUE)); 4606 4607 /* 4608 * Walk until end of START queue and dequeue every job 4609 * that matches the target/lun/task condition. 4610 */ 4611 j = i; 4612 while (i != np->squeueput) { 4613 cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i])); 4614 assert(cp); 4615 #ifdef SYM_CONF_IARB_SUPPORT 4616 /* Forget hints for IARB, they may be no longer relevant */ 4617 cp->host_flags &= ~HF_HINT_IARB; 4618 #endif 4619 if ((target == -1 || cp->target == target) && 4620 (lun == -1 || cp->lun == lun) && 4621 (task == -1 || cp->tag == task)) { 4622 sym_set_cam_status(cp->cam_ccb, CAM_REQUEUE_REQ); 4623 sym_remque(&cp->link_ccbq); 4624 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); 4625 } 4626 else { 4627 if (i != j) 4628 np->squeue[j] = np->squeue[i]; 4629 if ((j += 2) >= MAX_QUEUE*2) j = 0; 4630 } 4631 if ((i += 2) >= MAX_QUEUE*2) i = 0; 4632 } 4633 if (i != j) /* Copy back the idle task if needed */ 4634 np->squeue[j] = np->squeue[i]; 4635 np->squeueput = j; /* Update our current start queue pointer */ 4636 4637 return (i - j) / 2; 4638 } 4639 4640 /* 4641 * Complete all CCBs queued to the COMP queue. 4642 * 4643 * These CCBs are assumed: 4644 * - Not to be referenced either by devices or 4645 * SCRIPTS-related queues and datas. 4646 * - To have to be completed with an error condition 4647 * or requeued. 4648 * 4649 * The device queue freeze count is incremented 4650 * for each CCB that does not prevent this. 4651 * This function is called when all CCBs involved 4652 * in error handling/recovery have been reaped. 
4653 */ 4654 static void 4655 sym_flush_comp_queue(hcb_p np, int cam_status) 4656 { 4657 SYM_QUEHEAD *qp; 4658 ccb_p cp; 4659 4660 while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) { 4661 union ccb *ccb; 4662 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 4663 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); 4664 /* Leave quiet CCBs waiting for resources */ 4665 if (cp->host_status == HS_WAIT) 4666 continue; 4667 ccb = cp->cam_ccb; 4668 if (cam_status) 4669 sym_set_cam_status(ccb, cam_status); 4670 sym_free_ccb(np, cp); 4671 sym_freeze_cam_ccb(ccb); 4672 sym_xpt_done(np, ccb); 4673 } 4674 } 4675 4676 /* 4677 * chip handler for bad SCSI status condition 4678 * 4679 * In case of bad SCSI status, we unqueue all the tasks 4680 * currently queued to the controller but not yet started 4681 * and then restart the SCRIPTS processor immediately. 4682 * 4683 * QUEUE FULL and BUSY conditions are handled the same way. 4684 * Basically all the not yet started tasks are requeued in 4685 * device queue and the queue is frozen until a completion. 4686 * 4687 * For CHECK CONDITION and COMMAND TERMINATED status, we use 4688 * the CCB of the failed command to prepare a REQUEST SENSE 4689 * SCSI command and queue it to the controller queue. 4690 * 4691 * SCRATCHA is assumed to have been loaded with STARTPOS 4692 * before the SCRIPTS called the C code. 4693 */ 4694 static void sym_sir_bad_scsi_status(hcb_p np, int num, ccb_p cp) 4695 { 4696 tcb_p tp = &np->target[cp->target]; 4697 u32 startp; 4698 u_char s_status = cp->ssss_status; 4699 u_char h_flags = cp->host_flags; 4700 int msglen; 4701 int nego; 4702 int i; 4703 4704 /* 4705 * Compute the index of the next job to start from SCRIPTS. 4706 */ 4707 i = (INL (nc_scratcha) - np->squeue_ba) / 4; 4708 4709 /* 4710 * The last CCB queued used for IARB hint may be 4711 * no longer relevant. Forget it. 
4712 */ 4713 #ifdef SYM_CONF_IARB_SUPPORT 4714 if (np->last_cp) 4715 np->last_cp = 0; 4716 #endif 4717 4718 /* 4719 * Now deal with the SCSI status. 4720 */ 4721 switch(s_status) { 4722 case S_BUSY: 4723 case S_QUEUE_FULL: 4724 if (sym_verbose >= 2) { 4725 PRINT_ADDR(cp); 4726 printf (s_status == S_BUSY ? "BUSY" : "QUEUE FULL\n"); 4727 } 4728 default: /* S_INT, S_INT_COND_MET, S_CONFLICT */ 4729 sym_complete_error (np, cp); 4730 break; 4731 case S_TERMINATED: 4732 case S_CHECK_COND: 4733 /* 4734 * If we get an SCSI error when requesting sense, give up. 4735 */ 4736 if (h_flags & HF_SENSE) { 4737 sym_complete_error (np, cp); 4738 break; 4739 } 4740 4741 /* 4742 * Dequeue all queued CCBs for that device not yet started, 4743 * and restart the SCRIPTS processor immediately. 4744 */ 4745 (void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); 4746 OUTL (nc_dsp, SCRIPTA_BA (np, start)); 4747 4748 /* 4749 * Save some info of the actual IO. 4750 * Compute the data residual. 4751 */ 4752 cp->sv_scsi_status = cp->ssss_status; 4753 cp->sv_xerr_status = cp->xerr_status; 4754 cp->sv_resid = sym_compute_residual(np, cp); 4755 4756 /* 4757 * Prepare all needed data structures for 4758 * requesting sense data. 4759 */ 4760 4761 /* 4762 * identify message 4763 */ 4764 cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun; 4765 msglen = 1; 4766 4767 /* 4768 * If we are currently using anything different from 4769 * async. 8 bit data transfers with that target, 4770 * start a negotiation, since the device may want 4771 * to report us a UNIT ATTENTION condition due to 4772 * a cause we currently ignore, and we donnot want 4773 * to be stuck with WIDE and/or SYNC data transfer. 4774 * 4775 * cp->nego_status is filled by sym_prepare_nego(). 
4776 */ 4777 cp->nego_status = 0; 4778 nego = 0; 4779 if (tp->tinfo.current.options & PPR_OPT_MASK) 4780 nego = NS_PPR; 4781 else if (tp->tinfo.current.width != BUS_8_BIT) 4782 nego = NS_WIDE; 4783 else if (tp->tinfo.current.offset != 0) 4784 nego = NS_SYNC; 4785 if (nego) 4786 msglen += 4787 sym_prepare_nego (np,cp, nego, &cp->scsi_smsg2[msglen]); 4788 /* 4789 * Message table indirect structure. 4790 */ 4791 cp->phys.smsg.addr = cpu_to_scr(CCB_BA (cp, scsi_smsg2)); 4792 cp->phys.smsg.size = cpu_to_scr(msglen); 4793 4794 /* 4795 * sense command 4796 */ 4797 cp->phys.cmd.addr = cpu_to_scr(CCB_BA (cp, sensecmd)); 4798 cp->phys.cmd.size = cpu_to_scr(6); 4799 4800 /* 4801 * patch requested size into sense command 4802 */ 4803 cp->sensecmd[0] = 0x03; 4804 cp->sensecmd[1] = cp->lun << 5; 4805 cp->sensecmd[4] = SYM_SNS_BBUF_LEN; 4806 cp->data_len = SYM_SNS_BBUF_LEN; 4807 4808 /* 4809 * sense data 4810 */ 4811 bzero(cp->sns_bbuf, SYM_SNS_BBUF_LEN); 4812 cp->phys.sense.addr = cpu_to_scr(vtobus(cp->sns_bbuf)); 4813 cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); 4814 4815 /* 4816 * requeue the command. 4817 */ 4818 startp = SCRIPTB_BA (np, sdata_in); 4819 4820 cp->phys.head.savep = cpu_to_scr(startp); 4821 cp->phys.head.goalp = cpu_to_scr(startp + 16); 4822 cp->phys.head.lastp = cpu_to_scr(startp); 4823 cp->startp = cpu_to_scr(startp); 4824 4825 cp->actualquirks = SYM_QUIRK_AUTOSAVE; 4826 cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY; 4827 cp->ssss_status = S_ILLEGAL; 4828 cp->host_flags = (HF_SENSE|HF_DATA_IN); 4829 cp->xerr_status = 0; 4830 cp->extra_bytes = 0; 4831 4832 cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, select)); 4833 4834 /* 4835 * Requeue the command. 4836 */ 4837 sym_put_start_queue(np, cp); 4838 4839 /* 4840 * Give back to upper layer everything we have dequeued. 
4841 */ 4842 sym_flush_comp_queue(np, 0); 4843 break; 4844 } 4845 } 4846 4847 /* 4848 * After a device has accepted some management message 4849 * as BUS DEVICE RESET, ABORT TASK, etc ..., or when 4850 * a device signals a UNIT ATTENTION condition, some 4851 * tasks are thrown away by the device. We are required 4852 * to reflect that on our tasks list since the device 4853 * will never complete these tasks. 4854 * 4855 * This function move from the BUSY queue to the COMP 4856 * queue all disconnected CCBs for a given target that 4857 * match the following criteria: 4858 * - lun=-1 means any logical UNIT otherwise a given one. 4859 * - task=-1 means any task, otherwise a given one. 4860 */ 4861 static int 4862 sym_clear_tasks(hcb_p np, int cam_status, int target, int lun, int task) 4863 { 4864 SYM_QUEHEAD qtmp, *qp; 4865 int i = 0; 4866 ccb_p cp; 4867 4868 /* 4869 * Move the entire BUSY queue to our temporary queue. 4870 */ 4871 sym_que_init(&qtmp); 4872 sym_que_splice(&np->busy_ccbq, &qtmp); 4873 sym_que_init(&np->busy_ccbq); 4874 4875 /* 4876 * Put all CCBs that matches our criteria into 4877 * the COMP queue and put back other ones into 4878 * the BUSY queue. 
4879 */ 4880 while ((qp = sym_remque_head(&qtmp)) != 0) { 4881 union ccb *ccb; 4882 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 4883 ccb = cp->cam_ccb; 4884 if (cp->host_status != HS_DISCONNECT || 4885 cp->target != target || 4886 (lun != -1 && cp->lun != lun) || 4887 (task != -1 && 4888 (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) { 4889 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); 4890 continue; 4891 } 4892 sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); 4893 4894 /* Preserve the software timeout condition */ 4895 if (sym_get_cam_status(ccb) != CAM_CMD_TIMEOUT) 4896 sym_set_cam_status(ccb, cam_status); 4897 ++i; 4898 #if 0 4899 printf("XXXX TASK @%p CLEARED\n", cp); 4900 #endif 4901 } 4902 return i; 4903 } 4904 4905 /* 4906 * chip handler for TASKS recovery 4907 * 4908 * We cannot safely abort a command, while the SCRIPTS 4909 * processor is running, since we just would be in race 4910 * with it. 4911 * 4912 * As long as we have tasks to abort, we keep the SEM 4913 * bit set in the ISTAT. When this bit is set, the 4914 * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) 4915 * each time it enters the scheduler. 4916 * 4917 * If we have to reset a target, clear tasks of a unit, 4918 * or to perform the abort of a disconnected job, we 4919 * restart the SCRIPTS for selecting the target. Once 4920 * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). 4921 * If it loses arbitration, the SCRIPTS will interrupt again 4922 * the next time it will enter its scheduler, and so on ... 4923 * 4924 * On SIR_TARGET_SELECTED, we scan for the more 4925 * appropriate thing to do: 4926 * 4927 * - If nothing, we just sent a M_ABORT message to the 4928 * target to get rid of the useless SCSI bus ownership. 4929 * According to the specs, no tasks shall be affected. 4930 * - If the target is to be reset, we send it a M_RESET 4931 * message. 4932 * - If a logical UNIT is to be cleared , we send the 4933 * IDENTIFY(lun) + M_ABORT. 
4934 * - If an untagged task is to be aborted, we send the 4935 * IDENTIFY(lun) + M_ABORT. 4936 * - If a tagged task is to be aborted, we send the 4937 * IDENTIFY(lun) + task attributes + M_ABORT_TAG. 4938 * 4939 * Once our 'kiss of death' :) message has been accepted 4940 * by the target, the SCRIPTS interrupts again 4941 * (SIR_ABORT_SENT). On this interrupt, we complete 4942 * all the CCBs that should have been aborted by the 4943 * target according to our message. 4944 */ 4945 static void sym_sir_task_recovery(hcb_p np, int num) 4946 { 4947 SYM_QUEHEAD *qp; 4948 ccb_p cp; 4949 tcb_p tp; 4950 int target=-1, lun=-1, task; 4951 int i, k; 4952 4953 switch(num) { 4954 /* 4955 * The SCRIPTS processor stopped before starting 4956 * the next command in order to allow us to perform 4957 * some task recovery. 4958 */ 4959 case SIR_SCRIPT_STOPPED: 4960 /* 4961 * Do we have any target to reset or unit to clear ? 4962 */ 4963 for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { 4964 tp = &np->target[i]; 4965 if (tp->to_reset || 4966 (tp->lun0p && tp->lun0p->to_clear)) { 4967 target = i; 4968 break; 4969 } 4970 if (!tp->lunmp) 4971 continue; 4972 for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { 4973 if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { 4974 target = i; 4975 break; 4976 } 4977 } 4978 if (target != -1) 4979 break; 4980 } 4981 4982 /* 4983 * If not, walk the busy queue for any 4984 * disconnected CCB to be aborted. 4985 */ 4986 if (target == -1) { 4987 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { 4988 cp = sym_que_entry(qp,struct sym_ccb,link_ccbq); 4989 if (cp->host_status != HS_DISCONNECT) 4990 continue; 4991 if (cp->to_abort) { 4992 target = cp->target; 4993 break; 4994 } 4995 } 4996 } 4997 4998 /* 4999 * If some target is to be selected, 5000 * prepare and start the selection. 
5001 */ 5002 if (target != -1) { 5003 tp = &np->target[target]; 5004 np->abrt_sel.sel_id = target; 5005 np->abrt_sel.sel_scntl3 = tp->head.wval; 5006 np->abrt_sel.sel_sxfer = tp->head.sval; 5007 OUTL(nc_dsa, np->hcb_ba); 5008 OUTL (nc_dsp, SCRIPTB_BA (np, sel_for_abort)); 5009 return; 5010 } 5011 5012 /* 5013 * Now look for a CCB to abort that haven't started yet. 5014 * Btw, the SCRIPTS processor is still stopped, so 5015 * we are not in race. 5016 */ 5017 i = 0; 5018 cp = 0; 5019 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { 5020 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 5021 if (cp->host_status != HS_BUSY && 5022 cp->host_status != HS_NEGOTIATE) 5023 continue; 5024 if (!cp->to_abort) 5025 continue; 5026 #ifdef SYM_CONF_IARB_SUPPORT 5027 /* 5028 * If we are using IMMEDIATE ARBITRATION, we donnot 5029 * want to cancel the last queued CCB, since the 5030 * SCRIPTS may have anticipated the selection. 5031 */ 5032 if (cp == np->last_cp) { 5033 cp->to_abort = 0; 5034 continue; 5035 } 5036 #endif 5037 i = 1; /* Means we have found some */ 5038 break; 5039 } 5040 if (!i) { 5041 /* 5042 * We are done, so we donnot need 5043 * to synchronize with the SCRIPTS anylonger. 5044 * Remove the SEM flag from the ISTAT. 5045 */ 5046 np->istat_sem = 0; 5047 OUTB (nc_istat, SIGP); 5048 break; 5049 } 5050 /* 5051 * Compute index of next position in the start 5052 * queue the SCRIPTS intends to start and dequeue 5053 * all CCBs for that device that haven't been started. 5054 */ 5055 i = (INL (nc_scratcha) - np->squeue_ba) / 4; 5056 i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1); 5057 5058 /* 5059 * Make sure at least our IO to abort has been dequeued. 5060 */ 5061 assert(i && sym_get_cam_status(cp->cam_ccb) == CAM_REQUEUE_REQ); 5062 5063 /* 5064 * Keep track in cam status of the reason of the abort. 
5065 */ 5066 if (cp->to_abort == 2) 5067 sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT); 5068 else 5069 sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED); 5070 5071 /* 5072 * Complete with error everything that we have dequeued. 5073 */ 5074 sym_flush_comp_queue(np, 0); 5075 break; 5076 /* 5077 * The SCRIPTS processor has selected a target 5078 * we may have some manual recovery to perform for. 5079 */ 5080 case SIR_TARGET_SELECTED: 5081 target = (INB (nc_sdid) & 0xf); 5082 tp = &np->target[target]; 5083 5084 np->abrt_tbl.addr = vtobus(np->abrt_msg); 5085 5086 /* 5087 * If the target is to be reset, prepare a 5088 * M_RESET message and clear the to_reset flag 5089 * since we donnot expect this operation to fail. 5090 */ 5091 if (tp->to_reset) { 5092 np->abrt_msg[0] = M_RESET; 5093 np->abrt_tbl.size = 1; 5094 tp->to_reset = 0; 5095 break; 5096 } 5097 5098 /* 5099 * Otherwise, look for some logical unit to be cleared. 5100 */ 5101 if (tp->lun0p && tp->lun0p->to_clear) 5102 lun = 0; 5103 else if (tp->lunmp) { 5104 for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) { 5105 if (tp->lunmp[k] && tp->lunmp[k]->to_clear) { 5106 lun = k; 5107 break; 5108 } 5109 } 5110 } 5111 5112 /* 5113 * If a logical unit is to be cleared, prepare 5114 * an IDENTIFY(lun) + ABORT MESSAGE. 5115 */ 5116 if (lun != -1) { 5117 lcb_p lp = sym_lp(np, tp, lun); 5118 lp->to_clear = 0; /* We donnot expect to fail here */ 5119 np->abrt_msg[0] = M_IDENTIFY | lun; 5120 np->abrt_msg[1] = M_ABORT; 5121 np->abrt_tbl.size = 2; 5122 break; 5123 } 5124 5125 /* 5126 * Otherwise, look for some disconnected job to 5127 * abort for this target. 
5128 */ 5129 i = 0; 5130 cp = 0; 5131 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { 5132 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 5133 if (cp->host_status != HS_DISCONNECT) 5134 continue; 5135 if (cp->target != target) 5136 continue; 5137 if (!cp->to_abort) 5138 continue; 5139 i = 1; /* Means we have some */ 5140 break; 5141 } 5142 5143 /* 5144 * If we have none, probably since the device has 5145 * completed the command before we won abitration, 5146 * send a M_ABORT message without IDENTIFY. 5147 * According to the specs, the device must just 5148 * disconnect the BUS and not abort any task. 5149 */ 5150 if (!i) { 5151 np->abrt_msg[0] = M_ABORT; 5152 np->abrt_tbl.size = 1; 5153 break; 5154 } 5155 5156 /* 5157 * We have some task to abort. 5158 * Set the IDENTIFY(lun) 5159 */ 5160 np->abrt_msg[0] = M_IDENTIFY | cp->lun; 5161 5162 /* 5163 * If we want to abort an untagged command, we 5164 * will send a IDENTIFY + M_ABORT. 5165 * Otherwise (tagged command), we will send 5166 * a IDENTITFY + task attributes + ABORT TAG. 5167 */ 5168 if (cp->tag == NO_TAG) { 5169 np->abrt_msg[1] = M_ABORT; 5170 np->abrt_tbl.size = 2; 5171 } 5172 else { 5173 np->abrt_msg[1] = cp->scsi_smsg[1]; 5174 np->abrt_msg[2] = cp->scsi_smsg[2]; 5175 np->abrt_msg[3] = M_ABORT_TAG; 5176 np->abrt_tbl.size = 4; 5177 } 5178 /* 5179 * Keep track of software timeout condition, since the 5180 * peripheral driver may not count retries on abort 5181 * conditions not due to timeout. 5182 */ 5183 if (cp->to_abort == 2) 5184 sym_set_cam_status(cp->cam_ccb, CAM_CMD_TIMEOUT); 5185 cp->to_abort = 0; /* We donnot expect to fail here */ 5186 break; 5187 5188 /* 5189 * The target has accepted our message and switched 5190 * to BUS FREE phase as we expected. 5191 */ 5192 case SIR_ABORT_SENT: 5193 target = (INB (nc_sdid) & 0xf); 5194 tp = &np->target[target]; 5195 5196 /* 5197 ** If we didn't abort anything, leave here. 
5198 */ 5199 if (np->abrt_msg[0] == M_ABORT) 5200 break; 5201 5202 /* 5203 * If we sent a M_RESET, then a hardware reset has 5204 * been performed by the target. 5205 * - Reset everything to async 8 bit 5206 * - Tell ourself to negotiate next time :-) 5207 * - Prepare to clear all disconnected CCBs for 5208 * this target from our task list (lun=task=-1) 5209 */ 5210 lun = -1; 5211 task = -1; 5212 if (np->abrt_msg[0] == M_RESET) { 5213 tp->head.sval = 0; 5214 tp->head.wval = np->rv_scntl3; 5215 tp->head.uval = 0; 5216 tp->tinfo.current.period = 0; 5217 tp->tinfo.current.offset = 0; 5218 tp->tinfo.current.width = BUS_8_BIT; 5219 tp->tinfo.current.options = 0; 5220 } 5221 5222 /* 5223 * Otherwise, check for the LUN and TASK(s) 5224 * concerned by the cancelation. 5225 * If it is not ABORT_TAG then it is CLEAR_QUEUE 5226 * or an ABORT message :-) 5227 */ 5228 else { 5229 lun = np->abrt_msg[0] & 0x3f; 5230 if (np->abrt_msg[1] == M_ABORT_TAG) 5231 task = np->abrt_msg[2]; 5232 } 5233 5234 /* 5235 * Complete all the CCBs the device should have 5236 * aborted due to our 'kiss of death' message. 5237 */ 5238 i = (INL (nc_scratcha) - np->squeue_ba) / 4; 5239 (void) sym_dequeue_from_squeue(np, i, target, lun, -1); 5240 (void) sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task); 5241 sym_flush_comp_queue(np, 0); 5242 5243 /* 5244 * If we sent a BDR, make uper layer aware of that. 5245 */ 5246 if (np->abrt_msg[0] == M_RESET) 5247 xpt_async(AC_SENT_BDR, np->path, NULL); 5248 break; 5249 } 5250 5251 /* 5252 * Print to the log the message we intend to send. 5253 */ 5254 if (num == SIR_TARGET_SELECTED) { 5255 PRINT_TARGET(np, target); 5256 sym_printl_hex("control msgout:", np->abrt_msg, 5257 np->abrt_tbl.size); 5258 np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size); 5259 } 5260 5261 /* 5262 * Let the SCRIPTS processor continue. 
5263 */ 5264 OUTONB (nc_dcntl, (STD|NOCOM)); 5265 } 5266 5267 /* 5268 * Gerard's alchemy:) that deals with with the data 5269 * pointer for both MDP and the residual calculation. 5270 * 5271 * I didn't want to bloat the code by more than 200 5272 * lignes for the handling of both MDP and the residual. 5273 * This has been achieved by using a data pointer 5274 * representation consisting in an index in the data 5275 * array (dp_sg) and a negative offset (dp_ofs) that 5276 * have the following meaning: 5277 * 5278 * - dp_sg = SYM_CONF_MAX_SG 5279 * we are at the end of the data script. 5280 * - dp_sg < SYM_CONF_MAX_SG 5281 * dp_sg points to the next entry of the scatter array 5282 * we want to transfer. 5283 * - dp_ofs < 0 5284 * dp_ofs represents the residual of bytes of the 5285 * previous entry scatter entry we will send first. 5286 * - dp_ofs = 0 5287 * no residual to send first. 5288 * 5289 * The function sym_evaluate_dp() accepts an arbitray 5290 * offset (basically from the MDP message) and returns 5291 * the corresponding values of dp_sg and dp_ofs. 5292 */ 5293 5294 static int sym_evaluate_dp(hcb_p np, ccb_p cp, u32 scr, int *ofs) 5295 { 5296 u32 dp_scr; 5297 int dp_ofs, dp_sg, dp_sgmin; 5298 int tmp; 5299 struct sym_pmc *pm; 5300 5301 /* 5302 * Compute the resulted data pointer in term of a script 5303 * address within some DATA script and a signed byte offset. 5304 */ 5305 dp_scr = scr; 5306 dp_ofs = *ofs; 5307 if (dp_scr == SCRIPTA_BA (np, pm0_data)) 5308 pm = &cp->phys.pm0; 5309 else if (dp_scr == SCRIPTA_BA (np, pm1_data)) 5310 pm = &cp->phys.pm1; 5311 else 5312 pm = 0; 5313 5314 if (pm) { 5315 dp_scr = scr_to_cpu(pm->ret); 5316 dp_ofs -= scr_to_cpu(pm->sg.size); 5317 } 5318 5319 /* 5320 * If we are auto-sensing, then we are done. 5321 */ 5322 if (cp->host_flags & HF_SENSE) { 5323 *ofs = dp_ofs; 5324 return 0; 5325 } 5326 5327 /* 5328 * Deduce the index of the sg entry. 5329 * Keep track of the index of the first valid entry. 
5330 * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the 5331 * end of the data. 5332 */ 5333 tmp = scr_to_cpu(cp->phys.head.goalp); 5334 dp_sg = SYM_CONF_MAX_SG; 5335 if (dp_scr != tmp) 5336 dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4); 5337 dp_sgmin = SYM_CONF_MAX_SG - cp->segments; 5338 5339 /* 5340 * Move to the sg entry the data pointer belongs to. 5341 * 5342 * If we are inside the data area, we expect result to be: 5343 * 5344 * Either, 5345 * dp_ofs = 0 and dp_sg is the index of the sg entry 5346 * the data pointer belongs to (or the end of the data) 5347 * Or, 5348 * dp_ofs < 0 and dp_sg is the index of the sg entry 5349 * the data pointer belongs to + 1. 5350 */ 5351 if (dp_ofs < 0) { 5352 int n; 5353 while (dp_sg > dp_sgmin) { 5354 --dp_sg; 5355 tmp = scr_to_cpu(cp->phys.data[dp_sg].size); 5356 n = dp_ofs + (tmp & 0xffffff); 5357 if (n > 0) { 5358 ++dp_sg; 5359 break; 5360 } 5361 dp_ofs = n; 5362 } 5363 } 5364 else if (dp_ofs > 0) { 5365 while (dp_sg < SYM_CONF_MAX_SG) { 5366 tmp = scr_to_cpu(cp->phys.data[dp_sg].size); 5367 dp_ofs -= (tmp & 0xffffff); 5368 ++dp_sg; 5369 if (dp_ofs <= 0) 5370 break; 5371 } 5372 } 5373 5374 /* 5375 * Make sure the data pointer is inside the data area. 5376 * If not, return some error. 5377 */ 5378 if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0)) 5379 goto out_err; 5380 else if (dp_sg > SYM_CONF_MAX_SG || 5381 (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0)) 5382 goto out_err; 5383 5384 /* 5385 * Save the extreme pointer if needed. 5386 */ 5387 if (dp_sg > cp->ext_sg || 5388 (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) { 5389 cp->ext_sg = dp_sg; 5390 cp->ext_ofs = dp_ofs; 5391 } 5392 5393 /* 5394 * Return data. 5395 */ 5396 *ofs = dp_ofs; 5397 return dp_sg; 5398 5399 out_err: 5400 return -1; 5401 } 5402 5403 /* 5404 * chip handler for MODIFY DATA POINTER MESSAGE 5405 * 5406 * We also call this function on IGNORE WIDE RESIDUE 5407 * messages that do not match a SWIDE full condition. 
5408 * Btw, we assume in that situation that such a message 5409 * is equivalent to a MODIFY DATA POINTER (offset=-1). 5410 */ 5411 5412 static void sym_modify_dp(hcb_p np, tcb_p tp, ccb_p cp, int ofs) 5413 { 5414 int dp_ofs = ofs; 5415 u32 dp_scr = INL (nc_temp); 5416 u32 dp_ret; 5417 u32 tmp; 5418 u_char hflags; 5419 int dp_sg; 5420 struct sym_pmc *pm; 5421 5422 /* 5423 * Not supported for auto-sense. 5424 */ 5425 if (cp->host_flags & HF_SENSE) 5426 goto out_reject; 5427 5428 /* 5429 * Apply our alchemy:) (see comments in sym_evaluate_dp()), 5430 * to the resulted data pointer. 5431 */ 5432 dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs); 5433 if (dp_sg < 0) 5434 goto out_reject; 5435 5436 /* 5437 * And our alchemy:) allows to easily calculate the data 5438 * script address we want to return for the next data phase. 5439 */ 5440 dp_ret = cpu_to_scr(cp->phys.head.goalp); 5441 dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4); 5442 5443 /* 5444 * If offset / scatter entry is zero we donnot need 5445 * a context for the new current data pointer. 5446 */ 5447 if (dp_ofs == 0) { 5448 dp_scr = dp_ret; 5449 goto out_ok; 5450 } 5451 5452 /* 5453 * Get a context for the new current data pointer. 5454 */ 5455 hflags = INB (HF_PRT); 5456 5457 if (hflags & HF_DP_SAVED) 5458 hflags ^= HF_ACT_PM; 5459 5460 if (!(hflags & HF_ACT_PM)) { 5461 pm = &cp->phys.pm0; 5462 dp_scr = SCRIPTA_BA (np, pm0_data); 5463 } 5464 else { 5465 pm = &cp->phys.pm1; 5466 dp_scr = SCRIPTA_BA (np, pm1_data); 5467 } 5468 5469 hflags &= ~(HF_DP_SAVED); 5470 5471 OUTB (HF_PRT, hflags); 5472 5473 /* 5474 * Set up the new current data pointer. 5475 * ofs < 0 there, and for the next data phase, we 5476 * want to transfer part of the data of the sg entry 5477 * corresponding to index dp_sg-1 prior to returning 5478 * to the main data script. 
5479 */ 5480 pm->ret = cpu_to_scr(dp_ret); 5481 tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr); 5482 tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs; 5483 pm->sg.addr = cpu_to_scr(tmp); 5484 pm->sg.size = cpu_to_scr(-dp_ofs); 5485 5486 out_ok: 5487 OUTL (nc_temp, dp_scr); 5488 OUTL (nc_dsp, SCRIPTA_BA (np, clrack)); 5489 return; 5490 5491 out_reject: 5492 OUTL (nc_dsp, SCRIPTB_BA (np, msg_bad)); 5493 } 5494 5495 5496 /* 5497 * chip calculation of the data residual. 5498 * 5499 * As I used to say, the requirement of data residual 5500 * in SCSI is broken, useless and cannot be achieved 5501 * without huge complexity. 5502 * But most OSes and even the official CAM require it. 5503 * When stupidity happens to be so widely spread inside 5504 * a community, it gets hard to convince. 5505 * 5506 * Anyway, I don't care, since I am not going to use 5507 * any software that considers this data residual as 5508 * a relevant information. :) 5509 */ 5510 5511 static int sym_compute_residual(hcb_p np, ccb_p cp) 5512 { 5513 int dp_sg, dp_sgmin, resid = 0; 5514 int dp_ofs = 0; 5515 5516 /* 5517 * Check for some data lost or just thrown away. 5518 * We are not required to be quite accurate in this 5519 * situation. Btw, if we are odd for output and the 5520 * device claims some more data, it may well happen 5521 * than our residual be zero. :-) 5522 */ 5523 if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) { 5524 if (cp->xerr_status & XE_EXTRA_DATA) 5525 resid -= cp->extra_bytes; 5526 if (cp->xerr_status & XE_SODL_UNRUN) 5527 ++resid; 5528 if (cp->xerr_status & XE_SWIDE_OVRUN) 5529 --resid; 5530 } 5531 5532 /* 5533 * If all data has been transferred, 5534 * there is no residual. 5535 */ 5536 if (cp->phys.head.lastp == cp->phys.head.goalp) 5537 return resid; 5538 5539 /* 5540 * If no data transfer occurs, or if the data 5541 * pointer is weird, return full residual. 
5542 */ 5543 if (cp->startp == cp->phys.head.lastp || 5544 sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp), 5545 &dp_ofs) < 0) { 5546 return cp->data_len; 5547 } 5548 5549 /* 5550 * If we were auto-sensing, then we are done. 5551 */ 5552 if (cp->host_flags & HF_SENSE) { 5553 return -dp_ofs; 5554 } 5555 5556 /* 5557 * We are now full comfortable in the computation 5558 * of the data residual (2's complement). 5559 */ 5560 dp_sgmin = SYM_CONF_MAX_SG - cp->segments; 5561 resid = -cp->ext_ofs; 5562 for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) { 5563 u_long tmp = scr_to_cpu(cp->phys.data[dp_sg].size); 5564 resid += (tmp & 0xffffff); 5565 } 5566 5567 /* 5568 * Hopefully, the result is not too wrong. 5569 */ 5570 return resid; 5571 } 5572 5573 /* 5574 * Print out the content of a SCSI message. 5575 */ 5576 5577 static int sym_show_msg (u_char * msg) 5578 { 5579 u_char i; 5580 printf ("%x",*msg); 5581 if (*msg==M_EXTENDED) { 5582 for (i=1;i<8;i++) { 5583 if (i-1>msg[1]) break; 5584 printf ("-%x",msg[i]); 5585 }; 5586 return (i+1); 5587 } else if ((*msg & 0xf0) == 0x20) { 5588 printf ("-%x",msg[1]); 5589 return (2); 5590 }; 5591 return (1); 5592 } 5593 5594 static void sym_print_msg (ccb_p cp, char *label, u_char *msg) 5595 { 5596 PRINT_ADDR(cp); 5597 if (label) 5598 printf ("%s: ", label); 5599 5600 (void) sym_show_msg (msg); 5601 printf (".\n"); 5602 } 5603 5604 /* 5605 * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER. 5606 * 5607 * When we try to negotiate, we append the negotiation message 5608 * to the identify and (maybe) simple tag message. 5609 * The host status field is set to HS_NEGOTIATE to mark this 5610 * situation. 5611 * 5612 * If the target doesn't answer this message immediately 5613 * (as required by the standard), the SIR_NEGO_FAILED interrupt 5614 * will be raised eventually. 5615 * The handler removes the HS_NEGOTIATE status, and sets the 5616 * negotiated value to the default (async / nowide). 
5617 * 5618 * If we receive a matching answer immediately, we check it 5619 * for validity, and set the values. 5620 * 5621 * If we receive a Reject message immediately, we assume the 5622 * negotiation has failed, and fall back to standard values. 5623 * 5624 * If we receive a negotiation message while not in HS_NEGOTIATE 5625 * state, it's a target initiated negotiation. We prepare a 5626 * (hopefully) valid answer, set our parameters, and send back 5627 * this answer to the target. 5628 * 5629 * If the target doesn't fetch the answer (no message out phase), 5630 * we assume the negotiation has failed, and fall back to default 5631 * settings (SIR_NEGO_PROTO interrupt). 5632 * 5633 * When we set the values, we adjust them in all ccbs belonging 5634 * to this target, in the controller's register, and in the "phys" 5635 * field of the controller's struct sym_hcb. 5636 */ 5637 5638 /* 5639 * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message. 5640 */ 5641 static void sym_sync_nego(hcb_p np, tcb_p tp, ccb_p cp) 5642 { 5643 u_char chg, ofs, per, fak, div; 5644 int req = 1; 5645 5646 /* 5647 * Synchronous request message received. 5648 */ 5649 if (DEBUG_FLAGS & DEBUG_NEGO) { 5650 sym_print_msg(cp, "sync msgin", np->msgin); 5651 }; 5652 5653 /* 5654 * request or answer ? 5655 */ 5656 if (INB (HS_PRT) == HS_NEGOTIATE) { 5657 OUTB (HS_PRT, HS_BUSY); 5658 if (cp->nego_status && cp->nego_status != NS_SYNC) 5659 goto reject_it; 5660 req = 0; 5661 } 5662 5663 /* 5664 * get requested values. 5665 */ 5666 chg = 0; 5667 per = np->msgin[3]; 5668 ofs = np->msgin[4]; 5669 5670 /* 5671 * check values against our limits. 
5672 */ 5673 if (ofs) { 5674 if (ofs > np->maxoffs) 5675 {chg = 1; ofs = np->maxoffs;} 5676 if (req) { 5677 if (ofs > tp->tinfo.user.offset) 5678 {chg = 1; ofs = tp->tinfo.user.offset;} 5679 } 5680 } 5681 5682 if (ofs) { 5683 if (per < np->minsync) 5684 {chg = 1; per = np->minsync;} 5685 if (req) { 5686 if (per < tp->tinfo.user.period) 5687 {chg = 1; per = tp->tinfo.user.period;} 5688 } 5689 } 5690 5691 div = fak = 0; 5692 if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0) 5693 goto reject_it; 5694 5695 if (DEBUG_FLAGS & DEBUG_NEGO) { 5696 PRINT_ADDR(cp); 5697 printf ("sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n", 5698 ofs, per, div, fak, chg); 5699 } 5700 5701 /* 5702 * This was an answer message 5703 */ 5704 if (req == 0) { 5705 if (chg) /* Answer wasn't acceptable. */ 5706 goto reject_it; 5707 sym_setsync (np, cp, ofs, per, div, fak); 5708 OUTL (nc_dsp, SCRIPTA_BA (np, clrack)); 5709 return; 5710 } 5711 5712 /* 5713 * It was a request. Set value and 5714 * prepare an answer message 5715 */ 5716 sym_setsync (np, cp, ofs, per, div, fak); 5717 5718 np->msgout[0] = M_EXTENDED; 5719 np->msgout[1] = 3; 5720 np->msgout[2] = M_X_SYNC_REQ; 5721 np->msgout[3] = per; 5722 np->msgout[4] = ofs; 5723 5724 cp->nego_status = NS_SYNC; 5725 5726 if (DEBUG_FLAGS & DEBUG_NEGO) { 5727 sym_print_msg(cp, "sync msgout", np->msgout); 5728 } 5729 5730 np->msgin [0] = M_NOOP; 5731 5732 OUTL (nc_dsp, SCRIPTB_BA (np, sdtr_resp)); 5733 return; 5734 reject_it: 5735 sym_setsync (np, cp, 0, 0, 0, 0); 5736 OUTL (nc_dsp, SCRIPTB_BA (np, msg_bad)); 5737 } 5738 5739 /* 5740 * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message. 5741 */ 5742 static void sym_ppr_nego(hcb_p np, tcb_p tp, ccb_p cp) 5743 { 5744 u_char chg, ofs, per, fak, dt, div, wide; 5745 int req = 1; 5746 5747 /* 5748 * Synchronous request message received. 5749 */ 5750 if (DEBUG_FLAGS & DEBUG_NEGO) { 5751 sym_print_msg(cp, "ppr msgin", np->msgin); 5752 }; 5753 5754 /* 5755 * request or answer ? 
5756 */ 5757 if (INB (HS_PRT) == HS_NEGOTIATE) { 5758 OUTB (HS_PRT, HS_BUSY); 5759 if (cp->nego_status && cp->nego_status != NS_PPR) 5760 goto reject_it; 5761 req = 0; 5762 } 5763 5764 /* 5765 * get requested values. 5766 */ 5767 chg = 0; 5768 per = np->msgin[3]; 5769 ofs = np->msgin[5]; 5770 wide = np->msgin[6]; 5771 dt = np->msgin[7] & PPR_OPT_DT; 5772 5773 /* 5774 * check values against our limits. 5775 */ 5776 if (wide > np->maxwide) 5777 {chg = 1; wide = np->maxwide;} 5778 if (!wide || !(np->features & FE_ULTRA3)) 5779 dt &= ~PPR_OPT_DT; 5780 if (req) { 5781 if (wide > tp->tinfo.user.width) 5782 {chg = 1; wide = tp->tinfo.user.width;} 5783 } 5784 5785 if (!(np->features & FE_U3EN)) /* Broken U3EN bit not supported */ 5786 dt &= ~PPR_OPT_DT; 5787 5788 if (dt != (np->msgin[7] & PPR_OPT_MASK)) chg = 1; 5789 5790 if (ofs) { 5791 if (ofs > np->maxoffs) 5792 {chg = 1; ofs = np->maxoffs;} 5793 if (req) { 5794 if (ofs > tp->tinfo.user.offset) 5795 {chg = 1; ofs = tp->tinfo.user.offset;} 5796 } 5797 } 5798 5799 if (ofs) { 5800 if (dt) { 5801 if (per < np->minsync_dt) 5802 {chg = 1; per = np->minsync_dt;} 5803 } 5804 else if (per < np->minsync) 5805 {chg = 1; per = np->minsync;} 5806 if (req) { 5807 if (per < tp->tinfo.user.period) 5808 {chg = 1; per = tp->tinfo.user.period;} 5809 } 5810 } 5811 5812 div = fak = 0; 5813 if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0) 5814 goto reject_it; 5815 5816 if (DEBUG_FLAGS & DEBUG_NEGO) { 5817 PRINT_ADDR(cp); 5818 printf ("ppr: " 5819 "dt=%x ofs=%d per=%d wide=%d div=%d fak=%d chg=%d.\n", 5820 dt, ofs, per, wide, div, fak, chg); 5821 } 5822 5823 /* 5824 * It was an answer. 5825 */ 5826 if (req == 0) { 5827 if (chg) /* Answer wasn't acceptable */ 5828 goto reject_it; 5829 sym_setpprot (np, cp, dt, ofs, per, wide, div, fak); 5830 OUTL (nc_dsp, SCRIPTA_BA (np, clrack)); 5831 return; 5832 } 5833 5834 /* 5835 * It was a request. 
Set value and 5836 * prepare an answer message 5837 */ 5838 sym_setpprot (np, cp, dt, ofs, per, wide, div, fak); 5839 5840 np->msgout[0] = M_EXTENDED; 5841 np->msgout[1] = 6; 5842 np->msgout[2] = M_X_PPR_REQ; 5843 np->msgout[3] = per; 5844 np->msgout[4] = 0; 5845 np->msgout[5] = ofs; 5846 np->msgout[6] = wide; 5847 np->msgout[7] = dt; 5848 5849 cp->nego_status = NS_PPR; 5850 5851 if (DEBUG_FLAGS & DEBUG_NEGO) { 5852 sym_print_msg(cp, "ppr msgout", np->msgout); 5853 } 5854 5855 np->msgin [0] = M_NOOP; 5856 5857 OUTL (nc_dsp, SCRIPTB_BA (np, ppr_resp)); 5858 return; 5859 reject_it: 5860 sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0); 5861 OUTL (nc_dsp, SCRIPTB_BA (np, msg_bad)); 5862 } 5863 5864 /* 5865 * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message. 5866 */ 5867 static void sym_wide_nego(hcb_p np, tcb_p tp, ccb_p cp) 5868 { 5869 u_char chg, wide; 5870 int req = 1; 5871 5872 /* 5873 * Wide request message received. 5874 */ 5875 if (DEBUG_FLAGS & DEBUG_NEGO) { 5876 sym_print_msg(cp, "wide msgin", np->msgin); 5877 }; 5878 5879 /* 5880 * Is it an request from the device? 5881 */ 5882 if (INB (HS_PRT) == HS_NEGOTIATE) { 5883 OUTB (HS_PRT, HS_BUSY); 5884 if (cp->nego_status && cp->nego_status != NS_WIDE) 5885 goto reject_it; 5886 req = 0; 5887 } 5888 5889 /* 5890 * get requested values. 5891 */ 5892 chg = 0; 5893 wide = np->msgin[3]; 5894 5895 /* 5896 * check values against driver limits. 5897 */ 5898 if (wide > np->maxoffs) 5899 {chg = 1; wide = np->maxoffs;} 5900 if (req) { 5901 if (wide > tp->tinfo.user.width) 5902 {chg = 1; wide = tp->tinfo.user.width;} 5903 } 5904 5905 if (DEBUG_FLAGS & DEBUG_NEGO) { 5906 PRINT_ADDR(cp); 5907 printf ("wdtr: wide=%d chg=%d.\n", wide, chg); 5908 } 5909 5910 /* 5911 * This was an answer message 5912 */ 5913 if (req == 0) { 5914 if (chg) /* Answer wasn't acceptable. */ 5915 goto reject_it; 5916 sym_setwide (np, cp, wide); 5917 5918 /* 5919 * Negotiate for SYNC immediately after WIDE response. 
5920 * This allows to negotiate for both WIDE and SYNC on 5921 * a single SCSI command (Suggested by Justin Gibbs). 5922 */ 5923 if (tp->tinfo.goal.offset) { 5924 np->msgout[0] = M_EXTENDED; 5925 np->msgout[1] = 3; 5926 np->msgout[2] = M_X_SYNC_REQ; 5927 np->msgout[3] = tp->tinfo.goal.period; 5928 np->msgout[4] = tp->tinfo.goal.offset; 5929 5930 if (DEBUG_FLAGS & DEBUG_NEGO) { 5931 sym_print_msg(cp, "sync msgout", np->msgout); 5932 } 5933 5934 cp->nego_status = NS_SYNC; 5935 OUTB (HS_PRT, HS_NEGOTIATE); 5936 OUTL (nc_dsp, SCRIPTB_BA (np, sdtr_resp)); 5937 return; 5938 } 5939 5940 OUTL (nc_dsp, SCRIPTA_BA (np, clrack)); 5941 return; 5942 }; 5943 5944 /* 5945 * It was a request, set value and 5946 * prepare an answer message 5947 */ 5948 sym_setwide (np, cp, wide); 5949 5950 np->msgout[0] = M_EXTENDED; 5951 np->msgout[1] = 2; 5952 np->msgout[2] = M_X_WIDE_REQ; 5953 np->msgout[3] = wide; 5954 5955 np->msgin [0] = M_NOOP; 5956 5957 cp->nego_status = NS_WIDE; 5958 5959 if (DEBUG_FLAGS & DEBUG_NEGO) { 5960 sym_print_msg(cp, "wide msgout", np->msgout); 5961 } 5962 5963 OUTL (nc_dsp, SCRIPTB_BA (np, wdtr_resp)); 5964 return; 5965 reject_it: 5966 OUTL (nc_dsp, SCRIPTB_BA (np, msg_bad)); 5967 } 5968 5969 /* 5970 * Reset SYNC or WIDE to default settings. 5971 * 5972 * Called when a negotiation does not succeed either 5973 * on rejection or on protocol error. 5974 */ 5975 static void sym_nego_default(hcb_p np, tcb_p tp, ccb_p cp) 5976 { 5977 /* 5978 * any error in negotiation: 5979 * fall back to default mode. 5980 */ 5981 switch (cp->nego_status) { 5982 case NS_PPR: 5983 sym_setpprot (np, cp, 0, 0, 0, 0, 0, 0); 5984 break; 5985 case NS_SYNC: 5986 sym_setsync (np, cp, 0, 0, 0, 0); 5987 break; 5988 case NS_WIDE: 5989 sym_setwide (np, cp, 0); 5990 break; 5991 }; 5992 np->msgin [0] = M_NOOP; 5993 np->msgout[0] = M_NOOP; 5994 cp->nego_status = 0; 5995 } 5996 5997 /* 5998 * chip handler for MESSAGE REJECT received in response to 5999 * a WIDE or SYNCHRONOUS negotiation. 
6000 */ 6001 static void sym_nego_rejected(hcb_p np, tcb_p tp, ccb_p cp) 6002 { 6003 sym_nego_default(np, tp, cp); 6004 OUTB (HS_PRT, HS_BUSY); 6005 } 6006 6007 /* 6008 * chip exception handler for programmed interrupts. 6009 */ 6010 void sym_int_sir (hcb_p np) 6011 { 6012 u_char num = INB (nc_dsps); 6013 u_long dsa = INL (nc_dsa); 6014 ccb_p cp = sym_ccb_from_dsa(np, dsa); 6015 u_char target = INB (nc_sdid) & 0x0f; 6016 tcb_p tp = &np->target[target]; 6017 int tmp; 6018 6019 if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num); 6020 6021 switch (num) { 6022 /* 6023 * Command has been completed with error condition 6024 * or has been auto-sensed. 6025 */ 6026 case SIR_COMPLETE_ERROR: 6027 sym_complete_error(np, cp); 6028 return; 6029 /* 6030 * The C code is currently trying to recover from something. 6031 * Typically, user want to abort some command. 6032 */ 6033 case SIR_SCRIPT_STOPPED: 6034 case SIR_TARGET_SELECTED: 6035 case SIR_ABORT_SENT: 6036 sym_sir_task_recovery(np, num); 6037 return; 6038 /* 6039 * The device didn't go to MSG OUT phase after having 6040 * been selected with ATN. We donnot want to handle 6041 * that. 6042 */ 6043 case SIR_SEL_ATN_NO_MSG_OUT: 6044 printf ("%s:%d: No MSG OUT phase after selection with ATN.\n", 6045 sym_name (np), target); 6046 goto out_stuck; 6047 /* 6048 * The device didn't switch to MSG IN phase after 6049 * having reseleted the initiator. 6050 */ 6051 case SIR_RESEL_NO_MSG_IN: 6052 printf ("%s:%d: No MSG IN phase after reselection.\n", 6053 sym_name (np), target); 6054 goto out_stuck; 6055 /* 6056 * After reselection, the device sent a message that wasn't 6057 * an IDENTIFY. 6058 */ 6059 case SIR_RESEL_NO_IDENTIFY: 6060 printf ("%s:%d: No IDENTIFY after reselection.\n", 6061 sym_name (np), target); 6062 goto out_stuck; 6063 /* 6064 * The device reselected a LUN we donnot know about. 
6065 */ 6066 case SIR_RESEL_BAD_LUN: 6067 np->msgout[0] = M_RESET; 6068 goto out; 6069 /* 6070 * The device reselected for an untagged nexus and we 6071 * haven't any. 6072 */ 6073 case SIR_RESEL_BAD_I_T_L: 6074 np->msgout[0] = M_ABORT; 6075 goto out; 6076 /* 6077 * The device reselected for a tagged nexus that we donnot 6078 * have. 6079 */ 6080 case SIR_RESEL_BAD_I_T_L_Q: 6081 np->msgout[0] = M_ABORT_TAG; 6082 goto out; 6083 /* 6084 * The SCRIPTS let us know that the device has grabbed 6085 * our message and will abort the job. 6086 */ 6087 case SIR_RESEL_ABORTED: 6088 np->lastmsg = np->msgout[0]; 6089 np->msgout[0] = M_NOOP; 6090 printf ("%s:%d: message %x sent on bad reselection.\n", 6091 sym_name (np), target, np->lastmsg); 6092 goto out; 6093 /* 6094 * The SCRIPTS let us know that a message has been 6095 * successfully sent to the device. 6096 */ 6097 case SIR_MSG_OUT_DONE: 6098 np->lastmsg = np->msgout[0]; 6099 np->msgout[0] = M_NOOP; 6100 /* Should we really care of that */ 6101 if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) { 6102 if (cp) { 6103 cp->xerr_status &= ~XE_PARITY_ERR; 6104 if (!cp->xerr_status) 6105 OUTOFFB (HF_PRT, HF_EXT_ERR); 6106 } 6107 } 6108 goto out; 6109 /* 6110 * The device didn't send a GOOD SCSI status. 6111 * We may have some work to do prior to allow 6112 * the SCRIPTS processor to continue. 6113 */ 6114 case SIR_BAD_SCSI_STATUS: 6115 if (!cp) 6116 goto out; 6117 sym_sir_bad_scsi_status(np, num, cp); 6118 return; 6119 /* 6120 * We are asked by the SCRIPTS to prepare a 6121 * REJECT message. 6122 */ 6123 case SIR_REJECT_TO_SEND: 6124 sym_print_msg(cp, "M_REJECT to send for ", np->msgin); 6125 np->msgout[0] = M_REJECT; 6126 goto out; 6127 /* 6128 * We have been ODD at the end of a DATA IN 6129 * transfer and the device didn't send a 6130 * IGNORE WIDE RESIDUE message. 6131 * It is a data overrun condition. 
6132 */ 6133 case SIR_SWIDE_OVERRUN: 6134 if (cp) { 6135 OUTONB (HF_PRT, HF_EXT_ERR); 6136 cp->xerr_status |= XE_SWIDE_OVRUN; 6137 } 6138 goto out; 6139 /* 6140 * We have been ODD at the end of a DATA OUT 6141 * transfer. 6142 * It is a data underrun condition. 6143 */ 6144 case SIR_SODL_UNDERRUN: 6145 if (cp) { 6146 OUTONB (HF_PRT, HF_EXT_ERR); 6147 cp->xerr_status |= XE_SODL_UNRUN; 6148 } 6149 goto out; 6150 /* 6151 * The device wants us to tranfer more data than 6152 * expected or in the wrong direction. 6153 * The number of extra bytes is in scratcha. 6154 * It is a data overrun condition. 6155 */ 6156 case SIR_DATA_OVERRUN: 6157 if (cp) { 6158 OUTONB (HF_PRT, HF_EXT_ERR); 6159 cp->xerr_status |= XE_EXTRA_DATA; 6160 cp->extra_bytes += INL (nc_scratcha); 6161 } 6162 goto out; 6163 /* 6164 * The device switched to an illegal phase (4/5). 6165 */ 6166 case SIR_BAD_PHASE: 6167 if (cp) { 6168 OUTONB (HF_PRT, HF_EXT_ERR); 6169 cp->xerr_status |= XE_BAD_PHASE; 6170 } 6171 goto out; 6172 /* 6173 * We received a message. 6174 */ 6175 case SIR_MSG_RECEIVED: 6176 if (!cp) 6177 goto out_stuck; 6178 switch (np->msgin [0]) { 6179 /* 6180 * We received an extended message. 6181 * We handle MODIFY DATA POINTER, SDTR, WDTR 6182 * and reject all other extended messages. 6183 */ 6184 case M_EXTENDED: 6185 switch (np->msgin [2]) { 6186 case M_X_MODIFY_DP: 6187 if (DEBUG_FLAGS & DEBUG_POINTER) 6188 sym_print_msg(cp,"modify DP",np->msgin); 6189 tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + 6190 (np->msgin[5]<<8) + (np->msgin[6]); 6191 sym_modify_dp(np, tp, cp, tmp); 6192 return; 6193 case M_X_SYNC_REQ: 6194 sym_sync_nego(np, tp, cp); 6195 return; 6196 case M_X_PPR_REQ: 6197 sym_ppr_nego(np, tp, cp); 6198 return; 6199 case M_X_WIDE_REQ: 6200 sym_wide_nego(np, tp, cp); 6201 return; 6202 default: 6203 goto out_reject; 6204 } 6205 break; 6206 /* 6207 * We received a 1/2 byte message not handled from SCRIPTS. 
6208 * We are only expecting MESSAGE REJECT and IGNORE WIDE 6209 * RESIDUE messages that haven't been anticipated by 6210 * SCRIPTS on SWIDE full condition. Unanticipated IGNORE 6211 * WIDE RESIDUE messages are aliased as MODIFY DP (-1). 6212 */ 6213 case M_IGN_RESIDUE: 6214 if (DEBUG_FLAGS & DEBUG_POINTER) 6215 sym_print_msg(cp,"ign wide residue", np->msgin); 6216 sym_modify_dp(np, tp, cp, -1); 6217 return; 6218 case M_REJECT: 6219 if (INB (HS_PRT) == HS_NEGOTIATE) 6220 sym_nego_rejected(np, tp, cp); 6221 else { 6222 PRINT_ADDR(cp); 6223 printf ("M_REJECT received (%x:%x).\n", 6224 scr_to_cpu(np->lastmsg), np->msgout[0]); 6225 } 6226 goto out_clrack; 6227 break; 6228 default: 6229 goto out_reject; 6230 } 6231 break; 6232 /* 6233 * We received an unknown message. 6234 * Ignore all MSG IN phases and reject it. 6235 */ 6236 case SIR_MSG_WEIRD: 6237 sym_print_msg(cp, "WEIRD message received", np->msgin); 6238 OUTL (nc_dsp, SCRIPTB_BA (np, msg_weird)); 6239 return; 6240 /* 6241 * Negotiation failed. 6242 * Target does not send us the reply. 6243 * Remove the HS_NEGOTIATE status. 6244 */ 6245 case SIR_NEGO_FAILED: 6246 OUTB (HS_PRT, HS_BUSY); 6247 /* 6248 * Negotiation failed. 6249 * Target does not want answer message. 
6250 */ 6251 case SIR_NEGO_PROTO: 6252 sym_nego_default(np, tp, cp); 6253 goto out; 6254 }; 6255 6256 out: 6257 OUTONB (nc_dcntl, (STD|NOCOM)); 6258 return; 6259 out_reject: 6260 OUTL (nc_dsp, SCRIPTB_BA (np, msg_bad)); 6261 return; 6262 out_clrack: 6263 OUTL (nc_dsp, SCRIPTA_BA (np, clrack)); 6264 return; 6265 out_stuck: 6266 } 6267 6268 /* 6269 * Acquire a control block 6270 */ 6271 static ccb_p sym_get_ccb (hcb_p np, u_char tn, u_char ln, u_char tag_order) 6272 { 6273 tcb_p tp = &np->target[tn]; 6274 lcb_p lp = sym_lp(np, tp, ln); 6275 u_short tag = NO_TAG; 6276 SYM_QUEHEAD *qp; 6277 ccb_p cp = (ccb_p) 0; 6278 6279 /* 6280 * Look for a free CCB 6281 */ 6282 if (sym_que_empty(&np->free_ccbq)) 6283 (void) sym_alloc_ccb(np); 6284 qp = sym_remque_head(&np->free_ccbq); 6285 if (!qp) 6286 goto out; 6287 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 6288 6289 /* 6290 * If the LCB is not yet available and the LUN 6291 * has been probed ok, try to allocate the LCB. 6292 */ 6293 if (!lp && sym_is_bit(tp->lun_map, ln)) { 6294 lp = sym_alloc_lcb(np, tn, ln); 6295 if (!lp) 6296 goto out_free; 6297 } 6298 6299 /* 6300 * If the LCB is not available here, then the 6301 * logical unit is not yet discovered. For those 6302 * ones only accept 1 SCSI IO per logical unit, 6303 * since we cannot allow disconnections. 6304 */ 6305 if (!lp) { 6306 if (!sym_is_bit(tp->busy0_map, ln)) 6307 sym_set_bit(tp->busy0_map, ln); 6308 else 6309 goto out_free; 6310 } else { 6311 /* 6312 * If we have been asked for a tagged command. 6313 */ 6314 if (tag_order) { 6315 /* 6316 * Debugging purpose. 6317 */ 6318 assert(lp->busy_itl == 0); 6319 /* 6320 * Allocate resources for tags if not yet. 6321 */ 6322 if (!lp->cb_tags) { 6323 sym_alloc_lcb_tags(np, tn, ln); 6324 if (!lp->cb_tags) 6325 goto out_free; 6326 } 6327 /* 6328 * Get a tag for this SCSI IO and set up 6329 * the CCB bus address for reselection, 6330 * and count it for this LUN. 6331 * Toggle reselect path to tagged. 
6332 */ 6333 if (lp->busy_itlq < SYM_CONF_MAX_TASK) { 6334 tag = lp->cb_tags[lp->ia_tag]; 6335 if (++lp->ia_tag == SYM_CONF_MAX_TASK) 6336 lp->ia_tag = 0; 6337 lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba); 6338 ++lp->busy_itlq; 6339 lp->head.resel_sa = 6340 cpu_to_scr(SCRIPTA_BA (np, resel_tag)); 6341 } 6342 else 6343 goto out_free; 6344 } 6345 /* 6346 * This command will not be tagged. 6347 * If we already have either a tagged or untagged 6348 * one, refuse to overlap this untagged one. 6349 */ 6350 else { 6351 /* 6352 * Debugging purpose. 6353 */ 6354 assert(lp->busy_itl == 0 && lp->busy_itlq == 0); 6355 /* 6356 * Count this nexus for this LUN. 6357 * Set up the CCB bus address for reselection. 6358 * Toggle reselect path to untagged. 6359 */ 6360 if (++lp->busy_itl == 1) { 6361 lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba); 6362 lp->head.resel_sa = 6363 cpu_to_scr(SCRIPTA_BA (np, resel_no_tag)); 6364 } 6365 else 6366 goto out_free; 6367 } 6368 } 6369 /* 6370 * Put the CCB into the busy queue. 6371 */ 6372 sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq); 6373 6374 /* 6375 * Remember all informations needed to free this CCB. 
6376 */ 6377 cp->to_abort = 0; 6378 cp->tag = tag; 6379 cp->target = tn; 6380 cp->lun = ln; 6381 6382 if (DEBUG_FLAGS & DEBUG_TAGS) { 6383 PRINT_LUN(np, tn, ln); 6384 printf ("ccb @%p using tag %d.\n", cp, tag); 6385 } 6386 6387 out: 6388 return cp; 6389 out_free: 6390 sym_insque_head(&cp->link_ccbq, &np->free_ccbq); 6391 return (ccb_p) 0; 6392 } 6393 6394 /* 6395 * Release one control block 6396 */ 6397 static void sym_free_ccb (hcb_p np, ccb_p cp) 6398 { 6399 tcb_p tp = &np->target[cp->target]; 6400 lcb_p lp = sym_lp(np, tp, cp->lun); 6401 6402 if (DEBUG_FLAGS & DEBUG_TAGS) { 6403 PRINT_LUN(np, cp->target, cp->lun); 6404 printf ("ccb @%p freeing tag %d.\n", cp, cp->tag); 6405 } 6406 6407 /* 6408 * If LCB available, 6409 */ 6410 if (lp) { 6411 /* 6412 * If tagged, release the tag, set the relect path 6413 */ 6414 if (cp->tag != NO_TAG) { 6415 /* 6416 * Free the tag value. 6417 */ 6418 lp->cb_tags[lp->if_tag] = cp->tag; 6419 if (++lp->if_tag == SYM_CONF_MAX_TASK) 6420 lp->if_tag = 0; 6421 /* 6422 * Make the reselect path invalid, 6423 * and uncount this CCB. 6424 */ 6425 lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba); 6426 --lp->busy_itlq; 6427 } else { /* Untagged */ 6428 /* 6429 * Make the reselect path invalid, 6430 * and uncount this CCB. 6431 */ 6432 lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba); 6433 --lp->busy_itl; 6434 } 6435 /* 6436 * If no JOB active, make the LUN reselect path invalid. 6437 */ 6438 if (lp->busy_itlq == 0 && lp->busy_itl == 0) 6439 lp->head.resel_sa = 6440 cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun)); 6441 } 6442 /* 6443 * Otherwise, we only accept 1 IO per LUN. 6444 * Clear the bit that keeps track of this IO. 6445 */ 6446 else 6447 sym_clr_bit(tp->busy0_map, cp->lun); 6448 6449 /* 6450 * We donnot queue more than 1 ccb per target 6451 * with negotiation at any time. If this ccb was 6452 * used for negotiation, clear this info in the tcb. 
6453 */ 6454 if (cp == tp->nego_cp) 6455 tp->nego_cp = 0; 6456 6457 #ifdef SYM_CONF_IARB_SUPPORT 6458 /* 6459 * If we just complete the last queued CCB, 6460 * clear this info that is no longer relevant. 6461 */ 6462 if (cp == np->last_cp) 6463 np->last_cp = 0; 6464 #endif 6465 6466 #ifdef FreeBSD_Bus_Dma_Abstraction 6467 /* 6468 * Unmap user data from DMA map if needed. 6469 */ 6470 if (cp->dmamapped) { 6471 bus_dmamap_unload(np->data_dmat, cp->dmamap); 6472 cp->dmamapped = 0; 6473 } 6474 #endif 6475 6476 /* 6477 * Make this CCB available. 6478 */ 6479 cp->cam_ccb = 0; 6480 cp->host_status = HS_IDLE; 6481 sym_remque(&cp->link_ccbq); 6482 sym_insque_head(&cp->link_ccbq, &np->free_ccbq); 6483 } 6484 6485 /* 6486 * Allocate a CCB from memory and initialize its fixed part. 6487 */ 6488 static ccb_p sym_alloc_ccb(hcb_p np) 6489 { 6490 ccb_p cp = 0; 6491 int hcode; 6492 6493 /* 6494 * Prevent from allocating more CCBs than we can 6495 * queue to the controller. 6496 */ 6497 if (np->actccbs >= SYM_CONF_MAX_START) 6498 return 0; 6499 6500 /* 6501 * Allocate memory for this CCB. 6502 */ 6503 cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB"); 6504 if (!cp) 6505 goto out_free; 6506 6507 /* 6508 * Allocate a bounce buffer for sense data. 6509 */ 6510 cp->sns_bbuf = sym_calloc_dma(SYM_SNS_BBUF_LEN, "SNS_BBUF"); 6511 if (!cp->sns_bbuf) 6512 goto out_free; 6513 6514 /* 6515 * Allocate a map for the DMA of user data. 6516 */ 6517 #ifdef FreeBSD_Bus_Dma_Abstraction 6518 if (bus_dmamap_create(np->data_dmat, 0, &cp->dmamap)) 6519 goto out_free; 6520 #endif 6521 /* 6522 * Count it. 6523 */ 6524 np->actccbs++; 6525 6526 /* 6527 * Compute the bus address of this ccb. 6528 */ 6529 cp->ccb_ba = vtobus(cp); 6530 6531 /* 6532 * Insert this ccb into the hashed list. 6533 */ 6534 hcode = CCB_HASH_CODE(cp->ccb_ba); 6535 cp->link_ccbh = np->ccbh[hcode]; 6536 np->ccbh[hcode] = cp; 6537 6538 /* 6539 * Initialyze the start and restart actions. 
6540 */ 6541 cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA (np, idle)); 6542 cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); 6543 6544 /* 6545 * Initilialyze some other fields. 6546 */ 6547 cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2])); 6548 6549 /* 6550 * Chain into free ccb queue. 6551 */ 6552 sym_insque_head(&cp->link_ccbq, &np->free_ccbq); 6553 6554 return cp; 6555 out_free: 6556 if (cp) { 6557 if (cp->sns_bbuf) 6558 sym_mfree_dma(cp->sns_bbuf,SYM_SNS_BBUF_LEN,"SNS_BBUF"); 6559 sym_mfree_dma(cp, sizeof(*cp), "CCB"); 6560 } 6561 return 0; 6562 } 6563 6564 /* 6565 * Look up a CCB from a DSA value. 6566 */ 6567 static ccb_p sym_ccb_from_dsa(hcb_p np, u_long dsa) 6568 { 6569 int hcode; 6570 ccb_p cp; 6571 6572 hcode = CCB_HASH_CODE(dsa); 6573 cp = np->ccbh[hcode]; 6574 while (cp) { 6575 if (cp->ccb_ba == dsa) 6576 break; 6577 cp = cp->link_ccbh; 6578 } 6579 6580 return cp; 6581 } 6582 6583 /* 6584 * Target control block initialisation. 6585 * Nothing important to do at the moment. 6586 */ 6587 static void sym_init_tcb (hcb_p np, u_char tn) 6588 { 6589 /* 6590 * Check some alignments required by the chip. 6591 */ 6592 assert (((offsetof(struct sym_reg, nc_sxfer) ^ 6593 offsetof(struct sym_tcb, head.sval)) &3) == 0); 6594 assert (((offsetof(struct sym_reg, nc_scntl3) ^ 6595 offsetof(struct sym_tcb, head.wval)) &3) == 0); 6596 } 6597 6598 /* 6599 * Lun control block allocation and initialization. 6600 */ 6601 static lcb_p sym_alloc_lcb (hcb_p np, u_char tn, u_char ln) 6602 { 6603 tcb_p tp = &np->target[tn]; 6604 lcb_p lp = sym_lp(np, tp, ln); 6605 6606 /* 6607 * Already done, just return. 6608 */ 6609 if (lp) 6610 return lp; 6611 /* 6612 * Check against some race. 6613 */ 6614 assert(!sym_is_bit(tp->busy0_map, ln)); 6615 6616 /* 6617 * Initialize the target control block if not yet. 6618 */ 6619 sym_init_tcb (np, tn); 6620 6621 /* 6622 * Allocate the LCB bus address array. 6623 * Compute the bus address of this table. 
6624 */ 6625 if (ln && !tp->luntbl) { 6626 int i; 6627 6628 tp->luntbl = sym_calloc_dma(256, "LUNTBL"); 6629 if (!tp->luntbl) 6630 goto fail; 6631 for (i = 0 ; i < 64 ; i++) 6632 tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); 6633 tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl)); 6634 } 6635 6636 /* 6637 * Allocate the table of pointers for LUN(s) > 0, if needed. 6638 */ 6639 if (ln && !tp->lunmp) { 6640 tp->lunmp = sym_calloc(SYM_CONF_MAX_LUN * sizeof(lcb_p), 6641 "LUNMP"); 6642 if (!tp->lunmp) 6643 goto fail; 6644 } 6645 6646 /* 6647 * Allocate the lcb. 6648 * Make it available to the chip. 6649 */ 6650 lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB"); 6651 if (!lp) 6652 goto fail; 6653 if (ln) { 6654 tp->lunmp[ln] = lp; 6655 tp->luntbl[ln] = cpu_to_scr(vtobus(lp)); 6656 } 6657 else { 6658 tp->lun0p = lp; 6659 tp->head.lun0_sa = cpu_to_scr(vtobus(lp)); 6660 } 6661 6662 /* 6663 * Let the itl task point to error handling. 6664 */ 6665 lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba); 6666 6667 /* 6668 * Set the reselect pattern to our default. :) 6669 */ 6670 lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun)); 6671 6672 /* 6673 * Set user capabilities. 6674 */ 6675 lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); 6676 6677 fail: 6678 return lp; 6679 } 6680 6681 /* 6682 * Allocate LCB resources for tagged command queuing. 6683 */ 6684 static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln) 6685 { 6686 tcb_p tp = &np->target[tn]; 6687 lcb_p lp = sym_lp(np, tp, ln); 6688 int i; 6689 6690 /* 6691 * If LCB not available, try to allocate it. 6692 */ 6693 if (!lp && !(lp = sym_alloc_lcb(np, tn, ln))) 6694 goto fail; 6695 6696 /* 6697 * Allocate the task table and and the tag allocation 6698 * circular buffer. We want both or none. 
6699 */ 6700 lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); 6701 if (!lp->itlq_tbl) 6702 goto fail; 6703 lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS"); 6704 if (!lp->cb_tags) { 6705 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); 6706 lp->itlq_tbl = 0; 6707 goto fail; 6708 } 6709 6710 /* 6711 * Initialize the task table with invalid entries. 6712 */ 6713 for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++) 6714 lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba); 6715 6716 /* 6717 * Fill up the tag buffer with tag numbers. 6718 */ 6719 for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++) 6720 lp->cb_tags[i] = i; 6721 6722 /* 6723 * Make the task table available to SCRIPTS, 6724 * And accept tagged commands now. 6725 */ 6726 lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl)); 6727 6728 return; 6729 fail: 6730 } 6731 6732 /* 6733 * Test the pci bus snoop logic :-( 6734 * 6735 * Has to be called with interrupts disabled. 6736 */ 6737 #ifndef SYM_CONF_IOMAPPED 6738 static int sym_regtest (hcb_p np) 6739 { 6740 register volatile u32 data; 6741 /* 6742 * chip registers may NOT be cached. 6743 * write 0xffffffff to a read only register area, 6744 * and try to read it back. 6745 */ 6746 data = 0xffffffff; 6747 OUTL_OFF(offsetof(struct sym_reg, nc_dstat), data); 6748 data = INL_OFF(offsetof(struct sym_reg, nc_dstat)); 6749 #if 1 6750 if (data == 0xffffffff) { 6751 #else 6752 if ((data & 0xe2f0fffd) != 0x02000080) { 6753 #endif 6754 printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", 6755 (unsigned) data); 6756 return (0x10); 6757 }; 6758 return (0); 6759 } 6760 #endif 6761 6762 static int sym_snooptest (hcb_p np) 6763 { 6764 u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc; 6765 int i, err=0; 6766 #ifndef SYM_CONF_IOMAPPED 6767 err |= sym_regtest (np); 6768 if (err) return (err); 6769 #endif 6770 /* 6771 * init 6772 */ 6773 pc = SCRIPTB0_BA (np, snooptest); 6774 host_wr = 1; 6775 sym_wr = 2; 6776 /* 6777 * Set memory and register. 
6778 */ 6779 np->cache = cpu_to_scr(host_wr); 6780 OUTL (nc_temp, sym_wr); 6781 /* 6782 * Start script (exchange values) 6783 */ 6784 OUTL (nc_dsa, np->hcb_ba); 6785 OUTL (nc_dsp, pc); 6786 /* 6787 * Wait 'til done (with timeout) 6788 */ 6789 for (i=0; i<SYM_SNOOP_TIMEOUT; i++) 6790 if (INB(nc_istat) & (INTF|SIP|DIP)) 6791 break; 6792 /* 6793 * Save termination position. 6794 */ 6795 pc = INL (nc_dsp); 6796 /* 6797 * Read memory and register. 6798 */ 6799 host_rd = scr_to_cpu(np->cache); 6800 sym_rd = INL (nc_scratcha); 6801 sym_bk = INL (nc_temp); 6802 6803 /* 6804 * check for timeout 6805 */ 6806 if (i>=SYM_SNOOP_TIMEOUT) { 6807 printf ("CACHE TEST FAILED: timeout.\n"); 6808 return (0x20); 6809 }; 6810 /* 6811 * Check termination position. 6812 */ 6813 if (pc != SCRIPTB0_BA (np, snoopend)+8) { 6814 printf ("CACHE TEST FAILED: script execution failed.\n"); 6815 printf ("start=%08lx, pc=%08lx, end=%08lx\n", 6816 (u_long) SCRIPTB0_BA (np, snooptest), (u_long) pc, 6817 (u_long) SCRIPTB0_BA (np, snoopend) +8); 6818 return (0x40); 6819 }; 6820 /* 6821 * Show results. 6822 */ 6823 if (host_wr != sym_rd) { 6824 printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n", 6825 (int) host_wr, (int) sym_rd); 6826 err |= 1; 6827 }; 6828 if (host_rd != sym_wr) { 6829 printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n", 6830 (int) sym_wr, (int) host_rd); 6831 err |= 2; 6832 }; 6833 if (sym_bk != sym_wr) { 6834 printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n", 6835 (int) sym_wr, (int) sym_bk); 6836 err |= 4; 6837 }; 6838 6839 return (err); 6840 } 6841 6842 /* 6843 * Determine the chip's clock frequency. 6844 * 6845 * This is essential for the negotiation of the synchronous 6846 * transfer rate. 6847 * 6848 * Note: we have to return the correct value. 6849 * THERE IS NO SAFE DEFAULT VALUE. 6850 * 6851 * Most NCR/SYMBIOS boards are delivered with a 40 Mhz clock. 6852 * 53C860 and 53C875 rev. 
 * 1 support fast20 transfers but
 * do not have a clock doubler and so are provided with a
 * 80 MHz clock. All other fast20 boards incorporate a doubler
 * and so should be delivered with a 40 MHz clock.
 * The recent fast40 chips (895/896/895A/1010) use a 40 Mhz base
 * clock and provide a clock quadrupler (160 Mhz).
 */

/*
 * Select SCSI clock frequency.
 *
 * Programs SCNTL3 and, when a clock multiplier (doubler or
 * quadrupler) is both present and wanted (np->multiplier > 1),
 * performs the enable sequence: enable the multiplier, wait for
 * frequency lock, halt the SCSI clock, write scntl3, select the
 * multiplied clock, then restart the SCSI clock.
 * NOTE(review): the ordering of the register writes below follows
 * the chip's documented sequence and must not be rearranged.
 */
static void sym_selectclock(hcb_p np, u_char scntl3)
{
	/*
	 * If multiplier not present or not selected, leave here.
	 */
	if (np->multiplier <= 1) {
		OUTB(nc_scntl3, scntl3);
		return;
	}

	if (sym_verbose >= 2)
		printf ("%s: enabling clock multiplier\n", sym_name(np));

	OUTB(nc_stest1, DBLEN);		/* Enable clock multiplier */
	/*
	 * Wait for the LCKFRQ bit to be set if supported by the chip.
	 * Otherwise wait 20 micro-seconds (here: up to 20 polls of
	 * 20us each, i.e. 400us worst case).
	 */
	if (np->features & FE_LCKFRQ) {
		int i = 20;
		while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
			UDELAY (20);
		if (!i)
			printf("%s: the chip cannot lock the frequency\n",
				sym_name(np));
	} else
		UDELAY (20);
	OUTB(nc_stest3, HSC);		/* Halt the scsi clock	*/
	OUTB(nc_scntl3, scntl3);
	OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */
	OUTB(nc_stest3, 0x00);		/* Restart scsi clock	*/
}

/*
 * calculate SCSI clock frequency (in KHz)
 *
 * Measures how long the chip's general purpose timer takes to
 * expire (nominal 1<<gen * 125us with the pre-scaler set to /3)
 * and converts the measured milliseconds into a frequency.
 * All SCSI and DMA interrupts are masked for the duration.
 */
static unsigned getfreq (hcb_p np, int gen)
{
	unsigned int ms = 0;
	unsigned int f;

	/*
	 * Measure GEN timer delay in order
	 * to calculate SCSI clock frequency
	 *
	 * This code will never execute too
	 * many loop iterations (if DELAY is
	 * reasonably correct). It could get
	 * too low a delay (too high a freq.)
	 * if the CPU is slow executing the
	 * loop for some reason (an NMI, for
	 * example). For this reason we will
	 * if multiple measurements are to be
	 * performed trust the higher delay
	 * (lower frequency returned).
	 */
	OUTW (nc_sien , 0);	/* mask all scsi interrupts */
	(void) INW (nc_sist);	/* clear pending scsi interrupt */
	OUTB (nc_dien , 0);	/* mask all dma interrupts */
	(void) INW (nc_sist);	/* another one, just to be sure :) */
	OUTB (nc_scntl3, 4);	/* set pre-scaler to divide by 3 */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	OUTB (nc_stime1, gen);	/* set to nominal delay of 1<<gen * 125us */
	while (!(INW(nc_sist) & GEN) && ms++ < 100000)
		UDELAY (1000);	/* count ms */
	OUTB (nc_stime1, 0);	/* disable general purpose timer */
	/*
	 * set prescaler to divide by whatever 0 means
	 * 0 ought to choose divide by 2, but appears
	 * to set divide by 3.5 mode in my 53c810 ...
	 */
	OUTB (nc_scntl3, 0);

	/*
	 * adjust for prescaler, and convert into KHz
	 * NOTE(review): 4340 is an empirical constant folding the
	 * /3 pre-scaler and the 125us timer tick — do not change.
	 */
	f = ms ? ((1 << gen) * 4340) / ms : 0;

	if (sym_verbose >= 2)
		printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
			sym_name(np), gen, ms, f);

	return f;
}

/*
 * Measure the chip clock twice (after a throw-away warm-up run)
 * and keep the lower of the two frequencies, per the rationale
 * in getfreq() above.
 */
static unsigned sym_getfreq (hcb_p np)
{
	u_int f1, f2;
	int gen = 11;

	(void) getfreq (np, gen);	/* throw away first result */
	f1 = getfreq (np, gen);
	f2 = getfreq (np, gen);
	if (f1 > f2) f1 = f2;		/* trust lower result	*/
	return f1;
}

/*
 * Get/probe chip SCSI clock frequency
 *
 * Sets np->multiplier and np->clock_khz. Either trusts the
 * BIOS-programmed scntl3/stest1 values, or measures the clock
 * with the general purpose timer and rounds to 40/50/80 MHz.
 */
static void sym_getclock (hcb_p np, int mult)
{
	unsigned char scntl3 = np->sv_scntl3;
	unsigned char stest1 = np->sv_stest1;
	unsigned f1;

	/*
	 * For the C10 core, assume 40 MHz.
	 */
	if (np->features & FE_C10) {
		np->multiplier = mult;
		np->clock_khz = 40000 * mult;
		return;
	}

	np->multiplier = 1;
	f1 = 40000;
	/*
	 * True with 875/895/896/895A with clock multiplier selected
	 */
	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
		if (sym_verbose >= 2)
			printf ("%s: clock multiplier found\n", sym_name(np));
		np->multiplier = mult;
	}

	/*
	 * If multiplier not found or scntl3 not 7,5,3,
	 * reset chip and get frequency from general purpose timer.
	 * Otherwise trust scntl3 BIOS setting.
	 */
	if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
		OUTB (nc_stest1, 0);		/* make sure doubler is OFF */
		f1 = sym_getfreq (np);

		if (sym_verbose)
			printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);

		/* Round the measurement to a nominal crystal value. */
		if	(f1 < 45000)	f1 =  40000;
		else if (f1 < 55000)	f1 =  50000;
		else			f1 =  80000;

		/*
		 * A multiplier-capable chip measured below 80 MHz must
		 * have the multiplier present but not yet enabled.
		 */
		if (f1 < 80000 && mult > 1) {
			if (sym_verbose >= 2)
				printf ("%s: clock multiplier assumed\n",
					sym_name(np));
			np->multiplier = mult;
		}
	} else {
		if	((scntl3 & 7) == 3)	f1 =  40000;
		else if	((scntl3 & 7) == 5)	f1 =  80000;
		else				f1 = 160000;

		f1 /= np->multiplier;
	}

	/*
	 * Compute controller synchronous parameters.
	 */
	f1 *= np->multiplier;
	np->clock_khz = f1;
}

/*
 * Get/probe PCI clock frequency
 *
 * NOTE(review): `f` is static, so the measurement made on the first
 * adapter is reused for every adapter — this assumes all controllers
 * share the same PCI clock; verify if buses may run at different rates.
 */
static int sym_getpciclock (hcb_p np)
{
	static int f = 0;

	/* For the C10, this will not work */
	if (!f && !(np->features & FE_C10)) {
		OUTB (nc_stest1, SCLK);	/* Use the PCI clock as SCSI clock */
		f = (int) sym_getfreq (np);
		OUTB (nc_stest1, 0);
	}
	return f;
}

/*============= DRIVER ACTION/COMPLETION ====================*/

/*
 * Print something that tells about extended errors.
 */
static void sym_print_xerr(ccb_p cp, int x_status)
{
	if (x_status & XE_PARITY_ERR) {
		PRINT_ADDR(cp);
		printf ("unrecovered SCSI parity error.\n");
	}
	if (x_status & XE_EXTRA_DATA) {
		PRINT_ADDR(cp);
		printf ("extraneous data discarded.\n");
	}
	if (x_status & XE_BAD_PHASE) {
		PRINT_ADDR(cp);
		printf ("illegal scsi phase (4/5).\n");
	}
	if (x_status & XE_SODL_UNRUN) {
		PRINT_ADDR(cp);
		printf ("ODD transfer in DATA OUT phase.\n");
	}
	if (x_status & XE_SWIDE_OVRUN) {
		PRINT_ADDR(cp);
		printf ("ODD transfer in DATA IN phase.\n");
	}
}

/*
 * Choose the more appropriate CAM status if
 * the IO encountered an extended error.
 *
 * NOTE(review): the XE_BAD_PHASE branch and the final else both map
 * to CAM_REQ_CMP_ERR — redundant but harmless; kept as-is.
 */
static int sym_xerr_cam_status(int cam_status, int x_status)
{
	if (x_status) {
		if	(x_status & XE_PARITY_ERR)
			cam_status = CAM_UNCOR_PARITY;
		else if	(x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
			cam_status = CAM_DATA_RUN_ERR;
		else if	(x_status & XE_BAD_PHASE)
			cam_status = CAM_REQ_CMP_ERR;
		else
			cam_status = CAM_REQ_CMP_ERR;
	}
	return cam_status;
}

/*
 * Complete execution of a SCSI command with extended
 * error, SCSI status error, or having been auto-sensed.
 *
 * The SCRIPTS processor is not running there, so we
 * can safely access IO registers and remove JOBs from
 * the START queue.
 * SCRATCHA is assumed to have been loaded with STARTPOS
 * before the SCRIPTS called the C code.
 */
static void sym_complete_error (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	u_int cam_status;
	int i;

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;

	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
		printf ("CCB=%lx STAT=%x/%x/%x DEV=%d/%d\n", (unsigned long)cp,
			cp->host_status, cp->ssss_status, cp->host_flags,
			cp->target, cp->lun);
		MDELAY(100);
	}

	/*
	 * Get command, target and lun pointers.
	 */
	csio = &cp->cam_ccb->csio;

	/*
	 * Check for extended errors.
	 */
	if (cp->xerr_status) {
		if (sym_verbose)
			sym_print_xerr(cp, cp->xerr_status);
		if (cp->host_status == HS_COMPLETE)
			cp->host_status = HS_COMP_ERR;
	}

	/*
	 * Calculate the residual.
	 */
	csio->sense_resid = 0;
	csio->resid = sym_compute_residual(np, cp);

	if (!SYM_CONF_RESIDUAL_SUPPORT) {/* If user does not want residuals */
		csio->resid  = 0;	/* throw them away. :)		   */
		cp->sv_resid = 0;
	}

	if (cp->host_flags & HF_SENSE) {		/* Auto sense     */
		csio->scsi_status = cp->sv_scsi_status;	/* Restore status */
		csio->sense_resid = csio->resid;	/* Swap residuals */
		csio->resid       = cp->sv_resid;
		cp->sv_resid      = 0;
		if (sym_verbose && cp->sv_xerr_status)
			sym_print_xerr(cp, cp->sv_xerr_status);
		if (cp->host_status == HS_COMPLETE &&
		    cp->ssss_status == S_GOOD &&
		    cp->xerr_status == 0) {
			cam_status = sym_xerr_cam_status(CAM_SCSI_STATUS_ERROR,
							 cp->sv_xerr_status);
			cam_status |= CAM_AUTOSNS_VALID;
			/*
			 * Bounce back the sense data to user and
			 * fix the residual.
			 */
			bzero(&csio->sense_data, csio->sense_len);
			bcopy(cp->sns_bbuf, &csio->sense_data,
			      MIN(csio->sense_len, SYM_SNS_BBUF_LEN));
			csio->sense_resid += csio->sense_len;
			csio->sense_resid -= SYM_SNS_BBUF_LEN;
#if 0
			/*
			 * If the device reports a UNIT ATTENTION condition
			 * due to a RESET condition, we should consider all
			 * disconnect CCBs for this unit as aborted.
			 */
			if (1) {
				u_char *p;
				p  = (u_char *) csio->sense_data;
				if (p[0]==0x70 && p[2]==0x6 && p[12]==0x29)
					sym_clear_tasks(np, CAM_REQ_ABORTED,
							cp->target,cp->lun, -1);
			}
#endif
		}
		else
			cam_status = CAM_AUTOSENSE_FAIL;
	}
	else if (cp->host_status == HS_COMPLETE) {	/* Bad SCSI status */
		csio->scsi_status = cp->ssss_status;
		cam_status = CAM_SCSI_STATUS_ERROR;
	}
	else if (cp->host_status == HS_SEL_TIMEOUT)	/* Selection timeout */
		cam_status = CAM_SEL_TIMEOUT;
	else if (cp->host_status == HS_UNEXPECTED)	/* Unexpected BUS FREE*/
		cam_status = CAM_UNEXP_BUSFREE;
	else {						/* Extended error */
		if (sym_verbose) {
			PRINT_ADDR(cp);
			printf ("COMMAND FAILED (%x %x %x).\n",
				cp->host_status, cp->ssss_status,
				cp->xerr_status);
		}
		csio->scsi_status = cp->ssss_status;
		/*
		 * Set the most appropriate value for CAM status.
		 */
		cam_status = sym_xerr_cam_status(CAM_REQ_CMP_ERR,
						 cp->xerr_status);
	}

	/*
	 * Dequeue all queued CCBs for that device
	 * not yet started by SCRIPTS.
	 * (SCRATCHA holds STARTPOS; each start queue entry is 4 bytes.)
	 */
	i = (INL (nc_scratcha) - np->squeue_ba) / 4;
	(void) sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);

	/*
	 * Restart the SCRIPTS processor.
	 */
	OUTL (nc_dsp, SCRIPTA_BA (np, start));

#ifdef	FreeBSD_Bus_Dma_Abstraction
	/*
	 * Synchronize DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(bus_dmasync_op_t)(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
	}
#endif
	/*
	 * Add this one to the COMP queue.
	 * Complete all those commands with either error
	 * or requeue condition.
	 */
	sym_set_cam_status((union ccb *) csio, cam_status);
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);
	sym_flush_comp_queue(np, 0);
}

/*
 * Complete execution of a successful SCSI command.
 *
 * Only successful commands go to the DONE queue,
 * since we need to have the SCRIPTS processor
 * stopped on any error condition.
 * The SCRIPTS processor is running while we are
 * completing successful commands.
 */
static void sym_complete_ok (hcb_p np, ccb_p cp)
{
	struct ccb_scsiio *csio;
	tcb_p tp;
	lcb_p lp;

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cam_ccb)
		return;
	assert (cp->host_status == HS_COMPLETE);

	/*
	 * Get command, target and lun pointers.
	 */
	csio = &cp->cam_ccb->csio;
	tp = &np->target[cp->target];
	lp = sym_lp(np, tp, cp->lun);

	/*
	 * Assume device discovered on first success.
	 */
	if (!lp)
		sym_set_bit(tp->lun_map, cp->lun);

	/*
	 * If all data have been transferred, given than no
	 * extended error did occur, there is no residual.
	 */
	csio->resid = 0;
	if (cp->phys.head.lastp != cp->phys.head.goalp)
		csio->resid = sym_compute_residual(np, cp);

	/*
	 * Wrong transfer residuals may be worse than just always
	 * returning zero. User can disable this feature from
	 * sym_conf.h. Residual support is enabled by default.
	 */
	if (!SYM_CONF_RESIDUAL_SUPPORT)
		csio->resid  = 0;

#ifdef	FreeBSD_Bus_Dma_Abstraction
	/*
	 * Synchronize DMA map if needed.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(bus_dmasync_op_t)(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
	}
#endif
	/*
	 * Set status and complete the command.
	 */
	csio->scsi_status = cp->ssss_status;
	sym_set_cam_status((union ccb *) csio, CAM_REQ_CMP);
	sym_free_ccb (np, cp);
	sym_xpt_done(np, (union ccb *) csio);
}

/*
 * Our timeout handler.
 * (sym_timeout1 does the work; sym_timeout is the splcam wrapper
 * actually registered with timeout(9).)
 */
static void sym_timeout1(void *arg)
{
	union ccb *ccb = (union ccb *) arg;
	hcb_p np = ccb->ccb_h.sym_hcb_ptr;

	/*
	 * Check that the CAM CCB is still queued.
	 */
	if (!np)
		return;

	switch(ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		(void) sym_abort_scsiio(np, ccb, 1);
		break;
	default:
		break;
	}
}

static void sym_timeout(void *arg)
{
	int s = splcam();
	sym_timeout1(arg);
	splx(s);
}

/*
 * Abort an SCSI IO.
 *
 * Returns -1 when the CCB is not ours or still being mapped
 * (HS_WAIT), 0 otherwise. A second abort attempt on the same
 * CCB escalates to a SCSI bus reset.
 */
static int sym_abort_scsiio(hcb_p np, union ccb *ccb, int timed_out)
{
	ccb_p cp;
	SYM_QUEHEAD *qp;

	/*
	 * Look up our CCB control block.
	 */
	cp = 0;
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp2->cam_ccb == ccb) {
			cp = cp2;
			break;
		}
	}
	if (!cp || cp->host_status == HS_WAIT)
		return -1;

	/*
	 * If a previous abort didn't succeed in time,
	 * perform a BUS reset.
	 */
	if (cp->to_abort) {
		sym_reset_scsi_bus(np, 1);
		return 0;
	}

	/*
	 * Mark the CCB for abort and allow time for.
	 */
	cp->to_abort = timed_out ? 2 : 1;
	ccb->ccb_h.timeout_ch = timeout(sym_timeout, (caddr_t) ccb, 10*hz);

	/*
	 * Tell the SCRIPTS processor to stop and synchronize with us.
	 */
	np->istat_sem = SEM;
	OUTB (nc_istat, SIGP|SEM);
	return 0;
}

/*
 * Reset a SCSI device (all LUNs of a target).
 */
static void sym_reset_dev(hcb_p np, union ccb *ccb)
{
	tcb_p tp;
	struct ccb_hdr *ccb_h = &ccb->ccb_h;

	/* Reject our own id, out-of-range targets and LUNs. */
	if (ccb_h->target_id   == np->myaddr ||
	    ccb_h->target_id   >= SYM_CONF_MAX_TARGET ||
	    ccb_h->target_lun  >= SYM_CONF_MAX_LUN) {
		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
		return;
	}

	tp = &np->target[ccb_h->target_id];

	/* Flag the target; SCRIPTS are signalled to synchronize below. */
	tp->to_reset = 1;
	sym_xpt_done2(np, ccb, CAM_REQ_CMP);

	np->istat_sem = SEM;
	OUTB (nc_istat, SIGP|SEM);
	return;
}

/*
 * SIM action entry point.
 * (splcam wrapper around sym_action1, which does the real work.)
 */
static void sym_action(struct cam_sim *sim, union ccb *ccb)
{
	int s = splcam();
	sym_action1(sim, ccb);
	splx(s);
}

static void sym_action1(struct cam_sim *sim, union ccb *ccb)
{
	hcb_p	np;
	tcb_p	tp;
	lcb_p	lp;
	ccb_p	cp;
	int	tmp;
	u_char	idmsg, *msgptr;
	u_int	msglen;
	struct	ccb_scsiio *csio;
	struct	ccb_hdr *ccb_h;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("sym_action\n"));

	/*
	 * Retrieve our controller data structure.
	 */
	np = (hcb_p) cam_sim_softc(sim);

	/*
	 * The common case is SCSI IO.
	 * We deal with other ones elsewhere.
	 */
	if (ccb->ccb_h.func_code != XPT_SCSI_IO) {
		sym_action2(sim, ccb);
		return;
	}
	csio  = &ccb->csio;
	ccb_h = &csio->ccb_h;

	/*
	 * Work around races.
	 */
	if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		xpt_done(ccb);
		return;
	}

	/*
	 * Minimal checkings, so that we will not
	 * go outside our tables.
	 */
	if (ccb_h->target_id   == np->myaddr ||
	    ccb_h->target_id   >= SYM_CONF_MAX_TARGET ||
	    ccb_h->target_lun  >= SYM_CONF_MAX_LUN) {
		sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
		return;
	}

	/*
	 * Retreive the target and lun descriptors.
	 */
	tp = &np->target[ccb_h->target_id];
	lp = sym_lp(np, tp, ccb_h->target_lun);

	/*
	 * Complete the 1st INQUIRY command with error
	 * condition if the device is flagged NOSCAN
	 * at BOOT in the NVRAM. This may speed up
	 * the boot and maintain coherency with BIOS
	 * device numbering. Clearing the flag allows
	 * user to rescan skipped devices later.
	 * We also return error for devices not flagged
	 * for SCAN LUNS in the NVRAM since some mono-lun
	 * devices behave badly when asked for some non
	 * zero LUN. Btw, this is an absolute hack.:-)
	 * (0x12 is the INQUIRY opcode.)
	 */
	if (!(ccb_h->flags & CAM_CDB_PHYS) &&
	    (0x12 == ((ccb_h->flags & CAM_CDB_POINTER) ?
		      csio->cdb_io.cdb_ptr[0] : csio->cdb_io.cdb_bytes[0]))) {
		if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) ||
		    ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) &&
		     ccb_h->target_lun != 0)) {
			tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
			sym_xpt_done2(np, ccb, CAM_DEV_NOT_THERE);
			return;
		}
	}

	/*
	 * Get a control block for this IO.
	 */
	tmp = ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0);
	cp  = sym_get_ccb(np, ccb_h->target_id, ccb_h->target_lun, tmp);
	if (!cp) {
		sym_xpt_done2(np, ccb, CAM_RESRC_UNAVAIL);
		return;
	}

	/*
	 * Keep track of the IO in our CCB.
	 */
	cp->cam_ccb = ccb;

	/*
	 * Build the IDENTIFY message.
	 * (0x40 sets the DiscPriv bit, allowing disconnection.)
	 */
	idmsg = M_IDENTIFY | cp->lun;
	if (cp->tag != NO_TAG || (lp && (lp->current_flags & SYM_DISC_ENABLED)))
		idmsg |= 0x40;

	msgptr = cp->scsi_smsg;
	msglen = 0;
	msgptr[msglen++] = idmsg;

	/*
	 * Build the tag message if present.
	 */
	if (cp->tag != NO_TAG) {
		u_char order = csio->tag_action;

		switch(order) {
		case M_ORDERED_TAG:
			break;
		case M_HEAD_TAG:
			break;
		default:
			order = M_SIMPLE_TAG;
		}
		msgptr[msglen++] = order;

		/*
		 * For less than 128 tags, actual tags are numbered
		 * 1,3,5,..2*MAXTAGS+1,since we may have to deal
		 * with devices that have problems with #TAG 0 or too
		 * great #TAG numbers. For more tags (up to 256),
		 * we use directly our tag number.
		 */
#if SYM_CONF_MAX_TASK > (512/4)
		msgptr[msglen++] = cp->tag;
#else
		msgptr[msglen++] = (cp->tag << 1) + 1;
#endif
	}

	/*
	 * Build a negotiation message if needed.
	 * (nego_status is filled by sym_prepare_nego())
	 */
	cp->nego_status = 0;
	if (tp->tinfo.current.width   != tp->tinfo.goal.width  ||
	    tp->tinfo.current.period  != tp->tinfo.goal.period ||
	    tp->tinfo.current.offset  != tp->tinfo.goal.offset ||
#if 0 /* For now only renegotiate, based on width, period and offset */
	    tp->tinfo.current.options != tp->tinfo.goal.options) {
#else
	    0) {
#endif
		if (!tp->nego_cp && lp)
			msglen += sym_prepare_nego(np, cp, 0, msgptr + msglen);
	}

	/*
	 * Fill in our ccb
	 */

	/*
	 * Startqueue
	 */
	cp->phys.head.go.start   = cpu_to_scr(SCRIPTA_BA (np, select));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA (np, resel_dsa));

	/*
	 * select
	 */
	cp->phys.select.sel_id		= cp->target;
	cp->phys.select.sel_scntl3	= tp->head.wval;
	cp->phys.select.sel_sxfer	= tp->head.sval;
	cp->phys.select.sel_scntl4	= tp->head.uval;

	/*
	 * message
	 */
	cp->phys.smsg.addr	= cpu_to_scr(CCB_BA (cp, scsi_smsg));
	cp->phys.smsg.size	= cpu_to_scr(msglen);

	/*
	 * command
	 */
	if (sym_setup_cdb(np, csio, cp) < 0) {
		sym_free_ccb(np, cp);
		sym_xpt_done(np, ccb);
		return;
	}

	/*
	 * status
	 */
#if 0	/* Provision */
	cp->actualquirks	= tp->quirks;
#endif
	cp->actualquirks	= SYM_QUIRK_AUTOSAVE;
	cp->host_status		= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
	cp->ssss_status		= S_ILLEGAL;
	cp->xerr_status		= 0;
	cp->host_flags		= 0;
	cp->extra_bytes		= 0;

	/*
	 * extreme data pointer.
	 * shall be positive, so -1 is lower than lowest.:)
	 */
	cp->ext_sg  = -1;
	cp->ext_ofs = 0;

	/*
	 * Build the data descriptor block
	 * and start the IO.
	 */
	sym_setup_data_and_start(np, csio, cp);
}

/*
 * Setup buffers and pointers that address the CDB.
 * I bet, physical CDBs will never be used on the planet,
 * since they can be bounced without significant overhead.
 *
 * Returns 0 on success, -1 (with the CCB status set to
 * CAM_REQ_INVALID) when the CDB is too long or physical.
 */
static int sym_setup_cdb(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
{
	struct ccb_hdr *ccb_h;
	u32	cmd_ba;
	int	cmd_len;

	ccb_h = &csio->ccb_h;

	/*
	 * CDB is 16 bytes max.
	 */
	if (csio->cdb_len > sizeof(cp->cdb_buf)) {
		sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
		return -1;
	}
	cmd_len = csio->cdb_len;

	if (ccb_h->flags & CAM_CDB_POINTER) {
		/* CDB is a pointer */
		if (!(ccb_h->flags & CAM_CDB_PHYS)) {
			/* CDB pointer is virtual */
			bcopy(csio->cdb_io.cdb_ptr, cp->cdb_buf, cmd_len);
			cmd_ba = CCB_BA (cp, cdb_buf[0]);
		} else {
			/* CDB pointer is physical */
#if 0
			cmd_ba = ((u32)csio->cdb_io.cdb_ptr) & 0xffffffff;
#else
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
			return -1;
#endif
		}
	} else {
		/* CDB is in the CAM ccb (buffer) */
		bcopy(csio->cdb_io.cdb_bytes, cp->cdb_buf, cmd_len);
		cmd_ba = CCB_BA (cp, cdb_buf[0]);
	}

	cp->phys.cmd.addr = cpu_to_scr(cmd_ba);
	cp->phys.cmd.size = cpu_to_scr(cmd_len);

	return 0;
}

/*
 * Set up data pointers used by SCRIPTS.
 * lastp/goalp/savep select the SCRIPTS entry to resume data
 * transfer from; each S/G segment descriptor is 2*4 bytes.
 */
static void __inline__
sym_setup_data_pointers(hcb_p np, ccb_p cp, int dir)
{
	u32 lastp, goalp;

	/*
	 * No segments means no data.
	 */
	if (!cp->segments)
		dir = CAM_DIR_NONE;

	/*
	 * Set the data pointer.
	 */
	switch(dir) {
	case CAM_DIR_OUT:
		goalp = SCRIPTA_BA (np, data_out2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case CAM_DIR_IN:
		cp->host_flags |= HF_DATA_IN;
		goalp = SCRIPTA_BA (np, data_in2) + 8;
		lastp = goalp - 8 - (cp->segments * (2*4));
		break;
	case CAM_DIR_NONE:
	default:
		lastp = goalp = SCRIPTB_BA (np, no_data);
		break;
	}

	cp->phys.head.lastp = cpu_to_scr(lastp);
	cp->phys.head.goalp = cpu_to_scr(goalp);
	cp->phys.head.savep = cpu_to_scr(lastp);
	cp->startp	    = cp->phys.head.savep;
}


#ifdef	FreeBSD_Bus_Dma_Abstraction
/*
 * Call back routine for the DMA map service.
 * If bounce buffers are used (why ?), we may sleep and then
 * be called there in another context.
 *
 * Builds the chip's scatter list from the bus_dma segments,
 * syncs the DMA map and queues the job to the SCRIPTS start
 * queue. On any error the CCB is completed immediately.
 */
static void
sym_execute_ccb(void *arg, bus_dma_segment_t *psegs, int nsegs, int error)
{
	ccb_p cp;
	hcb_p np;
	union ccb *ccb;
	int s;

	s = splcam();

	cp  = (ccb_p) arg;
	ccb = cp->cam_ccb;
	np  = (hcb_p) cp->arg;

	/*
	 * Deal with weird races.
	 */
	if (sym_get_cam_status(ccb) != CAM_REQ_INPROG)
		goto out_abort;

	/*
	 * Deal with weird errors.
	 */
	if (error) {
		cp->dmamapped = 0;
		sym_set_cam_status(cp->cam_ccb, CAM_REQ_ABORTED);
		goto out_abort;
	}

	/*
	 * Build the data descriptor for the chip.
	 */
	if (nsegs) {
		int retv;
		/* 896 rev 1 requires to be careful about boundaries */
		if (np->device_id == PCI_ID_SYM53C896 && np->revision_id <= 1)
			retv = sym_scatter_sg_physical(np, cp, psegs, nsegs);
		else
			retv = sym_fast_scatter_sg_physical(np,cp, psegs,nsegs);
		if (retv < 0) {
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG);
			goto out_abort;
		}
	}

	/*
	 * Synchronize the DMA map only if we have
	 * actually mapped the data.
	 */
	if (cp->dmamapped) {
		bus_dmamap_sync(np->data_dmat, cp->dmamap,
			(bus_dmasync_op_t)(cp->dmamapped == SYM_DMA_READ ?
				BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
	}

	/*
	 * Set host status to busy state.
	 * May have been set back to HS_WAIT to avoid a race.
	 */
	cp->host_status	= cp->nego_status ? HS_NEGOTIATE : HS_BUSY;

	/*
	 * Set data pointers.
	 */
	sym_setup_data_pointers(np, cp, (ccb->ccb_h.flags & CAM_DIR_MASK));

	/*
	 * Enqueue this IO in our pending queue.
	 */
	sym_enqueue_cam_ccb(np, ccb);

#if 0
	switch (cp->cdb_buf[0]) {
	case 0x0A: case 0x2A: case 0xAA:
		panic("XXXXXXXXXXXXX WRITE NOT YET ALLOWED XXXXXXXXXXXXXX\n");
		MDELAY(10000);
		break;
	default:
		break;
	}
#endif
	/*
	 * Activate this job.
	 */
	sym_put_start_queue(np, cp);
out:
	splx(s);
	return;
out_abort:
	sym_free_ccb(np, cp);
	sym_xpt_done(np, ccb);
	goto out;
}

/*
 * How complex it gets to deal with the data in CAM.
 * The Bus Dma stuff makes things still more complex.
 *
 * Maps the CAM data buffer (single virtual, single physical, or
 * physical S/G list) and hands off to sym_execute_ccb(), possibly
 * asynchronously when bus_dmamap_load() returns EINPROGRESS.
 */
static void
sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
{
	struct ccb_hdr *ccb_h;
	int dir, retv;

	ccb_h = &csio->ccb_h;

	/*
	 * Now deal with the data.
	 */
	cp->data_len = csio->dxfer_len;
	cp->arg	     = np;

	/*
	 * No direction means no data.
	 */
	dir = (ccb_h->flags & CAM_DIR_MASK);
	if (dir == CAM_DIR_NONE) {
		sym_execute_ccb(cp, NULL, 0, 0);
		return;
	}

	if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
		/* Single buffer */
		if (!(ccb_h->flags & CAM_DATA_PHYS)) {
			/* Buffer is virtual */
			int s;

			cp->dmamapped = (dir == CAM_DIR_IN) ?
				SYM_DMA_READ : SYM_DMA_WRITE;
			s = splsoftvm();
			retv = bus_dmamap_load(np->data_dmat, cp->dmamap,
					       csio->data_ptr, csio->dxfer_len,
					       sym_execute_ccb, cp, 0);
			if (retv == EINPROGRESS) {
				/* Deferred callback: park the CCB and
				 * freeze the SIM queue until mapped. */
				cp->host_status	= HS_WAIT;
				xpt_freeze_simq(np->sim, 1);
				csio->ccb_h.status |= CAM_RELEASE_SIMQ;
			}
			splx(s);
		} else {
			/* Buffer is physical */
			struct bus_dma_segment seg;

			seg.ds_addr = (bus_addr_t) csio->data_ptr;
			sym_execute_ccb(cp, &seg, 1, 0);
		}
	} else {
		/* Scatter/gather list */
		struct bus_dma_segment *segs;

		if ((ccb_h->flags & CAM_SG_LIST_PHYS) != 0) {
			/* The SG list pointer is physical */
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
			goto out_abort;
		}

		if (!(ccb_h->flags & CAM_DATA_PHYS)) {
			/* SG buffer pointers are virtual */
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
			goto out_abort;
		}

		/* SG buffer pointers are physical */
		segs = (struct bus_dma_segment *)csio->data_ptr;
		sym_execute_ccb(cp, segs, csio->sglist_cnt, 0);
	}
	return;
out_abort:
	sym_free_ccb(np, cp);
	sym_xpt_done(np, (union ccb *) csio);
}

/*
 * Move the scatter list to our data block.
 */
static int
sym_fast_scatter_sg_physical(hcb_p np, ccb_p cp,
			     bus_dma_segment_t *psegs, int nsegs)
{
	struct sym_tblmove *data;
	bus_dma_segment_t *psegs2;

	if (nsegs > SYM_CONF_MAX_SG)
		return -1;

	/*
	 * The chip's scatter table is filled backwards from its
	 * last slot, so walk both arrays from their last entries.
	 */
	data   = &cp->phys.data[SYM_CONF_MAX_SG-1];
	psegs2 = &psegs[nsegs-1];
	cp->segments = nsegs;

	while (1) {
		data->addr = cpu_to_scr(psegs2->ds_addr);
		data->size = cpu_to_scr(psegs2->ds_len);
		if (DEBUG_FLAGS & DEBUG_SCATTER) {
			printf ("%s scatter: paddr=%lx len=%ld\n",
				sym_name(np), (long) psegs2->ds_addr,
				(long) psegs2->ds_len);
		}
		if (psegs2 != psegs) {
			--data;
			--psegs2;
			continue;
		}
		break;
	}
	return 0;
}

#else	/* FreeBSD_Bus_Dma_Abstraction */

/*
 * How complex it gets to deal with the data in CAM.
 * Variant without the Bus Dma Abstraction option.
 *
 * Scatters the CAM data buffer with vtobus(), sets the SCRIPTS
 * data pointers and queues the job; completes the CCB on error.
 */
static void
sym_setup_data_and_start(hcb_p np, struct ccb_scsiio *csio, ccb_p cp)
{
	struct ccb_hdr *ccb_h;
	int dir, retv;

	ccb_h = &csio->ccb_h;

	/*
	 * Now deal with the data.
	 */
	cp->data_len = 0;
	cp->segments = 0;

	/*
	 * No direction means no data.
	 */
	dir = (ccb_h->flags & CAM_DIR_MASK);
	if (dir == CAM_DIR_NONE)
		goto end_scatter;

	if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
		/* Single buffer */
		if (!(ccb_h->flags & CAM_DATA_PHYS)) {
			/* Buffer is virtual */
			retv = sym_scatter_virtual(np, cp,
				(vm_offset_t) csio->data_ptr,
				(vm_size_t) csio->dxfer_len);
		} else {
			/* Buffer is physical */
			retv = sym_scatter_physical(np, cp,
				(vm_offset_t) csio->data_ptr,
				(vm_size_t) csio->dxfer_len);
		}
	} else {
		/* Scatter/gather list */
		int nsegs;
		struct bus_dma_segment *segs;
		segs  = (struct bus_dma_segment *)csio->data_ptr;
		nsegs = csio->sglist_cnt;

		if ((ccb_h->flags & CAM_SG_LIST_PHYS) != 0) {
			/* The SG list pointer is physical */
			sym_set_cam_status(cp->cam_ccb, CAM_REQ_INVALID);
			goto out_abort;
		}
		if (!(ccb_h->flags & CAM_DATA_PHYS)) {
			/* SG buffer pointers are virtual */
			retv = sym_scatter_sg_virtual(np, cp, segs, nsegs);
		} else {
			/* SG buffer pointers are physical */
			retv = sym_scatter_sg_physical(np, cp, segs, nsegs);
		}
	}
	if (retv < 0) {
		sym_set_cam_status(cp->cam_ccb, CAM_REQ_TOO_BIG);
		goto out_abort;
	}

end_scatter:
	/*
	 * Set data pointers.
	 */
	sym_setup_data_pointers(np, cp, dir);

	/*
	 * Enqueue this IO in our pending queue.
	 */
	sym_enqueue_cam_ccb(np, (union ccb *) csio);

	/*
	 * Activate this job.
	 */
	sym_put_start_queue(np, cp);

	/*
	 * Command is successfully queued.
	 */
	return;
out_abort:
	sym_free_ccb(np, cp);
	sym_xpt_done(np, (union ccb *) csio);
}

/*
 * Scatter a virtual buffer into bus addressable chunks.
 */
static int
sym_scatter_virtual(hcb_p np, ccb_p cp, vm_offset_t vaddr, vm_size_t len)
{
	u_long pe, pn;
	u_long n, k;
	int s;

	cp->data_len += len;

	/*
	 * Walk the buffer backwards, page by page, filling the
	 * chip's scatter table from its last free slot downwards.
	 * NOTE(review): relies on vtobus() per page — assumes each
	 * page is individually bus-addressable.
	 */
	pe = vaddr + len;
	n  = len;
	s  = SYM_CONF_MAX_SG - 1 - cp->segments;

	while (n && s >= 0) {
		pn = (pe - 1) & ~PAGE_MASK;	/* page start of last byte */
		k  = pe - pn;			/* bytes in this page	   */
		if (k > n) {
			k  = n;
			pn = pe - n;
		}
		if (DEBUG_FLAGS & DEBUG_SCATTER) {
			printf ("%s scatter: va=%lx pa=%lx siz=%ld\n",
				sym_name(np), pn, (u_long) vtobus(pn), k);
		}
		cp->phys.data[s].addr = cpu_to_scr(vtobus(pn));
		cp->phys.data[s].size = cpu_to_scr(k);
		pe = pn;
		n -= k;
		--s;
	}
	cp->segments = SYM_CONF_MAX_SG - 1 - s;

	/* Non-zero remainder means we ran out of scatter entries. */
	return n ? -1 : 0;
}

/*
 * Scatter a SG list with virtual addresses into bus addressable chunks.
 */
static int
sym_scatter_sg_virtual(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs)
{
	int i, retv = 0;

	/* Segments are consumed backwards to match the backward
	 * filling of the chip's scatter table. */
	for (i = nsegs - 1 ;  i >= 0 ; --i) {
		retv = sym_scatter_virtual(np, cp,
			psegs[i].ds_addr, psegs[i].ds_len);
		if (retv < 0)
			break;
	}
	return retv;
}

/*
 * Scatter a physical buffer into bus addressable chunks.
 */
static int
sym_scatter_physical(hcb_p np, ccb_p cp, vm_offset_t paddr, vm_size_t len)
{
	struct bus_dma_segment seg;

	seg.ds_addr = paddr;
	seg.ds_len  = len;
	return sym_scatter_sg_physical(np, cp, &seg, 1);
}

#endif	/* FreeBSD_Bus_Dma_Abstraction */

/*
 * Scatter a SG list with physical addresses into bus addressable chunks.
 * We need to ensure 16MB boundaries not to be crossed during DMA of
 * each segment, due to some chips being flawed.
 */
#define BOUND_MASK ((1UL<<24)-1)
static int
sym_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs)
{
	u_long	ps, pe, pn;
	u_long	k;
	int s, t;

#ifndef	FreeBSD_Bus_Dma_Abstraction
	s = SYM_CONF_MAX_SG - 1 - cp->segments;
#else
	s = SYM_CONF_MAX_SG - 1;
#endif
	t  = nsegs - 1;
	ps = psegs[t].ds_addr;
	pe = ps + psegs[t].ds_len;

	/*
	 * Fill the chip's scatter table backwards, splitting each
	 * segment at every 16MB boundary (BOUND_MASK).
	 */
	while (s >= 0) {
		pn = (pe - 1) & ~BOUND_MASK;	/* last 16MB boundary below pe */
		if (pn <= ps)
			pn = ps;		/* segment fits; no split */
		k = pe - pn;
		if (DEBUG_FLAGS & DEBUG_SCATTER) {
			printf ("%s scatter: paddr=%lx len=%ld\n",
				sym_name(np), pn, k);
		}
		cp->phys.data[s].addr = cpu_to_scr(pn);
		cp->phys.data[s].size = cpu_to_scr(k);
#ifndef	FreeBSD_Bus_Dma_Abstraction
		cp->data_len += k;
#endif
		--s;
		if (pn == ps) {
			/* This source segment is consumed; next one. */
			if (--t < 0)
				break;
			ps = psegs[t].ds_addr;
			pe = ps + psegs[t].ds_len;
		}
		else
			pe = pn;
	}

	cp->segments = SYM_CONF_MAX_SG - 1 - s;

	/* t >= 0 means the scatter table overflowed before all
	 * source segments were consumed. */
	return t >= 0 ? -1 : 0;
}
#undef BOUND_MASK

/*
 * SIM action for non performance critical stuff.
 */
static void sym_action2(struct cam_sim *sim, union ccb *ccb)
{
	hcb_p	np;
	tcb_p	tp;
	lcb_p	lp;
	struct	ccb_hdr *ccb_h;

	/*
	 * Retrieve our controller data structure.
	 */
	np = (hcb_p) cam_sim_softc(sim);

	ccb_h = &ccb->ccb_h;

	switch (ccb_h->func_code) {
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;

		cts = &ccb->cts;
		tp = &np->target[ccb_h->target_id];

		/*
		 * Update our transfer settings (basically WIDE/SYNC).
		 * These features are to be handled in a per target
		 * basis according to SCSI specifications.
8177 */ 8178 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) 8179 sym_update_trans(np, tp, &tp->tinfo.user, cts); 8180 8181 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) 8182 sym_update_trans(np, tp, &tp->tinfo.goal, cts); 8183 8184 /* 8185 * Update our disconnect and tag settings. 8186 * SCSI requires CmdQue feature to be handled in a per 8187 * device (logical unit) basis. 8188 */ 8189 lp = sym_lp(np, tp, ccb_h->target_lun); 8190 if (lp) { 8191 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) 8192 sym_update_dflags(np, &lp->user_flags, cts); 8193 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) 8194 sym_update_dflags(np, &lp->current_flags, cts); 8195 } 8196 8197 sym_xpt_done2(np, ccb, CAM_REQ_CMP); 8198 break; 8199 } 8200 case XPT_GET_TRAN_SETTINGS: 8201 { 8202 struct ccb_trans_settings *cts; 8203 struct sym_trans *tip; 8204 u_char dflags; 8205 8206 cts = &ccb->cts; 8207 tp = &np->target[ccb_h->target_id]; 8208 lp = sym_lp(np, tp, ccb_h->target_lun); 8209 8210 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { 8211 tip = &tp->tinfo.current; 8212 dflags = lp ? lp->current_flags : 0; 8213 } 8214 else { 8215 tip = &tp->tinfo.user; 8216 dflags = lp ? 
lp->user_flags : tp->usrflags; 8217 } 8218 8219 cts->sync_period = tip->period; 8220 cts->sync_offset = tip->offset; 8221 cts->bus_width = tip->width; 8222 8223 cts->valid = CCB_TRANS_SYNC_RATE_VALID 8224 | CCB_TRANS_SYNC_OFFSET_VALID 8225 | CCB_TRANS_BUS_WIDTH_VALID; 8226 8227 if (lp) { 8228 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 8229 8230 if (dflags & SYM_DISC_ENABLED) 8231 cts->flags |= CCB_TRANS_DISC_ENB; 8232 8233 if (dflags & SYM_TAGS_ENABLED) 8234 cts->flags |= CCB_TRANS_TAG_ENB; 8235 8236 cts->valid |= CCB_TRANS_DISC_VALID; 8237 cts->valid |= CCB_TRANS_TQ_VALID; 8238 } 8239 8240 sym_xpt_done2(np, ccb, CAM_REQ_CMP); 8241 break; 8242 } 8243 case XPT_CALC_GEOMETRY: 8244 { 8245 struct ccb_calc_geometry *ccg; 8246 u32 size_mb; 8247 u32 secs_per_cylinder; 8248 int extended; 8249 8250 /* 8251 * Silly DOS geometry. 8252 */ 8253 ccg = &ccb->ccg; 8254 size_mb = ccg->volume_size 8255 / ((1024L * 1024L) / ccg->block_size); 8256 extended = 1; 8257 8258 if (size_mb > 1024 && extended) { 8259 ccg->heads = 255; 8260 ccg->secs_per_track = 63; 8261 } else { 8262 ccg->heads = 64; 8263 ccg->secs_per_track = 32; 8264 } 8265 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 8266 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 8267 sym_xpt_done2(np, ccb, CAM_REQ_CMP); 8268 break; 8269 } 8270 case XPT_PATH_INQ: 8271 { 8272 struct ccb_pathinq *cpi = &ccb->cpi; 8273 cpi->version_num = 1; 8274 cpi->hba_inquiry = PI_MDP_ABLE|PI_SDTR_ABLE|PI_TAG_ABLE; 8275 if ((np->features & FE_WIDE) != 0) 8276 cpi->hba_inquiry |= PI_WIDE_16; 8277 cpi->target_sprt = 0; 8278 cpi->hba_misc = 0; 8279 if (np->usrflags & SYM_SCAN_TARGETS_HILO) 8280 cpi->hba_misc |= PIM_SCANHILO; 8281 if (np->usrflags & SYM_AVOID_BUS_RESET) 8282 cpi->hba_misc |= PIM_NOBUSRESET; 8283 cpi->hba_eng_cnt = 0; 8284 cpi->max_target = (np->features & FE_WIDE) ? 
15 : 7; 8285 /* Semantic problem:)LUN number max = max number of LUNs - 1 */ 8286 cpi->max_lun = SYM_CONF_MAX_LUN-1; 8287 if (SYM_SETUP_MAX_LUN < SYM_CONF_MAX_LUN) 8288 cpi->max_lun = SYM_SETUP_MAX_LUN-1; 8289 cpi->bus_id = cam_sim_bus(sim); 8290 cpi->initiator_id = np->myaddr; 8291 cpi->base_transfer_speed = 3300; 8292 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 8293 strncpy(cpi->hba_vid, "Symbios", HBA_IDLEN); 8294 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 8295 cpi->unit_number = cam_sim_unit(sim); 8296 sym_xpt_done2(np, ccb, CAM_REQ_CMP); 8297 break; 8298 } 8299 case XPT_ABORT: 8300 { 8301 union ccb *abort_ccb = ccb->cab.abort_ccb; 8302 switch(abort_ccb->ccb_h.func_code) { 8303 case XPT_SCSI_IO: 8304 if (sym_abort_scsiio(np, abort_ccb, 0) == 0) { 8305 sym_xpt_done2(np, ccb, CAM_REQ_CMP); 8306 break; 8307 } 8308 default: 8309 sym_xpt_done2(np, ccb, CAM_UA_ABORT); 8310 break; 8311 } 8312 break; 8313 } 8314 case XPT_RESET_DEV: 8315 { 8316 sym_reset_dev(np, ccb); 8317 break; 8318 } 8319 case XPT_RESET_BUS: 8320 { 8321 sym_reset_scsi_bus(np, 0); 8322 if (sym_verbose) { 8323 xpt_print_path(np->path); 8324 printf("SCSI BUS reset delivered.\n"); 8325 } 8326 sym_init (np, 1); 8327 sym_xpt_done2(np, ccb, CAM_REQ_CMP); 8328 break; 8329 } 8330 case XPT_ACCEPT_TARGET_IO: 8331 case XPT_CONT_TARGET_IO: 8332 case XPT_EN_LUN: 8333 case XPT_NOTIFY_ACK: 8334 case XPT_IMMED_NOTIFY: 8335 case XPT_TERM_IO: 8336 default: 8337 sym_xpt_done2(np, ccb, CAM_REQ_INVALID); 8338 break; 8339 } 8340 } 8341 8342 /* 8343 * Update transfer settings of a target. 8344 */ 8345 static void sym_update_trans(hcb_p np, tcb_p tp, struct sym_trans *tip, 8346 struct ccb_trans_settings *cts) 8347 { 8348 /* 8349 * Update the infos. 
8350 */ 8351 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) 8352 tip->width = cts->bus_width; 8353 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0) 8354 tip->offset = cts->sync_offset; 8355 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) 8356 tip->period = cts->sync_period; 8357 8358 /* 8359 * Scale against out limits. 8360 */ 8361 if (tip->width > SYM_SETUP_MAX_WIDE) tip->width =SYM_SETUP_MAX_WIDE; 8362 if (tip->width > np->maxwide) tip->width = np->maxwide; 8363 if (tip->offset > SYM_SETUP_MAX_OFFS) tip->offset =SYM_SETUP_MAX_OFFS; 8364 if (tip->offset > np->maxoffs) tip->offset = np->maxoffs; 8365 if (tip->period) { 8366 if (tip->period < SYM_SETUP_MIN_SYNC) 8367 tip->period = SYM_SETUP_MIN_SYNC; 8368 if (np->features & FE_ULTRA3) { 8369 if (tip->period < np->minsync_dt) 8370 tip->period = np->minsync_dt; 8371 } 8372 else { 8373 if (tip->period < np->minsync) 8374 tip->period = np->minsync; 8375 } 8376 if (tip->period > np->maxsync) 8377 tip->period = np->maxsync; 8378 } 8379 } 8380 8381 /* 8382 * Update flags for a device (logical unit). 
8383 */ 8384 static void 8385 sym_update_dflags(hcb_p np, u_char *flags, struct ccb_trans_settings *cts) 8386 { 8387 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 8388 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) 8389 *flags |= SYM_DISC_ENABLED; 8390 else 8391 *flags &= ~SYM_DISC_ENABLED; 8392 } 8393 8394 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 8395 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) 8396 *flags |= SYM_TAGS_ENABLED; 8397 else 8398 *flags &= ~SYM_TAGS_ENABLED; 8399 } 8400 } 8401 8402 8403 /*============= DRIVER INITIALISATION ==================*/ 8404 8405 #ifdef FreeBSD_Bus_Io_Abstraction 8406 8407 static device_method_t sym_pci_methods[] = { 8408 DEVMETHOD(device_probe, sym_pci_probe), 8409 DEVMETHOD(device_attach, sym_pci_attach), 8410 { 0, 0 } 8411 }; 8412 8413 static driver_t sym_pci_driver = { 8414 "sym", 8415 sym_pci_methods, 8416 sizeof(struct sym_hcb) 8417 }; 8418 8419 static devclass_t sym_devclass; 8420 8421 DRIVER_MODULE(sym, pci, sym_pci_driver, sym_devclass, 0, 0); 8422 8423 #else /* Pre-FreeBSD_Bus_Io_Abstraction */ 8424 8425 static u_long sym_unit; 8426 8427 static struct pci_device sym_pci_driver = { 8428 "sym", 8429 sym_pci_probe, 8430 sym_pci_attach, 8431 &sym_unit, 8432 NULL 8433 }; 8434 8435 #if __FreeBSD_version >= 400000 8436 COMPAT_PCI_DRIVER (sym, sym_pci_driver); 8437 #else 8438 DATA_SET (pcidevice_set, sym_pci_driver); 8439 #endif 8440 8441 #endif /* FreeBSD_Bus_Io_Abstraction */ 8442 8443 static struct sym_pci_chip sym_pci_dev_table[] = { 8444 {PCI_ID_SYM53C810, 0x0f, "810", 4, 8, 4, 64, 8445 FE_ERL} 8446 , 8447 #ifdef SYM_DEBUG_GENERIC_SUPPORT 8448 {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1, 8449 FE_BOF} 8450 , 8451 #else 8452 {PCI_ID_SYM53C810, 0xff, "810a", 4, 8, 4, 1, 8453 FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF} 8454 , 8455 #endif 8456 {PCI_ID_SYM53C815, 0xff, "815", 4, 8, 4, 64, 8457 FE_BOF|FE_ERL} 8458 , 8459 {PCI_ID_SYM53C825, 0x0f, "825", 6, 8, 4, 64, 8460 FE_WIDE|FE_BOF|FE_ERL|FE_DIFF} 8461 , 8462 {PCI_ID_SYM53C825, 
0xff, "825a", 6, 8, 4, 2, 8463 FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF} 8464 , 8465 {PCI_ID_SYM53C860, 0xff, "860", 4, 8, 5, 1, 8466 FE_ULTRA|FE_CLK80|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN} 8467 , 8468 {PCI_ID_SYM53C875, 0x01, "875", 6, 16, 5, 2, 8469 FE_WIDE|FE_ULTRA|FE_CLK80|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| 8470 FE_RAM|FE_DIFF} 8471 , 8472 {PCI_ID_SYM53C875, 0xff, "875", 6, 16, 5, 2, 8473 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| 8474 FE_RAM|FE_DIFF} 8475 , 8476 {PCI_ID_SYM53C875_2, 0xff, "875", 6, 16, 5, 2, 8477 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| 8478 FE_RAM|FE_DIFF} 8479 , 8480 {PCI_ID_SYM53C885, 0xff, "885", 6, 16, 5, 2, 8481 FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| 8482 FE_RAM|FE_DIFF} 8483 , 8484 #ifdef SYM_DEBUG_GENERIC_SUPPORT 8485 {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2, 8486 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS| 8487 FE_RAM|FE_LCKFRQ} 8488 , 8489 #else 8490 {PCI_ID_SYM53C895, 0xff, "895", 6, 31, 7, 2, 8491 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| 8492 FE_RAM|FE_LCKFRQ} 8493 , 8494 #endif 8495 {PCI_ID_SYM53C896, 0xff, "896", 6, 31, 7, 4, 8496 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| 8497 FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} 8498 , 8499 {PCI_ID_SYM53C895A, 0xff, "895a", 6, 31, 7, 4, 8500 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| 8501 FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ} 8502 , 8503 {PCI_ID_LSI53C1010, 0x00, "1010", 6, 62, 7, 8, 8504 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| 8505 FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_PCI66|FE_CRC| 8506 FE_C10} 8507 , 8508 {PCI_ID_LSI53C1010, 0xff, "1010", 6, 62, 7, 8, 8509 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| 8510 
FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC| 8511 FE_C10|FE_U3EN} 8512 , 8513 {PCI_ID_LSI53C1010_2, 0xff, "1010", 6, 62, 7, 8, 8514 FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN| 8515 FE_RAM|FE_RAM8K|FE_64BIT|FE_IO256|FE_NOPM|FE_LEDC|FE_PCI66|FE_CRC| 8516 FE_C10|FE_U3EN} 8517 , 8518 {PCI_ID_LSI53C1510D, 0xff, "1510d", 6, 31, 7, 4, 8519 FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN| 8520 FE_RAM|FE_IO256|FE_LEDC} 8521 }; 8522 8523 #define sym_pci_num_devs \ 8524 (sizeof(sym_pci_dev_table) / sizeof(sym_pci_dev_table[0])) 8525 8526 /* 8527 * Look up the chip table. 8528 * 8529 * Return a pointer to the chip entry if found, 8530 * zero otherwise. 8531 */ 8532 static struct sym_pci_chip * 8533 #ifdef FreeBSD_Bus_Io_Abstraction 8534 sym_find_pci_chip(device_t dev) 8535 #else 8536 sym_find_pci_chip(pcici_t pci_tag) 8537 #endif 8538 { 8539 struct sym_pci_chip *chip; 8540 int i; 8541 u_short device_id; 8542 u_char revision; 8543 8544 #ifdef FreeBSD_Bus_Io_Abstraction 8545 if (pci_get_vendor(dev) != PCI_VENDOR_NCR) 8546 return 0; 8547 8548 device_id = pci_get_device(dev); 8549 revision = pci_get_revid(dev); 8550 #else 8551 if (pci_cfgread(pci_tag, PCIR_VENDOR, 2) != PCI_VENDOR_NCR) 8552 return 0; 8553 8554 device_id = pci_cfgread(pci_tag, PCIR_DEVICE, 2); 8555 revision = pci_cfgread(pci_tag, PCIR_REVID, 1); 8556 #endif 8557 8558 for (i = 0; i < sym_pci_num_devs; i++) { 8559 chip = &sym_pci_dev_table[i]; 8560 if (device_id != chip->device_id) 8561 continue; 8562 if (revision > chip->revision_id) 8563 continue; 8564 return chip; 8565 } 8566 8567 return 0; 8568 } 8569 8570 /* 8571 * Tell upper layer if the chip is supported. 
8572 */ 8573 #ifdef FreeBSD_Bus_Io_Abstraction 8574 static int 8575 sym_pci_probe(device_t dev) 8576 { 8577 struct sym_pci_chip *chip; 8578 8579 chip = sym_find_pci_chip(dev); 8580 if (chip && sym_find_firmware(chip)) { 8581 device_set_desc(dev, chip->name); 8582 return (chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP)? -2000 : 0; 8583 } 8584 return ENXIO; 8585 } 8586 #else /* Pre-FreeBSD_Bus_Io_Abstraction */ 8587 static const char * 8588 sym_pci_probe(pcici_t pci_tag, pcidi_t type) 8589 { 8590 struct sym_pci_chip *chip; 8591 8592 chip = sym_find_pci_chip(pci_tag); 8593 if (chip && sym_find_firmware(chip)) { 8594 #if NNCR > 0 8595 /* Only claim chips we are allowed to take precedence over the ncr */ 8596 if (!(chip->lp_probe_bit & SYM_SETUP_LP_PROBE_MAP)) 8597 #else 8598 if (1) 8599 #endif 8600 return chip->name; 8601 } 8602 return 0; 8603 } 8604 #endif 8605 8606 /* 8607 * Attach a sym53c8xx device. 8608 */ 8609 #ifdef FreeBSD_Bus_Io_Abstraction 8610 static int 8611 sym_pci_attach(device_t dev) 8612 #else 8613 static void 8614 sym_pci_attach(pcici_t pci_tag, int unit) 8615 { 8616 int err = sym_pci_attach2(pci_tag, unit); 8617 if (err) 8618 printf("sym: failed to attach unit %d - err=%d.\n", unit, err); 8619 } 8620 static int 8621 sym_pci_attach2(pcici_t pci_tag, int unit) 8622 #endif 8623 { 8624 struct sym_pci_chip *chip; 8625 u_short command; 8626 u_char cachelnsz; 8627 struct sym_hcb *np = 0; 8628 struct sym_nvram nvram; 8629 struct sym_fw *fw = 0; 8630 int i; 8631 #ifdef FreeBSD_Bus_Dma_Abstraction 8632 bus_dma_tag_t bus_dmat; 8633 8634 /* 8635 * I expected to be told about a parent 8636 * DMA tag, but didn't find any. 8637 */ 8638 bus_dmat = NULL; 8639 #endif 8640 8641 /* 8642 * Only probed devices should be attached. 8643 * We just enjoy being paranoid. 
:) 8644 */ 8645 #ifdef FreeBSD_Bus_Io_Abstraction 8646 chip = sym_find_pci_chip(dev); 8647 #else 8648 chip = sym_find_pci_chip(pci_tag); 8649 #endif 8650 if (chip == NULL || (fw = sym_find_firmware(chip)) == NULL) 8651 return (ENXIO); 8652 8653 /* 8654 * Allocate immediately the host control block, 8655 * since we are only expecting to succeed. :) 8656 * We keep track in the HCB of all the resources that 8657 * are to be released on error. 8658 */ 8659 #ifdef FreeBSD_Bus_Dma_Abstraction 8660 np = __sym_calloc_dma(bus_dmat, sizeof(*np), "HCB"); 8661 if (np) 8662 np->bus_dmat = bus_dmat; 8663 else 8664 goto attach_failed; 8665 #else 8666 np = sym_calloc_dma(sizeof(*np), "HCB"); 8667 if (!np) 8668 goto attach_failed; 8669 #endif 8670 8671 /* 8672 * Copy some useful infos to the HCB. 8673 */ 8674 np->hcb_ba = vtobus(np); 8675 np->verbose = bootverbose; 8676 #ifdef FreeBSD_Bus_Io_Abstraction 8677 np->device = dev; 8678 np->unit = device_get_unit(dev); 8679 np->device_id = pci_get_device(dev); 8680 np->revision_id = pci_get_revid(dev); 8681 #else 8682 np->pci_tag = pci_tag; 8683 np->unit = unit; 8684 np->device_id = pci_cfgread(pci_tag, PCIR_DEVICE, 2); 8685 np->revision_id = pci_cfgread(pci_tag, PCIR_REVID, 1); 8686 #endif 8687 np->features = chip->features; 8688 np->clock_divn = chip->nr_divisor; 8689 np->maxoffs = chip->offset_max; 8690 np->maxburst = chip->burst_max; 8691 np->scripta_sz = fw->a_size; 8692 np->scriptb_sz = fw->b_size; 8693 np->fw_setup = fw->setup; 8694 np->fw_patch = fw->patch; 8695 np->fw_name = fw->name; 8696 8697 /* 8698 * Edit its name. 8699 */ 8700 snprintf(np->inst_name, sizeof(np->inst_name), "sym%d", np->unit); 8701 8702 /* 8703 * Allocate a tag for the DMA of user data. 
8704 */ 8705 #ifdef FreeBSD_Bus_Dma_Abstraction 8706 if (bus_dma_tag_create(np->bus_dmat, 1, (1<<24), 8707 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 8708 NULL, NULL, 8709 BUS_SPACE_MAXSIZE, SYM_CONF_MAX_SG, 8710 (1<<24), 0, &np->data_dmat)) { 8711 device_printf(dev, "failed to create DMA tag.\n"); 8712 goto attach_failed; 8713 } 8714 #endif 8715 /* 8716 * Read and apply some fix-ups to the PCI COMMAND 8717 * register. We want the chip to be enabled for: 8718 * - BUS mastering 8719 * - PCI parity checking (reporting would also be fine) 8720 * - Write And Invalidate. 8721 */ 8722 #ifdef FreeBSD_Bus_Io_Abstraction 8723 command = pci_read_config(dev, PCIR_COMMAND, 2); 8724 #else 8725 command = pci_cfgread(pci_tag, PCIR_COMMAND, 2); 8726 #endif 8727 command |= PCIM_CMD_BUSMASTEREN; 8728 command |= PCIM_CMD_PERRESPEN; 8729 command |= /* PCIM_CMD_MWIEN */ 0x0010; 8730 #ifdef FreeBSD_Bus_Io_Abstraction 8731 pci_write_config(dev, PCIR_COMMAND, command, 2); 8732 #else 8733 pci_cfgwrite(pci_tag, PCIR_COMMAND, command, 2); 8734 #endif 8735 8736 /* 8737 * Let the device know about the cache line size, 8738 * if it doesn't yet. 8739 */ 8740 #ifdef FreeBSD_Bus_Io_Abstraction 8741 cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 8742 #else 8743 cachelnsz = pci_cfgread(pci_tag, PCIR_CACHELNSZ, 1); 8744 #endif 8745 if (!cachelnsz) { 8746 cachelnsz = 8; 8747 #ifdef FreeBSD_Bus_Io_Abstraction 8748 pci_write_config(dev, PCIR_CACHELNSZ, cachelnsz, 1); 8749 #else 8750 pci_cfgwrite(pci_tag, PCIR_CACHELNSZ, cachelnsz, 1); 8751 #endif 8752 } 8753 8754 /* 8755 * Alloc/get/map/retrieve everything that deals with MMIO. 
8756 */ 8757 #ifdef FreeBSD_Bus_Io_Abstraction 8758 if ((command & PCIM_CMD_MEMEN) != 0) { 8759 int regs_id = SYM_PCI_MMIO; 8760 np->mmio_res = bus_alloc_resource(dev, SYS_RES_MEMORY, ®s_id, 8761 0, ~0, 1, RF_ACTIVE); 8762 } 8763 if (!np->mmio_res) { 8764 device_printf(dev, "failed to allocate MMIO resources\n"); 8765 goto attach_failed; 8766 } 8767 np->mmio_bsh = rman_get_bushandle(np->mmio_res); 8768 np->mmio_tag = rman_get_bustag(np->mmio_res); 8769 np->mmio_pa = rman_get_start(np->mmio_res); 8770 np->mmio_va = (vm_offset_t) rman_get_virtual(np->mmio_res); 8771 np->mmio_ba = np->mmio_pa; 8772 #else 8773 if ((command & PCIM_CMD_MEMEN) != 0) { 8774 vm_offset_t vaddr, paddr; 8775 if (!pci_map_mem(pci_tag, SYM_PCI_MMIO, &vaddr, &paddr)) { 8776 printf("%s: failed to map MMIO window\n", sym_name(np)); 8777 goto attach_failed; 8778 } 8779 np->mmio_va = vaddr; 8780 np->mmio_pa = paddr; 8781 np->mmio_ba = paddr; 8782 } 8783 #endif 8784 8785 /* 8786 * Allocate the IRQ. 8787 */ 8788 #ifdef FreeBSD_Bus_Io_Abstraction 8789 i = 0; 8790 np->irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &i, 8791 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); 8792 if (!np->irq_res) { 8793 device_printf(dev, "failed to allocate IRQ resource\n"); 8794 goto attach_failed; 8795 } 8796 #endif 8797 8798 #ifdef SYM_CONF_IOMAPPED 8799 /* 8800 * User want us to use normal IO with PCI. 8801 * Alloc/get/map/retrieve everything that deals with IO. 
8802 */ 8803 #ifdef FreeBSD_Bus_Io_Abstraction 8804 if ((command & PCI_COMMAND_IO_ENABLE) != 0) { 8805 int regs_id = SYM_PCI_IO; 8806 np->io_res = bus_alloc_resource(dev, SYS_RES_IOPORT, ®s_id, 8807 0, ~0, 1, RF_ACTIVE); 8808 } 8809 if (!np->io_res) { 8810 device_printf(dev, "failed to allocate IO resources\n"); 8811 goto attach_failed; 8812 } 8813 np->io_bsh = rman_get_bushandle(np->io_res); 8814 np->io_tag = rman_get_bustag(np->io_res); 8815 np->io_port = rman_get_start(np->io_res); 8816 #else 8817 if ((command & PCI_COMMAND_IO_ENABLE) != 0) { 8818 pci_port_t io_port; 8819 if (!pci_map_port (pci_tag, SYM_PCI_IO, &io_port)) { 8820 printf("%s: failed to map IO window\n", sym_name(np)); 8821 goto attach_failed; 8822 } 8823 np->io_port = io_port; 8824 } 8825 #endif 8826 8827 #endif /* SYM_CONF_IOMAPPED */ 8828 8829 /* 8830 * If the chip has RAM. 8831 * Alloc/get/map/retrieve the corresponding resources. 8832 */ 8833 if ((np->features & (FE_RAM|FE_RAM8K)) && 8834 (command & PCIM_CMD_MEMEN) != 0) { 8835 #ifdef FreeBSD_Bus_Io_Abstraction 8836 int regs_id = SYM_PCI_RAM; 8837 if (np->features & FE_64BIT) 8838 regs_id = SYM_PCI_RAM64; 8839 np->ram_res = bus_alloc_resource(dev, SYS_RES_MEMORY, ®s_id, 8840 0, ~0, 1, RF_ACTIVE); 8841 if (!np->ram_res) { 8842 device_printf(dev,"failed to allocate RAM resources\n"); 8843 goto attach_failed; 8844 } 8845 np->ram_id = regs_id; 8846 np->ram_bsh = rman_get_bushandle(np->ram_res); 8847 np->ram_tag = rman_get_bustag(np->ram_res); 8848 np->ram_pa = rman_get_start(np->ram_res); 8849 np->ram_va = (vm_offset_t) rman_get_virtual(np->ram_res); 8850 np->ram_ba = np->ram_pa; 8851 #else 8852 vm_offset_t vaddr, paddr; 8853 int regs_id = SYM_PCI_RAM; 8854 if (np->features & FE_64BIT) 8855 regs_id = SYM_PCI_RAM64; 8856 if (!pci_map_mem(pci_tag, regs_id, &vaddr, &paddr)) { 8857 printf("%s: failed to map RAM window\n", sym_name(np)); 8858 goto attach_failed; 8859 } 8860 np->ram_va = vaddr; 8861 np->ram_pa = paddr; 8862 np->ram_ba = paddr; 8863 
#endif 8864 } 8865 8866 /* 8867 * Save setting of some IO registers, so we will 8868 * be able to probe specific implementations. 8869 */ 8870 sym_save_initial_setting (np); 8871 8872 /* 8873 * Reset the chip now, since it has been reported 8874 * that SCSI clock calibration may not work properly 8875 * if the chip is currently active. 8876 */ 8877 sym_chip_reset (np); 8878 8879 /* 8880 * Try to read the user set-up. 8881 */ 8882 (void) sym_read_nvram(np, &nvram); 8883 8884 /* 8885 * Prepare controller and devices settings, according 8886 * to chip features, user set-up and driver set-up. 8887 */ 8888 (void) sym_prepare_setting(np, &nvram); 8889 8890 /* 8891 * Check the PCI clock frequency. 8892 * Must be performed after prepare_setting since it destroys 8893 * STEST1 that is used to probe for the clock doubler. 8894 */ 8895 i = sym_getpciclock(np); 8896 if (i > 37000) 8897 #ifdef FreeBSD_Bus_Io_Abstraction 8898 device_printf(dev, "PCI BUS clock seems too high: %u KHz.\n",i); 8899 #else 8900 printf("%s: PCI BUS clock seems too high: %u KHz.\n", 8901 sym_name(np), i); 8902 #endif 8903 8904 /* 8905 * Allocate the start queue. 8906 */ 8907 np->squeue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE"); 8908 if (!np->squeue) 8909 goto attach_failed; 8910 np->squeue_ba = vtobus(np->squeue); 8911 8912 /* 8913 * Allocate the done queue. 8914 */ 8915 np->dqueue = (u32 *) sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE"); 8916 if (!np->dqueue) 8917 goto attach_failed; 8918 np->dqueue_ba = vtobus(np->dqueue); 8919 8920 /* 8921 * Allocate the target bus address array. 8922 */ 8923 np->targtbl = (u32 *) sym_calloc_dma(256, "TARGTBL"); 8924 if (!np->targtbl) 8925 goto attach_failed; 8926 np->targtbl_ba = cpu_to_scr(vtobus(np->targtbl)); 8927 8928 /* 8929 * Allocate SCRIPTS areas. 
8930 */ 8931 np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0"); 8932 np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0"); 8933 if (!np->scripta0 || !np->scriptb0) 8934 goto attach_failed; 8935 8936 /* 8937 * Initialyze the CCB free and busy queues. 8938 * Allocate some CCB. We need at least ONE. 8939 */ 8940 sym_que_init(&np->free_ccbq); 8941 sym_que_init(&np->busy_ccbq); 8942 sym_que_init(&np->comp_ccbq); 8943 if (!sym_alloc_ccb(np)) 8944 goto attach_failed; 8945 8946 /* 8947 * Initialyze the CAM CCB pending queue. 8948 */ 8949 sym_que_init(&np->cam_ccbq); 8950 8951 /* 8952 * Calculate BUS addresses where we are going 8953 * to load the SCRIPTS. 8954 */ 8955 np->scripta_ba = vtobus(np->scripta0); 8956 np->scriptb_ba = vtobus(np->scriptb0); 8957 np->scriptb0_ba = np->scriptb_ba; 8958 8959 if (np->ram_ba) { 8960 np->scripta_ba = np->ram_ba; 8961 if (np->features & FE_RAM8K) { 8962 np->ram_ws = 8192; 8963 np->scriptb_ba = np->scripta_ba + 4096; 8964 #if BITS_PER_LONG > 32 8965 np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32); 8966 #endif 8967 } 8968 else 8969 np->ram_ws = 4096; 8970 } 8971 8972 /* 8973 * Copy scripts to controller instance. 8974 */ 8975 bcopy(fw->a_base, np->scripta0, np->scripta_sz); 8976 bcopy(fw->b_base, np->scriptb0, np->scriptb_sz); 8977 8978 /* 8979 * Setup variable parts in scripts and compute 8980 * scripts bus addresses used from the C code. 8981 */ 8982 np->fw_setup(np, fw); 8983 8984 /* 8985 * Bind SCRIPTS with physical addresses usable by the 8986 * SCRIPTS processor (as seen from the BUS = BUS addresses). 8987 */ 8988 sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz); 8989 sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz); 8990 8991 #ifdef SYM_CONF_IARB_SUPPORT 8992 /* 8993 * If user wants IARB to be set when we win arbitration 8994 * and have other jobs, compute the max number of consecutive 8995 * settings of IARB hints before we leave devices a chance to 8996 * arbitrate for reselection. 
8997 */ 8998 #ifdef SYM_SETUP_IARB_MAX 8999 np->iarb_max = SYM_SETUP_IARB_MAX; 9000 #else 9001 np->iarb_max = 4; 9002 #endif 9003 #endif 9004 9005 /* 9006 * Prepare the idle and invalid task actions. 9007 */ 9008 np->idletask.start = cpu_to_scr(SCRIPTA_BA (np, idle)); 9009 np->idletask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); 9010 np->idletask_ba = vtobus(&np->idletask); 9011 9012 np->notask.start = cpu_to_scr(SCRIPTA_BA (np, idle)); 9013 np->notask.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); 9014 np->notask_ba = vtobus(&np->notask); 9015 9016 np->bad_itl.start = cpu_to_scr(SCRIPTA_BA (np, idle)); 9017 np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA (np, bad_i_t_l)); 9018 np->bad_itl_ba = vtobus(&np->bad_itl); 9019 9020 np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA (np, idle)); 9021 np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA (np,bad_i_t_l_q)); 9022 np->bad_itlq_ba = vtobus(&np->bad_itlq); 9023 9024 /* 9025 * Allocate and prepare the lun JUMP table that is used 9026 * for a target prior the probing of devices (bad lun table). 9027 * A private table will be allocated for the target on the 9028 * first INQUIRY response received. 9029 */ 9030 np->badluntbl = sym_calloc_dma(256, "BADLUNTBL"); 9031 if (!np->badluntbl) 9032 goto attach_failed; 9033 9034 np->badlun_sa = cpu_to_scr(SCRIPTB_BA (np, resel_bad_lun)); 9035 for (i = 0 ; i < 64 ; i++) /* 64 luns/target, no less */ 9036 np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa)); 9037 9038 /* 9039 * Prepare the bus address array that contains the bus 9040 * address of each target control bloc. 9041 * For now, assume all logical unit are wrong. :) 9042 */ 9043 for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { 9044 np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i])); 9045 np->target[i].head.luntbl_sa = 9046 cpu_to_scr(vtobus(np->badluntbl)); 9047 np->target[i].head.lun0_sa = 9048 cpu_to_scr(vtobus(&np->badlun_sa)); 9049 } 9050 9051 /* 9052 * Now check the cache handling of the pci chipset. 
9053 */ 9054 if (sym_snooptest (np)) { 9055 #ifdef FreeBSD_Bus_Io_Abstraction 9056 device_printf(dev, "CACHE INCORRECTLY CONFIGURED.\n"); 9057 #else 9058 printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np)); 9059 #endif 9060 goto attach_failed; 9061 }; 9062 9063 /* 9064 * Now deal with CAM. 9065 * Hopefully, we will succeed with that one.:) 9066 */ 9067 if (!sym_cam_attach(np)) 9068 goto attach_failed; 9069 9070 /* 9071 * Sigh! we are done. 9072 */ 9073 return 0; 9074 9075 /* 9076 * We have failed. 9077 * We will try to free all the resources we have 9078 * allocated, but if we are a boot device, this 9079 * will not help that much.;) 9080 */ 9081 attach_failed: 9082 if (np) 9083 sym_pci_free(np); 9084 return ENXIO; 9085 } 9086 9087 /* 9088 * Free everything that have been allocated for this device. 9089 */ 9090 static void sym_pci_free(hcb_p np) 9091 { 9092 SYM_QUEHEAD *qp; 9093 ccb_p cp; 9094 tcb_p tp; 9095 lcb_p lp; 9096 int target, lun; 9097 int s; 9098 9099 /* 9100 * First free CAM resources. 9101 */ 9102 s = splcam(); 9103 sym_cam_free(np); 9104 splx(s); 9105 9106 /* 9107 * Now every should be quiet for us to 9108 * free other resources. 9109 */ 9110 #ifdef FreeBSD_Bus_Io_Abstraction 9111 if (np->ram_res) 9112 bus_release_resource(np->device, SYS_RES_MEMORY, 9113 np->ram_id, np->ram_res); 9114 if (np->mmio_res) 9115 bus_release_resource(np->device, SYS_RES_MEMORY, 9116 SYM_PCI_MMIO, np->mmio_res); 9117 if (np->io_res) 9118 bus_release_resource(np->device, SYS_RES_IOPORT, 9119 SYM_PCI_IO, np->io_res); 9120 if (np->irq_res) 9121 bus_release_resource(np->device, SYS_RES_IRQ, 9122 0, np->irq_res); 9123 #else 9124 /* 9125 * YEAH!!! 9126 * It seems there is no means to free MMIO resources. 
9127 */ 9128 #endif 9129 9130 if (np->scriptb0) 9131 sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0"); 9132 if (np->scripta0) 9133 sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0"); 9134 if (np->squeue) 9135 sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE"); 9136 if (np->dqueue) 9137 sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE"); 9138 9139 while ((qp = sym_remque_head(&np->free_ccbq)) != 0) { 9140 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); 9141 #ifdef FreeBSD_Bus_Dma_Abstraction 9142 bus_dmamap_destroy(np->data_dmat, cp->dmamap); 9143 #endif 9144 sym_mfree_dma(cp->sns_bbuf, SYM_SNS_BBUF_LEN, "SNS_BBUF"); 9145 sym_mfree_dma(cp, sizeof(*cp), "CCB"); 9146 } 9147 9148 if (np->badluntbl) 9149 sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL"); 9150 9151 for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { 9152 tp = &np->target[target]; 9153 for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) { 9154 lp = sym_lp(np, tp, lun); 9155 if (!lp) 9156 continue; 9157 if (lp->itlq_tbl) 9158 sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, 9159 "ITLQ_TBL"); 9160 if (lp->cb_tags) 9161 sym_mfree(lp->cb_tags, SYM_CONF_MAX_TASK, 9162 "CB_TAGS"); 9163 sym_mfree_dma(lp, sizeof(*lp), "LCB"); 9164 } 9165 #if SYM_CONF_MAX_LUN > 1 9166 if (tp->lunmp) 9167 sym_mfree(tp->lunmp, SYM_CONF_MAX_LUN*sizeof(lcb_p), 9168 "LUNMP"); 9169 #endif 9170 } 9171 if (np->targtbl) 9172 sym_mfree_dma(np->targtbl, 256, "TARGTBL"); 9173 #ifdef FreeBSD_Bus_Dma_Abstraction 9174 if (np->data_dmat) 9175 bus_dma_tag_destroy(np->data_dmat); 9176 #endif 9177 sym_mfree_dma(np, sizeof(*np), "HCB"); 9178 } 9179 9180 /* 9181 * Allocate CAM resources and register a bus to CAM. 9182 */ 9183 int sym_cam_attach(hcb_p np) 9184 { 9185 struct cam_devq *devq = 0; 9186 struct cam_sim *sim = 0; 9187 struct cam_path *path = 0; 9188 int err, s; 9189 9190 s = splcam(); 9191 9192 /* 9193 * Establish our interrupt handler. 
9194 */ 9195 #ifdef FreeBSD_Bus_Io_Abstraction 9196 err = bus_setup_intr(np->device, np->irq_res, INTR_TYPE_CAM, 9197 sym_intr, np, &np->intr); 9198 if (err) { 9199 device_printf(np->device, "bus_setup_intr() failed: %d\n", 9200 err); 9201 goto fail; 9202 } 9203 #else 9204 err = 0; 9205 if (!pci_map_int (np->pci_tag, sym_intr, np, &cam_imask)) { 9206 printf("%s: failed to map interrupt\n", sym_name(np)); 9207 goto fail; 9208 } 9209 #endif 9210 9211 /* 9212 * Create the device queue for our sym SIM. 9213 */ 9214 devq = cam_simq_alloc(SYM_CONF_MAX_START); 9215 if (!devq) 9216 goto fail; 9217 9218 /* 9219 * Construct our SIM entry. 9220 */ 9221 sim = cam_sim_alloc(sym_action, sym_poll, "sym", np, np->unit, 9222 1, SYM_SETUP_MAX_TAG, devq); 9223 if (!sim) 9224 goto fail; 9225 devq = 0; 9226 9227 if (xpt_bus_register(sim, 0) != CAM_SUCCESS) 9228 goto fail; 9229 np->sim = sim; 9230 sim = 0; 9231 9232 if (xpt_create_path(&path, 0, 9233 cam_sim_path(np->sim), CAM_TARGET_WILDCARD, 9234 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 9235 goto fail; 9236 } 9237 np->path = path; 9238 9239 /* 9240 * Hmmm... This should be useful, but I donnot want to 9241 * know about. 9242 */ 9243 #if __FreeBSD_version < 400000 9244 #ifdef __alpha__ 9245 #ifdef FreeBSD_Bus_Io_Abstraction 9246 alpha_register_pci_scsi(pci_get_bus(np->device), 9247 pci_get_slot(np->device), np->sim); 9248 #else 9249 alpha_register_pci_scsi(pci_tag->bus, pci_tag->slot, np->sim); 9250 #endif 9251 #endif 9252 #endif 9253 9254 #if 0 9255 /* 9256 * Establish our async notification handler. 9257 */ 9258 { 9259 struct ccb_setasync csa; 9260 xpt_setup_ccb(&csa.ccb_h, np->path, 5); 9261 csa.ccb_h.func_code = XPT_SASYNC_CB; 9262 csa.event_enable = AC_LOST_DEVICE; 9263 csa.callback = sym_async; 9264 csa.callback_arg = np->sim; 9265 xpt_action((union ccb *)&csa); 9266 } 9267 #endif 9268 /* 9269 * Start the chip now, without resetting the BUS, since 9270 * it seems that this must stay under control of CAM. 
9271 * With LVD/SE capable chips and BUS in SE mode, we may 9272 * get a spurious SMBC interrupt. 9273 */ 9274 sym_init (np, 0); 9275 9276 splx(s); 9277 return 1; 9278 fail: 9279 if (sim) 9280 cam_sim_free(sim, FALSE); 9281 if (devq) 9282 cam_simq_free(devq); 9283 9284 sym_cam_free(np); 9285 9286 splx(s); 9287 return 0; 9288 } 9289 9290 /* 9291 * Free everything that deals with CAM. 9292 */ 9293 void sym_cam_free(hcb_p np) 9294 { 9295 #ifdef FreeBSD_Bus_Io_Abstraction 9296 if (np->intr) 9297 bus_teardown_intr(np->device, np->irq_res, np->intr); 9298 #else 9299 /* pci_unmap_int(np->pci_tag); */ /* Does nothing */ 9300 #endif 9301 9302 if (np->sim) { 9303 xpt_bus_deregister(cam_sim_path(np->sim)); 9304 cam_sim_free(np->sim, /*free_devq*/ TRUE); 9305 } 9306 if (np->path) 9307 xpt_free_path(np->path); 9308 } 9309 9310 /*============ OPTIONNAL NVRAM SUPPORT =================*/ 9311 9312 /* 9313 * Get host setup from NVRAM. 9314 */ 9315 static void sym_nvram_setup_host (hcb_p np, struct sym_nvram *nvram) 9316 { 9317 #ifdef SYM_CONF_NVRAM_SUPPORT 9318 /* 9319 * Get parity checking, host ID, verbose mode 9320 * and miscellaneous host flags from NVRAM. 9321 */ 9322 switch(nvram->type) { 9323 case SYM_SYMBIOS_NVRAM: 9324 if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE)) 9325 np->rv_scntl0 &= ~0x0a; 9326 np->myaddr = nvram->data.Symbios.host_id & 0x0f; 9327 if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS) 9328 np->verbose += 1; 9329 if (nvram->data.Symbios.flags1 & SYMBIOS_SCAN_HI_LO) 9330 np->usrflags |= SYM_SCAN_TARGETS_HILO; 9331 if (nvram->data.Symbios.flags2 & SYMBIOS_AVOID_BUS_RESET) 9332 np->usrflags |= SYM_AVOID_BUS_RESET; 9333 break; 9334 case SYM_TEKRAM_NVRAM: 9335 np->myaddr = nvram->data.Tekram.host_id & 0x0f; 9336 break; 9337 default: 9338 break; 9339 } 9340 #endif 9341 } 9342 9343 /* 9344 * Get target setup from NVRAM. 
 */
#ifdef SYM_CONF_NVRAM_SUPPORT
static void sym_Symbios_setup_target(hcb_p np,int target, Symbios_nvram *nvram);
static void sym_Tekram_setup_target(hcb_p np,int target, Tekram_nvram *nvram);
#endif

/*
 * Dispatch per-target NVRAM setup to the format-specific handler.
 * A no-op when NVRAM support is not configured or no NVRAM was found.
 */
static void
sym_nvram_setup_target (hcb_p np, int target, struct sym_nvram *nvp)
{
#ifdef SYM_CONF_NVRAM_SUPPORT
	switch(nvp->type) {
	case SYM_SYMBIOS_NVRAM:
		sym_Symbios_setup_target (np, target, &nvp->data.Symbios);
		break;
	case SYM_TEKRAM_NVRAM:
		sym_Tekram_setup_target (np, target, &nvp->data.Tekram);
		break;
	default:
		break;
	}
#endif
}

#ifdef SYM_CONF_NVRAM_SUPPORT
/*
 * Get target set-up from Symbios format NVRAM.
 * Fills in the user-settable sync period, bus width, tag queueing
 * and the disconnect/scan flags for one target from its NVRAM entry.
 */
static void
sym_Symbios_setup_target(hcb_p np, int target, Symbios_nvram *nvram)
{
	tcb_p tp = &np->target[target];
	Symbios_target *tn = &nvram->target[target];

	/* round the NVRAM period up to a multiple of 4 and scale; 0 = async */
	tp->tinfo.user.period = tn->sync_period ? (tn->sync_period + 3) / 4 : 0;
	tp->tinfo.user.width = tn->bus_width == 0x10 ? BUS_16_BIT : BUS_8_BIT;
	tp->usrtags =
		(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? SYM_SETUP_MAX_TAG : 0;

	if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE))
		tp->usrflags &= ~SYM_DISC_ENABLED;
	if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME))
		tp->usrflags |= SYM_SCAN_BOOT_DISABLED;
	if (!(tn->flags & SYMBIOS_SCAN_LUNS))
		tp->usrflags |= SYM_SCAN_LUNS_DISABLED;
}

/*
 * Get target set-up from Tekram format NVRAM.
 */
static void
sym_Tekram_setup_target(hcb_p np, int target, Tekram_nvram *nvram)
{
	tcb_p tp = &np->target[target];
	struct Tekram_target *tn = &nvram->target[target];
	int i;

	if (tn->flags & TEKRAM_SYNC_NEGO) {
		/* sync_index selects an entry of the Tekram_sync[] table */
		i = tn->sync_index & 0xf;
		tp->tinfo.user.period = Tekram_sync[i];
	}

	tp->tinfo.user.width =
		(tn->flags & TEKRAM_WIDE_NEGO) ?
						BUS_16_BIT : BUS_8_BIT;

	if (tn->flags & TEKRAM_TAGGED_COMMANDS) {
		/* max_tags_index encodes the tag count as 2 << index */
		tp->usrtags = 2 << nvram->max_tags_index;
	}

	if (tn->flags & TEKRAM_DISCONNECT_ENABLE)
		tp->usrflags |= SYM_DISC_ENABLED;

	/* If any device does not support parity, we will not use this option */
	if (!(tn->flags & TEKRAM_PARITY_CHECK))
		np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */
}

#ifdef SYM_CONF_DEBUG_NVRAM
/*
 * Dump Symbios format NVRAM for debugging purpose.
 */
void sym_display_Symbios_nvram(hcb_p np, Symbios_nvram *nvram)
{
	int i;

	/* display Symbios nvram host data */
	printf("%s: HOST ID=%d%s%s%s%s%s%s\n",
		sym_name(np), nvram->host_id & 0x0f,
		(nvram->flags & SYMBIOS_SCAM_ENABLE)	? " SCAM"	:"",
		(nvram->flags & SYMBIOS_PARITY_ENABLE)	? " PARITY"	:"",
		(nvram->flags & SYMBIOS_VERBOSE_MSGS)	? " VERBOSE"	:"",
		(nvram->flags & SYMBIOS_CHS_MAPPING)	? " CHS_ALT"	:"",
		(nvram->flags2 & SYMBIOS_AVOID_BUS_RESET)?" NO_RESET"	:"",
		(nvram->flags1 & SYMBIOS_SCAN_HI_LO)	? " HI_LO"	:"");

	/* display Symbios nvram drive data */
	/* NOTE(review): only targets 0..14 are dumped (i < 15) — confirm
	 * whether skipping the last target entry is intentional. */
	for (i = 0 ; i < 15 ; i++) {
		struct Symbios_target *tn = &nvram->target[i];
		printf("%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n",
			sym_name(np), i,
			(tn->flags & SYMBIOS_DISCONNECT_ENABLE) ? " DISC"	: "",
			(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME) ? " SCAN_BOOT"	: "",
			(tn->flags & SYMBIOS_SCAN_LUNS)		? " SCAN_LUNS"	: "",
			(tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED)? " TCQ"	: "",
			tn->bus_width,
			tn->sync_period / 4,
			tn->timeout);
	}
}

/*
 * Dump TEKRAM format NVRAM for debugging purpose.
 */
static u_char Tekram_boot_delay[7] __initdata = {3, 5, 10, 20, 30, 60, 120};
void sym_display_Tekram_nvram(hcb_p np, Tekram_nvram *nvram)
{
	int i, tags, boot_delay;
	char *rem;

	/* display Tekram nvram host data */
	tags = 2 << nvram->max_tags_index;
	boot_delay = 0;
	/* NOTE(review): the table has 7 entries but only indices 0..5 are
	 * accepted, so the 120s entry is unreachable — confirm intent. */
	if (nvram->boot_delay_index < 6)
		boot_delay = Tekram_boot_delay[nvram->boot_delay_index];
	/* bits 7:6 of 'flags' encode the removable-media policy */
	switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS) >> 6) {
	default:
	case 0:	rem = "";			break;
	case 1: rem = " REMOVABLE=boot device";	break;
	case 2: rem = " REMOVABLE=all";		break;
	}

	printf("%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n",
		sym_name(np), nvram->host_id & 0x0f,
		(nvram->flags1 & SYMBIOS_SCAM_ENABLE)	? " SCAM"	:"",
		(nvram->flags & TEKRAM_MORE_THAN_2_DRIVES) ? " >2DRIVES":"",
		(nvram->flags & TEKRAM_DRIVES_SUP_1GB)	? " >1GB"	:"",
		(nvram->flags & TEKRAM_RESET_ON_POWER_ON) ? " RESET"	:"",
		(nvram->flags & TEKRAM_ACTIVE_NEGATION)	? " ACT_NEG"	:"",
		(nvram->flags & TEKRAM_IMMEDIATE_SEEK)	? " IMM_SEEK"	:"",
		(nvram->flags & TEKRAM_SCAN_LUNS)	? " SCAN_LUNS"	:"",
		(nvram->flags1 & TEKRAM_F2_F6_ENABLED)	? " F2_F6"	:"",
		rem, boot_delay, tags);

	/* display Tekram nvram drive data */
	for (i = 0; i <= 15; i++) {
		int sync, j;
		struct Tekram_target *tn = &nvram->target[i];
		j = tn->sync_index & 0xf;
		sync = Tekram_sync[j];
		printf("%s-%d:%s%s%s%s%s%s PERIOD=%d\n",
			sym_name(np), i,
			(tn->flags & TEKRAM_PARITY_CHECK)	? " PARITY"	: "",
			(tn->flags & TEKRAM_SYNC_NEGO)		? " SYNC"	: "",
			(tn->flags & TEKRAM_DISCONNECT_ENABLE)	? " DISC"	: "",
			(tn->flags & TEKRAM_START_CMD)		? " START"	: "",
			(tn->flags & TEKRAM_TAGGED_COMMANDS)	? " TCQ"	: "",
			(tn->flags & TEKRAM_WIDE_NEGO)		?
							" WIDE"	: "",
			sync);
	}
}
#endif	/* SYM_CONF_DEBUG_NVRAM */
#endif	/* SYM_CONF_NVRAM_SUPPORT */


/*
 * Try reading Symbios or Tekram NVRAM
 */
#ifdef SYM_CONF_NVRAM_SUPPORT
static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram);
static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram);
#endif

/*
 * Probe for NVRAM: Symbios format first, then Tekram format.
 * Records the detected format in nvp->type (0 if none) and returns it.
 */
int sym_read_nvram(hcb_p np, struct sym_nvram *nvp)
{
#ifdef SYM_CONF_NVRAM_SUPPORT
	/*
	 * Try to read SYMBIOS nvram.
	 * Try to read TEKRAM nvram if Symbios nvram not found.
	 */
	if (SYM_SETUP_SYMBIOS_NVRAM &&
	    !sym_read_Symbios_nvram (np, &nvp->data.Symbios))
		nvp->type = SYM_SYMBIOS_NVRAM;
	else if (SYM_SETUP_TEKRAM_NVRAM &&
	    !sym_read_Tekram_nvram (np, &nvp->data.Tekram))
		nvp->type = SYM_TEKRAM_NVRAM;
	else
		nvp->type = 0;
#else
	nvp->type = 0;
#endif
	return nvp->type;
}


#ifdef SYM_CONF_NVRAM_SUPPORT
/*
 * 24C16 EEPROM reading.
 *
 * GPIO0 - data in/data out
 * GPIO1 - clock
 * Symbios NVRAM wiring now also used by Tekram.
 */

#define SET_BIT 0
#define CLR_BIT 1
#define SET_CLK 2
#define CLR_CLK 3

/*
 * Set/clear data/clock bit in GPIO0:
 * data is bit 0 of GPREG, clock is bit 1. The updated value is
 * written to the chip with a 5us settle delay on each side.
 */
static void S24C16_set_bit(hcb_p np, u_char write_bit, u_char *gpreg,
			  int bit_mode)
{
	UDELAY (5);
	switch (bit_mode){
	case SET_BIT:
		*gpreg |= write_bit;
		break;
	case CLR_BIT:
		*gpreg &= 0xfe;
		break;
	case SET_CLK:
		*gpreg |= 0x02;
		break;
	case CLR_CLK:
		*gpreg &= 0xfd;
		break;

	}
	OUTB (nc_gpreg, *gpreg);
	UDELAY (5);
}

/*
 * Send START condition to NVRAM to wake it up.
 */
static void S24C16_start(hcb_p np, u_char *gpreg)
{
	/* START: data driven high then low while the clock is high. */
	S24C16_set_bit(np, 1, gpreg, SET_BIT);
	S24C16_set_bit(np, 0, gpreg, SET_CLK);
	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
}

/*
 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!!
 */
static void S24C16_stop(hcb_p np, u_char *gpreg)
{
	/* STOP: data raised while the clock is high. */
	S24C16_set_bit(np, 0, gpreg, SET_CLK);
	S24C16_set_bit(np, 1, gpreg, SET_BIT);
}

/*
 * Read or write a bit to the NVRAM,
 * read if GPIO0 input else write if GPIO0 output
 */
static void S24C16_do_bit(hcb_p np, u_char *read_bit, u_char write_bit,
			  u_char *gpreg)
{
	S24C16_set_bit(np, write_bit, gpreg, SET_BIT);
	S24C16_set_bit(np, 0, gpreg, SET_CLK);
	if (read_bit)
		*read_bit = INB (nc_gpreg);	/* sample while clock is high */
	S24C16_set_bit(np, 0, gpreg, CLR_CLK);
	S24C16_set_bit(np, 0, gpreg, CLR_BIT);
}

/*
 * Output an ACK to the NVRAM after reading,
 * change GPIO0 to output and when done back to an input
 */
static void S24C16_write_ack(hcb_p np, u_char write_bit, u_char *gpreg,
			     u_char *gpcntl)
{
	OUTB (nc_gpcntl, *gpcntl & 0xfe);	/* GPIO0 -> output */
	S24C16_do_bit(np, 0, write_bit, gpreg);
	OUTB (nc_gpcntl, *gpcntl);		/* restore direction */
}

/*
 * Input an ACK from NVRAM after writing,
 * change GPIO0 to input and when done back to an output
 */
static void S24C16_read_ack(hcb_p np, u_char *read_bit, u_char *gpreg,
			    u_char *gpcntl)
{
	OUTB (nc_gpcntl, *gpcntl | 0x01);	/* GPIO0 -> input */
	S24C16_do_bit(np, read_bit, 1, gpreg);
	OUTB (nc_gpcntl, *gpcntl);		/* restore direction */
}

/*
 * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK,
 * GPIO0 must already be set as an output
 */
static void S24C16_write_byte(hcb_p np, u_char *ack_data, u_char write_data,
			      u_char *gpreg, u_char *gpcntl)
{
	int x;

	/* clock the byte out, most significant bit first */
	for (x = 0; x < 8; x++)
		S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg);

	S24C16_read_ack(np, ack_data, gpreg, gpcntl);
}

/*
 * READ a byte from the NVRAM and then send an ACK to say we have got it,
 * GPIO0 must already be set as an input
 */
static void S24C16_read_byte(hcb_p np, u_char *read_data, u_char ack_data,
			     u_char *gpreg, u_char *gpcntl)
{
	int x;
	u_char read_bit;

	*read_data = 0;
	/* clock the byte in, most significant bit first */
	for (x = 0; x < 8; x++) {
		S24C16_do_bit(np, &read_bit, 1, gpreg);
		*read_data |= ((read_bit & 0x01) << (7 - x));
	}

	S24C16_write_ack(np, ack_data, gpreg, gpcntl);
}

/*
 * Read 'len' bytes starting at 'offset'.
 * Returns 0 on success, 1 if the device failed to ACK any phase.
 */
static int sym_read_S24C16_nvram (hcb_p np, int offset, u_char *data, int len)
{
	u_char gpcntl, gpreg;
	u_char old_gpcntl, old_gpreg;
	u_char ack_data;
	int retv = 1;
	int x;

	/* save current state of GPCNTL and GPREG */
	old_gpreg = INB (nc_gpreg);
	old_gpcntl = INB (nc_gpcntl);
	gpcntl = old_gpcntl & 0xfc;	/* GPIO0/1 as outputs */

	/* set up GPREG & GPCNTL to set GPIO0 and GPIO1 in to known state */
	OUTB (nc_gpreg, old_gpreg);
	OUTB (nc_gpcntl, gpcntl);

	/* this is to set NVRAM into a known state with GPIO0/1 both low */
	gpreg = old_gpreg;
	S24C16_set_bit(np, 0, &gpreg, CLR_CLK);
	S24C16_set_bit(np, 0, &gpreg, CLR_BIT);

	/* now set NVRAM inactive with GPIO0/1 both high */
	S24C16_stop(np, &gpreg);

	/* activate NVRAM */
	S24C16_start(np, &gpreg);

	/* write device code and random address MSB */
	S24C16_write_byte(np, &ack_data,
		0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* write random address LSB */
	S24C16_write_byte(np, &ack_data,
		offset & 0xff, &gpreg, &gpcntl);
	if (ack_data & 0x01)
		goto out;

	/* regenerate START state to set up for reading */
	S24C16_start(np,
&gpreg); 9716 9717 /* rewrite device code and address MSB with read bit set (lsb = 0x01) */ 9718 S24C16_write_byte(np, &ack_data, 9719 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); 9720 if (ack_data & 0x01) 9721 goto out; 9722 9723 /* now set up GPIO0 for inputting data */ 9724 gpcntl |= 0x01; 9725 OUTB (nc_gpcntl, gpcntl); 9726 9727 /* input all requested data - only part of total NVRAM */ 9728 for (x = 0; x < len; x++) 9729 S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl); 9730 9731 /* finally put NVRAM back in inactive mode */ 9732 gpcntl &= 0xfe; 9733 OUTB (nc_gpcntl, gpcntl); 9734 S24C16_stop(np, &gpreg); 9735 retv = 0; 9736 out: 9737 /* return GPIO0/1 to original states after having accessed NVRAM */ 9738 OUTB (nc_gpcntl, old_gpcntl); 9739 OUTB (nc_gpreg, old_gpreg); 9740 9741 return retv; 9742 } 9743 9744 #undef SET_BIT 0 9745 #undef CLR_BIT 1 9746 #undef SET_CLK 2 9747 #undef CLR_CLK 3 9748 9749 /* 9750 * Try reading Symbios NVRAM. 9751 * Return 0 if OK. 9752 */ 9753 static int sym_read_Symbios_nvram (hcb_p np, Symbios_nvram *nvram) 9754 { 9755 static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0}; 9756 u_char *data = (u_char *) nvram; 9757 int len = sizeof(*nvram); 9758 u_short csum; 9759 int x; 9760 9761 /* probe the 24c16 and read the SYMBIOS 24c16 area */ 9762 if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len)) 9763 return 1; 9764 9765 /* check valid NVRAM signature, verify byte count and checksum */ 9766 if (nvram->type != 0 || 9767 bcmp(nvram->trailer, Symbios_trailer, 6) || 9768 nvram->byte_count != len - 12) 9769 return 1; 9770 9771 /* verify checksum */ 9772 for (x = 6, csum = 0; x < len - 6; x++) 9773 csum += data[x]; 9774 if (csum != nvram->checksum) 9775 return 1; 9776 9777 return 0; 9778 } 9779 9780 /* 9781 * 93C46 EEPROM reading. 9782 * 9783 * GPOI0 - data in 9784 * GPIO1 - data out 9785 * GPIO2 - clock 9786 * GPIO4 - chip select 9787 * 9788 * Used by Tekram. 
 */

/*
 * Pulse the clock line (GPIO2, mask 0x04) high then low.
 */
static void T93C46_Clk(hcb_p np, u_char *gpreg)
{
	OUTB (nc_gpreg, *gpreg | 0x04);
	UDELAY (2);
	OUTB (nc_gpreg, *gpreg);
}

/*
 * Read bit from NVRAM
 */
static void T93C46_Read_Bit(hcb_p np, u_char *read_bit, u_char *gpreg)
{
	UDELAY (2);
	T93C46_Clk(np, gpreg);
	*read_bit = INB (nc_gpreg);	/* data in is GPIO0 (bit 0) */
}

/*
 * Write bit to GPIO0
 */
static void T93C46_Write_Bit(hcb_p np, u_char write_bit, u_char *gpreg)
{
	/* data out is GPIO1 (mask 0x02) */
	if (write_bit & 0x01)
		*gpreg |= 0x02;
	else
		*gpreg &= 0xfd;

	*gpreg |= 0x10;		/* keep chip select (GPIO4) asserted */

	OUTB (nc_gpreg, *gpreg);
	UDELAY (2);

	T93C46_Clk(np, gpreg);
}

/*
 * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!!
 */
static void T93C46_Stop(hcb_p np, u_char *gpreg)
{
	*gpreg &= 0xef;		/* deassert chip select (GPIO4) */
	OUTB (nc_gpreg, *gpreg);
	UDELAY (2);

	T93C46_Clk(np, gpreg);
}

/*
 * Send read command and address to NVRAM
 */
static void T93C46_Send_Command(hcb_p np, u_short write_data,
				u_char *read_bit, u_char *gpreg)
{
	int x;

	/* send 9 bits, start bit (1), command (2), address (6) */
	for (x = 0; x < 9; x++)
		T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);

	*read_bit = INB (nc_gpreg);	/* ready/busy status from the part */
}

/*
 * READ 2 bytes from the NVRAM
 */
static void T93C46_Read_Word(hcb_p np, u_short *nvram_data, u_char *gpreg)
{
	int x;
	u_char read_bit;

	*nvram_data = 0;
	/* 16 bits, most significant bit first */
	for (x = 0; x < 16; x++) {
		T93C46_Read_Bit(np, &read_bit, gpreg);

		if (read_bit & 0x01)
			*nvram_data |= (0x01 << (15 - x));
		else
			*nvram_data &= ~(0x01 << (15 - x));
	}
}

/*
 * Read Tekram NvRAM data.
 */
static int T93C46_Read_Data(hcb_p np, u_short *data,int len,u_char *gpreg)
{
	u_char read_bit;
	int x;

	/* one read command per word; returns 1 as soon as a command fails */
	for (x = 0; x < len; x++) {

		/* output read command and address */
		T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg);
		if (read_bit & 0x01)
			return 1; /* Bad */
		T93C46_Read_Word(np, &data[x], gpreg);
		T93C46_Stop(np, gpreg);
	}

	return 0;
}

/*
 * Try reading 93C46 Tekram NVRAM.
 * Returns the T93C46_Read_Data() status: 0 on success, 1 on failure.
 */
static int sym_read_T93C46_nvram (hcb_p np, Tekram_nvram *nvram)
{
	u_char gpcntl, gpreg;
	u_char old_gpcntl, old_gpreg;
	int retv = 1;

	/* save current state of GPCNTL and GPREG */
	old_gpreg = INB (nc_gpreg);
	old_gpcntl = INB (nc_gpcntl);

	/* set up GPREG & GPCNTL to set GPIO0/1/2/4 in to known state, 0 in,
	   1/2/4 out */
	gpreg = old_gpreg & 0xe9;
	OUTB (nc_gpreg, gpreg);
	gpcntl = (old_gpcntl & 0xe9) | 0x09;
	OUTB (nc_gpcntl, gpcntl);

	/* input all of NVRAM, 64 words */
	retv = T93C46_Read_Data(np, (u_short *) nvram,
				sizeof(*nvram) / sizeof(short), &gpreg);

	/* return GPIO0/1/2/4 to original states after having accessed NVRAM */
	OUTB (nc_gpcntl, old_gpcntl);
	OUTB (nc_gpreg, old_gpreg);

	return retv;
}

/*
 * Try reading Tekram NVRAM.
 * Return 0 if OK.
 */
static int sym_read_Tekram_nvram (hcb_p np, Tekram_nvram *nvram)
{
	u_char *data = (u_char *) nvram;
	int len = sizeof(*nvram);
	u_short csum;
	int x;

	/* Select the EEPROM type wired on the board from the chip model. */
	switch (np->device_id) {
	case PCI_ID_SYM53C885:
	case PCI_ID_SYM53C895:
	case PCI_ID_SYM53C896:
		x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
					  data, len);
		break;
	case PCI_ID_SYM53C875:
		x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
					  data, len);
		if (!x)
			break;
		/* fall through: 24C16 read failed, try the 93C46 instead */
	default:
		x = sym_read_T93C46_nvram(np, nvram);
		break;
	}
	if (x)
		return 1;

	/* verify checksum: little-endian 16-bit words must sum to 0x1234 */
	for (x = 0, csum = 0; x < len - 1; x += 2)
		csum += data[x] + (data[x+1] << 8);
	if (csum != 0x1234)
		return 1;

	return 0;
}

#endif	/* SYM_CONF_NVRAM_SUPPORT */