1 /* $FreeBSD$ */ 2 /* 3 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 4 * FreeBSD Version. 5 * 6 *--------------------------------------- 7 * Copyright (c) 1997, 1998, 1999 by Matthew Jacob 8 * NASA/Ames Research Center 9 * All rights reserved. 10 *--------------------------------------- 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice immediately at the beginning of the file, without modification, 17 * this list of conditions, and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. The name of the author may not be used to endorse or promote products 22 * derived from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 28 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 */ 36 #include <dev/isp/isp_freebsd.h> 37 #include <dev/isp/asm_pci.h> 38 #include <sys/malloc.h> 39 #include <vm/vm.h> 40 #include <vm/pmap.h> 41 42 43 #include <pci/pcireg.h> 44 #include <pci/pcivar.h> 45 46 #include <machine/bus_memio.h> 47 #include <machine/bus_pio.h> 48 #include <machine/bus.h> 49 #include <machine/md_var.h> 50 51 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int)); 52 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t)); 53 #ifndef ISP_DISABLE_1080_SUPPORT 54 static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int)); 55 static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t)); 56 #endif 57 static int isp_pci_mbxdma __P((struct ispsoftc *)); 58 static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *, 59 ispreq_t *, u_int16_t *, u_int16_t)); 60 static void 61 isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t)); 62 63 static void isp_pci_reset1 __P((struct ispsoftc *)); 64 static void isp_pci_dumpregs __P((struct ispsoftc *)); 65 66 #ifndef ISP_CODE_ORG 67 #define ISP_CODE_ORG 0x1000 68 #endif 69 #ifndef ISP_1040_RISC_CODE 70 #define ISP_1040_RISC_CODE NULL 71 #endif 72 #ifndef ISP_1080_RISC_CODE 73 #define ISP_1080_RISC_CODE NULL 74 #endif 75 #ifndef ISP_2100_RISC_CODE 76 #define ISP_2100_RISC_CODE NULL 77 #endif 78 #ifndef ISP_2200_RISC_CODE 79 #define ISP_2200_RISC_CODE NULL 80 #endif 81 82 #ifndef ISP_DISABLE_1020_SUPPORT 83 static struct ispmdvec mdvec = { 84 isp_pci_rd_reg, 85 isp_pci_wr_reg, 86 isp_pci_mbxdma, 87 isp_pci_dmasetup, 88 isp_pci_dmateardown, 89 NULL, 90 isp_pci_reset1, 91 isp_pci_dumpregs, 92 ISP_1040_RISC_CODE, 93 0, 94 ISP_CODE_ORG, 95 0, 96 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64, 97 0 98 }; 99 #endif 100 101 #ifndef ISP_DISABLE_1080_SUPPORT 102 static struct ispmdvec mdvec_1080 = { 103 isp_pci_rd_reg_1080, 104 isp_pci_wr_reg_1080, 105 isp_pci_mbxdma, 106 isp_pci_dmasetup, 107 isp_pci_dmateardown, 108 NULL, 109 isp_pci_reset1, 110 
isp_pci_dumpregs, 111 ISP_1080_RISC_CODE, 112 0, 113 ISP_CODE_ORG, 114 0, 115 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64, 116 0 117 }; 118 #endif 119 120 #ifndef ISP_DISABLE_2100_SUPPORT 121 static struct ispmdvec mdvec_2100 = { 122 isp_pci_rd_reg, 123 isp_pci_wr_reg, 124 isp_pci_mbxdma, 125 isp_pci_dmasetup, 126 isp_pci_dmateardown, 127 NULL, 128 isp_pci_reset1, 129 isp_pci_dumpregs, 130 ISP_2100_RISC_CODE, 131 0, 132 ISP_CODE_ORG, 133 0, 134 0, 135 0 136 }; 137 #endif 138 139 #ifndef ISP_DISABLE_2200_SUPPORT 140 static struct ispmdvec mdvec_2200 = { 141 isp_pci_rd_reg, 142 isp_pci_wr_reg, 143 isp_pci_mbxdma, 144 isp_pci_dmasetup, 145 isp_pci_dmateardown, 146 NULL, 147 isp_pci_reset1, 148 isp_pci_dumpregs, 149 ISP_2200_RISC_CODE, 150 0, 151 ISP_CODE_ORG, 152 0, 153 0, 154 0 155 }; 156 #endif 157 158 #ifndef SCSI_ISP_PREFER_MEM_MAP 159 #define SCSI_ISP_PREFER_MEM_MAP 0 160 #endif 161 162 #ifndef PCIM_CMD_INVEN 163 #define PCIM_CMD_INVEN 0x10 164 #endif 165 #ifndef PCIM_CMD_BUSMASTEREN 166 #define PCIM_CMD_BUSMASTEREN 0x0004 167 #endif 168 #ifndef PCIM_CMD_PERRESPEN 169 #define PCIM_CMD_PERRESPEN 0x0040 170 #endif 171 #ifndef PCIM_CMD_SEREN 172 #define PCIM_CMD_SEREN 0x0100 173 #endif 174 175 #ifndef PCIR_COMMAND 176 #define PCIR_COMMAND 0x04 177 #endif 178 179 #ifndef PCIR_CACHELNSZ 180 #define PCIR_CACHELNSZ 0x0c 181 #endif 182 183 #ifndef PCIR_LATTIMER 184 #define PCIR_LATTIMER 0x0d 185 #endif 186 187 #ifndef PCIR_ROMADDR 188 #define PCIR_ROMADDR 0x30 189 #endif 190 191 #ifndef PCI_VENDOR_QLOGIC 192 #define PCI_VENDOR_QLOGIC 0x1077 193 #endif 194 195 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 196 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 197 #endif 198 199 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 200 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 201 #endif 202 203 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 204 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 205 #endif 206 207 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 208 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 209 #endif 210 211 #ifndef 
PCI_PRODUCT_QLOGIC_ISP2100 212 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 213 #endif 214 215 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 216 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 217 #endif 218 219 #define PCI_QLOGIC_ISP ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 220 221 #define PCI_QLOGIC_ISP1080 \ 222 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 223 224 #define PCI_QLOGIC_ISP1240 \ 225 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 226 227 #define PCI_QLOGIC_ISP1280 \ 228 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 229 230 #define PCI_QLOGIC_ISP2100 \ 231 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 232 233 #define PCI_QLOGIC_ISP2200 \ 234 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 235 236 #define IO_MAP_REG 0x10 237 #define MEM_MAP_REG 0x14 238 239 #define PCI_DFLT_LTNCY 0x40 240 #define PCI_DFLT_LNSZ 0x10 241 242 static const char *isp_pci_probe __P((pcici_t tag, pcidi_t type)); 243 static void isp_pci_attach __P((pcici_t config_d, int unit)); 244 245 /* This distinguishing define is not right, but it does work */ 246 #ifdef __alpha__ 247 #define IO_SPACE_MAPPING ALPHA_BUS_SPACE_IO 248 #define MEM_SPACE_MAPPING ALPHA_BUS_SPACE_MEM 249 #else 250 #define IO_SPACE_MAPPING I386_BUS_SPACE_IO 251 #define MEM_SPACE_MAPPING I386_BUS_SPACE_MEM 252 #endif 253 254 struct isp_pcisoftc { 255 struct ispsoftc pci_isp; 256 pcici_t pci_id; 257 bus_space_tag_t pci_st; 258 bus_space_handle_t pci_sh; 259 int16_t pci_poff[_NREG_BLKS]; 260 bus_dma_tag_t parent_dmat; 261 bus_dma_tag_t cntrol_dmat; 262 bus_dmamap_t cntrol_dmap; 263 bus_dmamap_t *dmaps; 264 }; 265 266 static u_long ispunit; 267 268 static struct pci_device isp_pci_driver = { 269 "isp", 270 isp_pci_probe, 271 isp_pci_attach, 272 &ispunit, 273 NULL 274 }; 275 COMPAT_PCI_DRIVER (isp_pci, isp_pci_driver); 276 277 278 static const char * 279 isp_pci_probe(pcici_t tag, pcidi_t type) 280 { 281 static int oneshot = 1; 282 char *x; 283 284 switch (type) { 285 
#ifndef ISP_DISABLE_1020_SUPPORT 286 case PCI_QLOGIC_ISP: 287 x = "Qlogic ISP 1020/1040 PCI SCSI Adapter"; 288 break; 289 #endif 290 #ifndef ISP_DISABLE_1080_SUPPORT 291 case PCI_QLOGIC_ISP1080: 292 x = "Qlogic ISP 1080 PCI SCSI Adapter"; 293 break; 294 case PCI_QLOGIC_ISP1240: 295 x = "Qlogic ISP 1240 PCI SCSI Adapter"; 296 break; 297 case PCI_QLOGIC_ISP1280: 298 x = "Qlogic ISP 1280 PCI SCSI Adapter"; 299 break; 300 #endif 301 #ifndef ISP_DISABLE_2100_SUPPORT 302 case PCI_QLOGIC_ISP2100: 303 x = "Qlogic ISP 2100 PCI FC-AL Adapter"; 304 break; 305 #endif 306 #ifndef ISP_DISABLE_2200_SUPPORT 307 case PCI_QLOGIC_ISP2200: 308 x = "Qlogic ISP 2200 PCI FC-AL Adapter"; 309 break; 310 #endif 311 default: 312 return (NULL); 313 } 314 if (oneshot) { 315 oneshot = 0; 316 CFGPRINTF("Qlogic ISP Driver, FreeBSD Version %d.%d, " 317 "Core Version %d.%d\n", 318 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 319 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 320 } 321 return (x); 322 } 323 324 static void 325 isp_pci_attach(pcici_t cfid, int unit) 326 { 327 #ifdef SCSI_ISP_WWN 328 const char *name = SCSI_ISP_WWN; 329 char *vtp = NULL; 330 #endif 331 int mapped, prefer_mem_map, bitmap; 332 pci_port_t io_port; 333 u_int32_t data, rev, linesz, psize, basetype; 334 struct isp_pcisoftc *pcs; 335 struct ispsoftc *isp; 336 vm_offset_t vaddr, paddr; 337 struct ispmdvec *mdvp; 338 bus_size_t lim; 339 ISP_LOCKVAL_DECL; 340 341 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT); 342 if (pcs == NULL) { 343 printf("isp%d: cannot allocate softc\n", unit); 344 return; 345 } 346 bzero(pcs, sizeof (struct isp_pcisoftc)); 347 348 /* 349 * Figure out if we're supposed to skip this one. 350 */ 351 if (getenv_int("isp_disable", &bitmap)) { 352 if (bitmap & (1 << unit)) { 353 printf("isp%d: not configuring\n", unit); 354 return; 355 } 356 } 357 358 /* 359 * Figure out which we should try first - memory mapping or i/o mapping? 
360 */ 361 #if SCSI_ISP_PREFER_MEM_MAP == 1 362 prefer_mem_map = 1; 363 #else 364 prefer_mem_map = 0; 365 #endif 366 bitmap = 0; 367 if (getenv_int("isp_mem_map", &bitmap)) { 368 if (bitmap & (1 << unit)) 369 prefer_mem_map = 1; 370 } 371 bitmap = 0; 372 if (getenv_int("isp_io_map", &bitmap)) { 373 if (bitmap & (1 << unit)) 374 prefer_mem_map = 0; 375 } 376 377 vaddr = paddr = NULL; 378 mapped = 0; 379 linesz = PCI_DFLT_LNSZ; 380 /* 381 * Note that pci_conf_read is a 32 bit word aligned function. 382 */ 383 data = pci_conf_read(cfid, PCIR_COMMAND); 384 if (prefer_mem_map) { 385 if (data & PCI_COMMAND_MEM_ENABLE) { 386 if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) { 387 pcs->pci_st = MEM_SPACE_MAPPING; 388 pcs->pci_sh = vaddr; 389 mapped++; 390 } 391 } 392 if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) { 393 if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) { 394 pcs->pci_st = IO_SPACE_MAPPING; 395 pcs->pci_sh = io_port; 396 mapped++; 397 } 398 } 399 } else { 400 if (data & PCI_COMMAND_IO_ENABLE) { 401 if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) { 402 pcs->pci_st = IO_SPACE_MAPPING; 403 pcs->pci_sh = io_port; 404 mapped++; 405 } 406 } 407 if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) { 408 if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) { 409 pcs->pci_st = MEM_SPACE_MAPPING; 410 pcs->pci_sh = vaddr; 411 mapped++; 412 } 413 } 414 } 415 if (mapped == 0) { 416 printf("isp%d: unable to map any ports!\n", unit); 417 free(pcs, M_DEVBUF); 418 return; 419 } 420 if (bootverbose) 421 printf("isp%d: using %s space register mapping\n", unit, 422 pcs->pci_st == IO_SPACE_MAPPING? 
"I/O" : "Memory"); 423 424 data = pci_conf_read(cfid, PCI_ID_REG); 425 rev = pci_conf_read(cfid, PCI_CLASS_REG) & 0xff; /* revision */ 426 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 427 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 428 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 429 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 430 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 431 /* 432 * GCC! 433 */ 434 mdvp = &mdvec; 435 basetype = ISP_HA_SCSI_UNKNOWN; 436 psize = sizeof (sdparam); 437 lim = BUS_SPACE_MAXSIZE_32BIT; 438 #ifndef ISP_DISABLE_1020_SUPPORT 439 if (data == PCI_QLOGIC_ISP) { 440 mdvp = &mdvec; 441 basetype = ISP_HA_SCSI_UNKNOWN; 442 psize = sizeof (sdparam); 443 lim = BUS_SPACE_MAXSIZE_24BIT; 444 } 445 #endif 446 #ifndef ISP_DISABLE_1080_SUPPORT 447 if (data == PCI_QLOGIC_ISP1080) { 448 mdvp = &mdvec_1080; 449 basetype = ISP_HA_SCSI_1080; 450 psize = sizeof (sdparam); 451 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 452 ISP1080_DMA_REGS_OFF; 453 } 454 if (data == PCI_QLOGIC_ISP1240) { 455 mdvp = &mdvec_1080; 456 basetype = ISP_HA_SCSI_1240; 457 psize = 2 * sizeof (sdparam); 458 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 459 ISP1080_DMA_REGS_OFF; 460 } 461 if (data == PCI_QLOGIC_ISP1280) { 462 mdvp = &mdvec_1080; 463 basetype = ISP_HA_SCSI_1280; 464 psize = 2 * sizeof (sdparam); 465 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 466 ISP1080_DMA_REGS_OFF; 467 } 468 #endif 469 #ifndef ISP_DISABLE_2100_SUPPORT 470 if (data == PCI_QLOGIC_ISP2100) { 471 mdvp = &mdvec_2100; 472 basetype = ISP_HA_FC_2100; 473 psize = sizeof (fcparam); 474 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 475 PCI_MBOX_REGS2100_OFF; 476 if (rev < 3) { 477 /* 478 * XXX: Need to get the actual revision 479 * XXX: number of the 2100 FB. At any rate, 480 * XXX: lower cache line size for early revision 481 * XXX; boards. 
482 */ 483 linesz = 1; 484 } 485 } 486 #endif 487 #ifndef ISP_DISABLE_2200_SUPPORT 488 if (data == PCI_QLOGIC_ISP2200) { 489 mdvp = &mdvec_2200; 490 basetype = ISP_HA_FC_2200; 491 psize = sizeof (fcparam); 492 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 493 PCI_MBOX_REGS2100_OFF; 494 } 495 #endif 496 isp = &pcs->pci_isp; 497 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT); 498 if (isp->isp_param == NULL) { 499 printf("isp%d: cannot allocate parameter data\n", unit); 500 return; 501 } 502 bzero(isp->isp_param, psize); 503 isp->isp_mdvec = mdvp; 504 isp->isp_type = basetype; 505 isp->isp_revision = rev; 506 (void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit); 507 isp->isp_osinfo.unit = unit; 508 509 ISP_LOCK(isp); 510 511 /* 512 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 513 * are set. 514 */ 515 data = pci_cfgread(cfid, PCIR_COMMAND, 2); 516 data |= PCIM_CMD_SEREN | 517 PCIM_CMD_PERRESPEN | 518 PCIM_CMD_BUSMASTEREN | 519 PCIM_CMD_INVEN; 520 pci_cfgwrite(cfid, PCIR_COMMAND, 2, data); 521 522 /* 523 * Make sure the Cache Line Size register is set sensibly. 524 */ 525 data = pci_cfgread(cfid, PCIR_CACHELNSZ, 1); 526 if (data != linesz) { 527 data = PCI_DFLT_LNSZ; 528 CFGPRINTF("%s: set PCI line size to %d\n", isp->isp_name, data); 529 pci_cfgwrite(cfid, PCIR_CACHELNSZ, data, 1); 530 } 531 532 /* 533 * Make sure the Latency Timer is sane. 534 */ 535 data = pci_cfgread(cfid, PCIR_LATTIMER, 1); 536 if (data < PCI_DFLT_LTNCY) { 537 data = PCI_DFLT_LTNCY; 538 CFGPRINTF("%s: set PCI latency to %d\n", isp->isp_name, data); 539 pci_cfgwrite(cfid, PCIR_LATTIMER, data, 1); 540 } 541 542 /* 543 * Make sure we've disabled the ROM. 
544 */ 545 data = pci_cfgread(cfid, PCIR_ROMADDR, 4); 546 data &= ~1; 547 pci_cfgwrite(cfid, PCIR_ROMADDR, data, 4); 548 ISP_UNLOCK(isp); 549 550 if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, 551 BUS_SPACE_MAXADDR, NULL, NULL, lim + 1, 552 255, lim, 0, &pcs->parent_dmat) != 0) { 553 printf("%s: could not create master dma tag\n", isp->isp_name); 554 free(pcs, M_DEVBUF); 555 return; 556 } 557 if (pci_map_int(cfid, (void (*)(void *))isp_intr, 558 (void *)isp, &IMASK) == 0) { 559 printf("%s: could not map interrupt\n", isp->isp_name); 560 free(pcs, M_DEVBUF); 561 return; 562 } 563 564 pcs->pci_id = cfid; 565 #ifdef SCSI_ISP_NO_FWLOAD_MASK 566 if (SCSI_ISP_NO_FWLOAD_MASK && (SCSI_ISP_NO_FWLOAD_MASK & (1 << unit))) 567 isp->isp_confopts |= ISP_CFG_NORELOAD; 568 #endif 569 if (getenv_int("isp_no_fwload", &bitmap)) { 570 if (bitmap & (1 << unit)) 571 isp->isp_confopts |= ISP_CFG_NORELOAD; 572 } 573 if (getenv_int("isp_fwload", &bitmap)) { 574 if (bitmap & (1 << unit)) 575 isp->isp_confopts &= ~ISP_CFG_NORELOAD; 576 } 577 578 #ifdef SCSI_ISP_NO_NVRAM_MASK 579 if (SCSI_ISP_NO_NVRAM_MASK && (SCSI_ISP_NO_NVRAM_MASK & (1 << unit))) { 580 printf("%s: ignoring NVRAM\n", isp->isp_name); 581 isp->isp_confopts |= ISP_CFG_NONVRAM; 582 } 583 #endif 584 if (getenv_int("isp_no_nvram", &bitmap)) { 585 if (bitmap & (1 << unit)) 586 isp->isp_confopts |= ISP_CFG_NONVRAM; 587 } 588 if (getenv_int("isp_nvram", &bitmap)) { 589 if (bitmap & (1 << unit)) 590 isp->isp_confopts &= ~ISP_CFG_NONVRAM; 591 } 592 593 #ifdef SCSI_ISP_FCDUPLEX 594 if (IS_FC(isp)) { 595 if (SCSI_ISP_FCDUPLEX && (SCSI_ISP_FCDUPLEX & (1 << unit))) { 596 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 597 } 598 } 599 #endif 600 if (getenv_int("isp_fcduplex", &bitmap)) { 601 if (bitmap & (1 << unit)) 602 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 603 } 604 if (getenv_int("isp_no_fcduplex", &bitmap)) { 605 if (bitmap & (1 << unit)) 606 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX; 607 } 608 /* 609 * Look for overriding 
WWN. This is a Node WWN so it binds to 610 * all FC instances. A Port WWN will be constructed from it 611 * as appropriate. 612 */ 613 #ifdef SCSI_ISP_WWN 614 isp->isp_osinfo.default_wwn = strtoq(name, &vtp, 16); 615 if (vtp != name && *vtp == 0) { 616 isp->isp_confopts |= ISP_CFG_OWNWWN; 617 } else 618 #endif 619 if (!getenv_quad("isp_wwn", (quad_t *) &isp->isp_osinfo.default_wwn)) { 620 int i; 621 u_int64_t seed = (u_int64_t) (intptr_t) isp; 622 623 seed <<= 16; 624 seed &= ((1LL << 48) - 1LL); 625 /* 626 * This isn't very random, but it's the best we can do for 627 * the real edge case of cards that don't have WWNs. If 628 * you recompile a new vers.c, you'll get a different WWN. 629 */ 630 for (i = 0; version[i] != 0; i++) { 631 seed += version[i]; 632 } 633 /* 634 * Make sure the top nibble has something vaguely sensible. 635 */ 636 isp->isp_osinfo.default_wwn |= (4LL << 60) | seed; 637 } else { 638 isp->isp_confopts |= ISP_CFG_OWNWWN; 639 } 640 (void) getenv_int("isp_debug", &isp_debug); 641 #ifdef ISP_TARGET_MODE 642 (void) getenv_int("isp_tdebug", &isp_tdebug); 643 #endif 644 ISP_LOCK(isp); 645 isp_reset(isp); 646 if (isp->isp_state != ISP_RESETSTATE) { 647 (void) pci_unmap_int(cfid); 648 ISP_UNLOCK(isp); 649 free(pcs, M_DEVBUF); 650 return; 651 } 652 isp_init(isp); 653 if (isp->isp_state != ISP_INITSTATE) { 654 /* If we're a Fibre Channel Card, we allow deferred attach */ 655 if (IS_SCSI(isp)) { 656 isp_uninit(isp); 657 (void) pci_unmap_int(cfid); /* Does nothing */ 658 ISP_UNLOCK(isp); 659 free(pcs, M_DEVBUF); 660 return; 661 } 662 } 663 isp_attach(isp); 664 if (isp->isp_state != ISP_RUNSTATE) { 665 /* If we're a Fibre Channel Card, we allow deferred attach */ 666 if (IS_SCSI(isp)) { 667 isp_uninit(isp); 668 (void) pci_unmap_int(cfid); /* Does nothing */ 669 ISP_UNLOCK(isp); 670 free(pcs, M_DEVBUF); 671 return; 672 } 673 } 674 ISP_UNLOCK(isp); 675 } 676 677 static u_int16_t 678 isp_pci_rd_reg(isp, regoff) 679 struct ispsoftc *isp; 680 int regoff; 681 { 
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * SXP registers are only visible when the SXP page bit is
		 * set in BIU_CONF1; save the current value so it can be
		 * restored after the access.
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	/*
	 * Translate the block-relative register offset into an offset
	 * within the mapped PCI register window, then do a 16-bit read.
	 */
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/* Restore the original BIU_CONF1 page selection. */
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
	return (rv);
}

/*
 * Write a 16-bit ISP register for 1020/1040/2100/2200 class chips.
 * Same BIU_CONF1 paging dance as isp_pci_rd_reg, but performing a
 * bus_space write instead of a read.
 */
static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
}

#ifndef ISP_DISABLE_1080_SUPPORT
/*
 * Read a 16-bit ISP register on 1080/1240/1280 class chips.  These
 * parts have two SXP banks and a DMA register page, all selected
 * through BIU_CONF1.
 */
static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		/* Select the requested SXP bank, clearing the DMA page bit. */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		/* DMA registers live behind the CONF1 DMA page bit. */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	/*
	 * NOTE(review): the restore is guarded on the *value* of the
	 * saved CONF1 word, so if CONF1 happened to read back as 0 the
	 * page-select bits written above are never cleared — confirm
	 * that CONF1 is always nonzero when paging is in effect.
	 */
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
	return (rv);
}

/*
 * Write a 16-bit ISP register on 1080/1240/1280 class chips.
 * Mirror image of isp_pci_rd_reg_1080: page in the SXP bank or the
 * DMA block via BIU_CONF1, do the write, then restore CONF1.
 */
static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		/* Select the requested SXP bank, clearing the DMA page bit. */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	/* See NOTE in isp_pci_rd_reg_1080 about the oc != 0 guard. */
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
}
#endif


static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));

/*
 * Little carrier passed as the callback argument to bus_dmamap_load()
 * so the load callbacks below can record the mapped bus address (or
 * an error) back into the softc.
 */
struct imush {
	struct ispsoftc *isp;
	int error;
};

/*
 * bus_dmamap_load callback for the request queue: stash the bus
 * address of the (single) segment, or the load error.
 */
static void
isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		imushp->isp->isp_rquest_dma = segs->ds_addr;
	}
}

/*
 * bus_dmamap_load callback for the result queue.
 */
static void
isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		imushp->isp->isp_result_dma = segs->ds_addr;
	}
}

/*
 * bus_dmamap_load callback for the Fibre Channel scratch area.
 * Only ever used when isp_param points at an fcparam.
 */
static void
isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		fcparam *fcp = imushp->isp->isp_param;
		fcp->isp_scdma = segs->ds_addr;
	}
}

/*
 * Allocate and DMA-map the mailbox command areas: the request and
 * result queues, the per-command DMA maps, and (for FC cards) the
 * scratch area.  Returns 0 on success, 1 on failure.
 */
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error;
	bus_size_t lim;
	struct imush im;


	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	/* Array of active-command pointers, indexed by handle - 1. */
	len = sizeof (ISP_SCSI_XFER_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		printf("%s: can't alloc xflist array\n", isp->isp_name);
		return (1);
	}
	bzero(isp->isp_xflist, len);
	/* One DMA map per outstanding command. */
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pci->dmaps == NULL) {
		printf("%s: can't alloc dma maps\n", isp->isp_name);
		free(isp->isp_xflist, M_DEVBUF);
		return (1);
	}

	/*
	 * FC and Ultra2 parts can address a full 32 bits; older SCSI
	 * parts are limited to 24-bit DMA addresses.
	 * NOTE(review): BUS_SPACE_MAXADDR + 1 wraps to 0 if bus_size_t
	 * is the same width as the address space — confirm on 32-bit
	 * platforms.
	 */
	if (IS_FC(isp) || IS_ULTRA2(isp))
		lim = BUS_SPACE_MAXADDR + 1;
	else
		lim = BUS_SPACE_MAXADDR_24BIT + 1;

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}
	/* Single-segment tag: all control structures live contiguously. */
	if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
		printf("%s: cannot create a dma tag for control spaces\n",
		    isp->isp_name);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}
	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
		printf("%s: cannot allocate %d bytes of CCB memory\n",
		    isp->isp_name, len);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}

	/* Request queue sits at the start of the control area. */
	isp->isp_rquest = base;
	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, &im, 0);
	if (im.error) {
		printf("%s: error %d loading dma map for DMA request queue\n",
		    isp->isp_name, im.error);
913 free(isp->isp_xflist, M_DEVBUF); 914 free(pci->dmaps, M_DEVBUF); 915 isp->isp_rquest = NULL; 916 return (1); 917 } 918 isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN); 919 im.error = 0; 920 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result, 921 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, &im, 0); 922 if (im.error) { 923 printf("%s: error %d loading dma map for DMA result queue\n", 924 isp->isp_name, im.error); 925 free(isp->isp_xflist, M_DEVBUF); 926 free(pci->dmaps, M_DEVBUF); 927 isp->isp_rquest = NULL; 928 return (1); 929 } 930 931 for (i = 0; i < isp->isp_maxcmds; i++) { 932 error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]); 933 if (error) { 934 printf("%s: error %d creating per-cmd DMA maps\n", 935 isp->isp_name, error); 936 free(isp->isp_xflist, M_DEVBUF); 937 free(pci->dmaps, M_DEVBUF); 938 isp->isp_rquest = NULL; 939 return (1); 940 } 941 } 942 943 if (IS_FC(isp)) { 944 fcparam *fcp = (fcparam *) isp->isp_param; 945 fcp->isp_scratch = base + 946 ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN) + 947 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN); 948 im.error = 0; 949 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, 950 fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0); 951 if (im.error) { 952 printf("%s: error %d loading FC scratch area\n", 953 isp->isp_name, im.error); 954 free(isp->isp_xflist, M_DEVBUF); 955 free(pci->dmaps, M_DEVBUF); 956 isp->isp_rquest = NULL; 957 return (1); 958 } 959 } 960 return (0); 961 } 962 963 typedef struct { 964 struct ispsoftc *isp; 965 void *cmd_token; 966 void *rq; 967 u_int16_t *iptrp; 968 u_int16_t optr; 969 u_int error; 970 } mush_t; 971 972 #define MUSHERR_NOQENTRIES -2 973 974 #ifdef ISP_TARGET_MODE 975 /* 976 * We need to handle DMA for target mode differently from initiator mode. 
977 * 978 * DMA mapping and construction and submission of CTIO Request Entries 979 * and rendevous for completion are very tightly coupled because we start 980 * out by knowing (per platform) how much data we have to move, but we 981 * don't know, up front, how many DMA mapping segments will have to be used 982 * cover that data, so we don't know how many CTIO Request Entries we 983 * will end up using. Further, for performance reasons we may want to 984 * (on the last CTIO for Fibre Channel), send status too (if all went well). 985 * 986 * The standard vector still goes through isp_pci_dmasetup, but the callback 987 * for the DMA mapping routines comes here instead with the whole transfer 988 * mapped and a pointer to a partially filled in already allocated request 989 * queue entry. We finish the job. 990 */ 991 static void dma2_tgt __P((void *, bus_dma_segment_t *, int, int)); 992 static void dma2_tgt_fc __P((void *, bus_dma_segment_t *, int, int)); 993 994 static void 995 dma2_tgt(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 996 { 997 mush_t *mp; 998 struct ccb_scsiio *csio; 999 struct isp_pcisoftc *pci; 1000 bus_dmamap_t *dp; 1001 u_int8_t scsi_status, send_status; 1002 ct_entry_t *cto; 1003 u_int32_t handle; 1004 int nctios; 1005 1006 mp = (mush_t *) arg; 1007 if (error) { 1008 mp->error = error; 1009 return; 1010 } 1011 1012 csio = mp->cmd_token; 1013 cto = mp->rq; 1014 1015 cto->ct_xfrlen = 0; 1016 cto->ct_resid = 0; 1017 cto->ct_seg_count = 0; 1018 bzero(cto->ct_dataseg, sizeof (cto->ct_dataseg)); 1019 if (nseg == 0) { 1020 cto->ct_header.rqs_entry_count = 1; 1021 ISP_TDQE(mp->isp, "dma2_tgt[no data]", *mp->iptrp, cto); 1022 if (isp_tdebug) { 1023 printf("%s:CTIO lun %d->iid%d flgs 0x%x sts 0x%x ssts " 1024 "0x%x res %u\n", mp->isp->isp_name, 1025 csio->ccb_h.target_lun, cto->ct_iid, cto->ct_flags, 1026 cto->ct_status, cto->ct_scsi_status, cto->ct_resid); 1027 } 1028 ISP_SWIZ_CTIO(isp, cto, cto); 1029 return; 1030 } 1031 1032 /* 1033 * Save 
handle, and potentially any SCSI status, which 1034 * we'll reinsert on the last CTIO we're going to send. 1035 */ 1036 handle = cto->ct_reserved; 1037 cto->ct_reserved = 0; 1038 scsi_status = cto->ct_scsi_status; 1039 cto->ct_scsi_status = 0; 1040 send_status = cto->ct_flags & CT_SENDSTATUS; 1041 cto->ct_flags &= ~CT_SENDSTATUS; 1042 1043 nctios = nseg / ISP_RQDSEG; 1044 if (nseg % ISP_RQDSEG) { 1045 nctios++; 1046 } 1047 1048 pci = (struct isp_pcisoftc *)mp->isp; 1049 dp = &pci->dmaps[handle - 1]; 1050 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1051 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); 1052 } else { 1053 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); 1054 } 1055 1056 1057 while (nctios--) { 1058 int seg, seglim; 1059 1060 seglim = nseg; 1061 if (seglim > ISP_RQDSEG) 1062 seglim = ISP_RQDSEG; 1063 1064 for (seg = 0; seg < seglim; seg++) { 1065 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1066 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1067 cto->ct_xfrlen += dm_segs->ds_len; 1068 dm_segs++; 1069 } 1070 1071 cto->ct_seg_count = seg; 1072 cto->ct_flags &= CT_DATAMASK; 1073 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1074 cto->ct_flags |= CT_DATA_IN; 1075 } else { 1076 cto->ct_flags |= CT_DATA_OUT; 1077 } 1078 1079 if (nctios == 0) { 1080 /* 1081 * We're the last in a sequence of CTIOs, so mark this 1082 * CTIO and save the handle to the CCB such that when 1083 * this CTIO completes we can free dma resources and 1084 * do whatever else we need to do to finish the rest 1085 * of the command. 
1086 */ 1087 cto->ct_header.rqs_seqno = 1; 1088 cto->ct_reserved = handle; 1089 cto->ct_scsi_status = scsi_status; 1090 cto->ct_flags |= send_status; 1091 ISP_TDQE(mp->isp, "last dma2_tgt", *mp->iptrp, cto); 1092 if (isp_tdebug) { 1093 printf("%s:CTIO lun %d->iid%d flgs 0x%x sts " 1094 "0x%x ssts 0x%x res %u\n", 1095 mp->isp->isp_name, csio->ccb_h.target_lun, 1096 cto->ct_iid, cto->ct_flags, cto->ct_status, 1097 cto->ct_scsi_status, cto->ct_resid); 1098 } 1099 ISP_SWIZ_CTIO(isp, cto, cto); 1100 } else { 1101 ct_entry_t *octo = cto; 1102 cto->ct_reserved = 0; 1103 cto->ct_header.rqs_seqno = 0; 1104 ISP_TDQE(mp->isp, "dma2_tgt", *mp->iptrp, cto); 1105 if (isp_tdebug) { 1106 printf("%s:CTIO lun %d->iid%d flgs 0x%x res" 1107 " %u\n", mp->isp->isp_name, 1108 csio->ccb_h.target_lun, cto->ct_iid, 1109 cto->ct_flags, cto->ct_resid); 1110 } 1111 cto = (ct_entry_t *) 1112 ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp); 1113 *mp->iptrp = 1114 ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN); 1115 if (*mp->iptrp == mp->optr) { 1116 printf("%s: Queue Overflow in dma2_tgt\n", 1117 mp->isp->isp_name); 1118 mp->error = MUSHERR_NOQENTRIES; 1119 return; 1120 } 1121 /* 1122 * Fill in the new CTIO with info from the old one. 
1123 */ 1124 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1125 cto->ct_header.rqs_entry_count = 1; 1126 cto->ct_header.rqs_flags = 0; 1127 /* ct_header.rqs_seqno && ct_reserved filled in later */ 1128 cto->ct_lun = octo->ct_lun; 1129 cto->ct_iid = octo->ct_iid; 1130 cto->ct_reserved2 = octo->ct_reserved2; 1131 cto->ct_tgt = octo->ct_tgt; 1132 cto->ct_flags = octo->ct_flags & ~CT_DATAMASK; 1133 cto->ct_status = 0; 1134 cto->ct_scsi_status = 0; 1135 cto->ct_tag_val = octo->ct_tag_val; 1136 cto->ct_tag_type = octo->ct_tag_type; 1137 cto->ct_xfrlen = 0; 1138 cto->ct_resid = 0; 1139 cto->ct_timeout = octo->ct_timeout; 1140 cto->ct_seg_count = 0; 1141 bzero(cto->ct_dataseg, sizeof (cto->ct_dataseg)); 1142 ISP_SWIZ_CTIO(isp, octo, octo); 1143 } 1144 } 1145 } 1146 1147 static void 1148 dma2_tgt_fc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1149 { 1150 mush_t *mp; 1151 struct ccb_scsiio *csio; 1152 struct isp_pcisoftc *pci; 1153 bus_dmamap_t *dp; 1154 ct2_entry_t *cto; 1155 u_int16_t scsi_status, send_status, send_sense; 1156 u_int32_t handle, totxfr; 1157 u_int8_t sense[QLTM_SENSELEN]; 1158 int nctios; 1159 int32_t resid; 1160 1161 mp = (mush_t *) arg; 1162 if (error) { 1163 mp->error = error; 1164 return; 1165 } 1166 1167 csio = mp->cmd_token; 1168 cto = mp->rq; 1169 1170 if (nseg == 0) { 1171 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 1172 printf("%s: dma2_tgt_fc, a status CTIO2 without MODE1 " 1173 "set (0x%x)\n", mp->isp->isp_name, cto->ct_flags); 1174 mp->error = EINVAL; 1175 return; 1176 } 1177 cto->ct_header.rqs_entry_count = 1; 1178 /* ct_reserved contains the handle set by caller */ 1179 /* 1180 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 1181 * flags to NO DATA and clear relative offset flags. 1182 * We preserve the ct_resid and the response area. 
1183 */ 1184 cto->ct_flags |= CT2_NO_DATA; 1185 cto->ct_seg_count = 0; 1186 cto->ct_reloff = 0; 1187 ISP_TDQE(mp->isp, "dma2_tgt_fc[no data]", *mp->iptrp, cto); 1188 if (isp_tdebug) { 1189 scsi_status = cto->rsp.m1.ct_scsi_status; 1190 printf("%s:CTIO2 RX_ID 0x%x lun %d->iid%d flgs 0x%x " 1191 "sts 0x%x ssts 0x%x res %u\n", mp->isp->isp_name, 1192 cto->ct_rxid, csio->ccb_h.target_lun, cto->ct_iid, 1193 cto->ct_flags, cto->ct_status, 1194 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1195 } 1196 ISP_SWIZ_CTIO2(isp, cto, cto); 1197 return; 1198 } 1199 1200 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 1201 printf("%s: dma2_tgt_fc, a data CTIO2 without MODE0 set " 1202 "(0x%x)\n\n", mp->isp->isp_name, cto->ct_flags); 1203 mp->error = EINVAL; 1204 return; 1205 } 1206 1207 1208 nctios = nseg / ISP_RQDSEG_T2; 1209 if (nseg % ISP_RQDSEG_T2) { 1210 nctios++; 1211 } 1212 1213 /* 1214 * Save the handle, status, reloff, and residual. We'll reinsert the 1215 * handle into the last CTIO2 we're going to send, and reinsert status 1216 * and residual (and possibly sense data) if that's to be sent as well. 1217 * 1218 * We preserve ct_reloff and adjust it for each data CTIO2 we send past 1219 * the first one. This is needed so that the FCP DATA IUs being sent 1220 * out have the correct offset (they can arrive at the other end out 1221 * of order). 1222 */ 1223 1224 handle = cto->ct_reserved; 1225 cto->ct_reserved = 0; 1226 1227 if ((send_status = (cto->ct_flags & CT2_SENDSTATUS)) != 0) { 1228 cto->ct_flags &= ~CT2_SENDSTATUS; 1229 1230 /* 1231 * Preserve residual. 1232 */ 1233 resid = cto->ct_resid; 1234 1235 /* 1236 * Save actual SCSI status. We'll reinsert the 1237 * CT2_SNSLEN_VALID later if appropriate. 
1238 */ 1239 scsi_status = cto->rsp.m0.ct_scsi_status & 0xff; 1240 send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID; 1241 1242 /* 1243 * If we're sending status and have a CHECK CONDTION and 1244 * have sense data, we send one more CTIO2 with just the 1245 * status and sense data. The upper layers have stashed 1246 * the sense data in the dataseg structure for us. 1247 */ 1248 1249 if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND && 1250 send_sense) { 1251 bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN); 1252 nctios++; 1253 } 1254 } else { 1255 scsi_status = send_sense = resid = 0; 1256 } 1257 1258 totxfr = cto->ct_resid = 0; 1259 cto->rsp.m0.ct_scsi_status = 0; 1260 bzero(&cto->rsp, sizeof (cto->rsp)); 1261 1262 pci = (struct isp_pcisoftc *)mp->isp; 1263 dp = &pci->dmaps[handle - 1]; 1264 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1265 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); 1266 } else { 1267 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); 1268 } 1269 1270 while (nctios--) { 1271 int seg, seglim; 1272 1273 seglim = nseg; 1274 if (seglim) { 1275 if (seglim > ISP_RQDSEG_T2) 1276 seglim = ISP_RQDSEG_T2; 1277 1278 for (seg = 0; seg < seglim; seg++) { 1279 cto->rsp.m0.ct_dataseg[seg].ds_base = 1280 dm_segs->ds_addr; 1281 cto->rsp.m0.ct_dataseg[seg].ds_count = 1282 dm_segs->ds_len; 1283 cto->rsp.m0.ct_xfrlen += dm_segs->ds_len; 1284 totxfr += dm_segs->ds_len; 1285 dm_segs++; 1286 } 1287 cto->ct_seg_count = seg; 1288 } else { 1289 /* 1290 * This case should only happen when we're sending a 1291 * synthesized MODE1 final status with sense data. 1292 */ 1293 if (send_sense == 0) { 1294 printf("%s: dma2_tgt_fc ran out of segments, " 1295 "no SENSE DATA\n", mp->isp->isp_name); 1296 mp->error = EINVAL; 1297 return; 1298 } 1299 } 1300 1301 /* 1302 * At this point, the fields ct_lun, ct_iid, ct_rxid, 1303 * ct_timeout have been carried over unchanged from what 1304 * our caller had set. 
1305 * 1306 * The field ct_reloff is either what the caller set, or 1307 * what we've added to below. 1308 * 1309 * The dataseg fields and the seg_count fields we just got 1310 * through setting. The data direction we've preserved all 1311 * along and only clear it if we're sending a MODE1 status 1312 * as the last CTIO. 1313 * 1314 */ 1315 1316 if (nctios == 0) { 1317 1318 /* 1319 * We're the last in a sequence of CTIO2s, so mark this 1320 * CTIO2 and save the handle to the CCB such that when 1321 * this CTIO2 completes we can free dma resources and 1322 * do whatever else we need to do to finish the rest 1323 * of the command. 1324 */ 1325 1326 cto->ct_reserved = handle; 1327 cto->ct_header.rqs_seqno = 1; 1328 1329 if (send_status) { 1330 if (send_sense) { 1331 bcopy(sense, cto->rsp.m1.ct_resp, 1332 QLTM_SENSELEN); 1333 cto->rsp.m1.ct_senselen = 1334 QLTM_SENSELEN; 1335 scsi_status |= CT2_SNSLEN_VALID; 1336 cto->rsp.m1.ct_scsi_status = 1337 scsi_status; 1338 cto->ct_flags &= CT2_FLAG_MMASK; 1339 cto->ct_flags |= CT2_FLAG_MODE1 | 1340 CT2_NO_DATA| CT2_SENDSTATUS; 1341 } else { 1342 cto->rsp.m0.ct_scsi_status = 1343 scsi_status; 1344 cto->ct_flags |= CT2_SENDSTATUS; 1345 } 1346 cto->ct_resid = resid - totxfr; 1347 } 1348 ISP_TDQE(mp->isp, "last dma2_tgt_fc", *mp->iptrp, cto); 1349 if (isp_tdebug) { 1350 printf("%s:CTIO2 RX_ID 0x%x lun %d->iid%d flgs" 1351 "0x%x sts 0x%x ssts 0x%x res %u\n", 1352 mp->isp->isp_name, cto->ct_rxid, 1353 csio->ccb_h.target_lun, (int) cto->ct_iid, 1354 cto->ct_flags, cto->ct_status, 1355 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1356 } 1357 ISP_SWIZ_CTIO2(isp, cto, cto); 1358 } else { 1359 ct2_entry_t *octo = cto; 1360 1361 /* 1362 * Make sure handle fields are clean 1363 */ 1364 cto->ct_reserved = 0; 1365 cto->ct_header.rqs_seqno = 0; 1366 1367 ISP_TDQE(mp->isp, "dma2_tgt_fc", *mp->iptrp, cto); 1368 if (isp_tdebug) { 1369 printf("%s:CTIO2 RX_ID 0x%x lun %d->iid%d flgs" 1370 "0x%x\n", mp->isp->isp_name, cto->ct_rxid, 1371 
csio->ccb_h.target_lun, (int) cto->ct_iid, 1372 cto->ct_flags); 1373 } 1374 /* 1375 * Get a new CTIO2 1376 */ 1377 cto = (ct2_entry_t *) 1378 ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp); 1379 *mp->iptrp = 1380 ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN); 1381 if (*mp->iptrp == mp->optr) { 1382 printf("%s: Queue Overflow in dma2_tgt_fc\n", 1383 mp->isp->isp_name); 1384 mp->error = MUSHERR_NOQENTRIES; 1385 return; 1386 } 1387 1388 /* 1389 * Fill in the new CTIO2 with info from the old one. 1390 */ 1391 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1392 cto->ct_header.rqs_entry_count = 1; 1393 cto->ct_header.rqs_flags = 0; 1394 /* ct_header.rqs_seqno && ct_reserved done later */ 1395 cto->ct_lun = octo->ct_lun; 1396 cto->ct_iid = octo->ct_iid; 1397 cto->ct_rxid = octo->ct_rxid; 1398 cto->ct_flags = octo->ct_flags; 1399 cto->ct_status = 0; 1400 cto->ct_resid = 0; 1401 cto->ct_timeout = octo->ct_timeout; 1402 cto->ct_seg_count = 0; 1403 /* 1404 * Adjust the new relative offset by the amount which 1405 * is recorded in the data segment of the old CTIO2 we 1406 * just finished filling out. 
1407 */ 1408 cto->ct_reloff += octo->rsp.m0.ct_xfrlen; 1409 bzero(&cto->rsp, sizeof (cto->rsp)); 1410 ISP_SWIZ_CTIO2(isp, cto, cto); 1411 } 1412 } 1413 } 1414 #endif 1415 1416 static void dma2 __P((void *, bus_dma_segment_t *, int, int)); 1417 1418 static void 1419 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1420 { 1421 mush_t *mp; 1422 struct ccb_scsiio *csio; 1423 struct isp_pcisoftc *pci; 1424 bus_dmamap_t *dp; 1425 bus_dma_segment_t *eseg; 1426 ispreq_t *rq; 1427 ispcontreq_t *crq; 1428 int seglim, datalen; 1429 1430 mp = (mush_t *) arg; 1431 if (error) { 1432 mp->error = error; 1433 return; 1434 } 1435 1436 if (nseg < 1) { 1437 printf("%s: bad segment count (%d)\n", mp->isp->isp_name, nseg); 1438 mp->error = EFAULT; 1439 return; 1440 } 1441 csio = mp->cmd_token; 1442 rq = mp->rq; 1443 pci = (struct isp_pcisoftc *)mp->isp; 1444 dp = &pci->dmaps[rq->req_handle - 1]; 1445 1446 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1447 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); 1448 } else { 1449 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); 1450 } 1451 1452 datalen = XS_XFRLEN(csio); 1453 1454 /* 1455 * We're passed an initial partially filled in entry that 1456 * has most fields filled in except for data transfer 1457 * related values. 1458 * 1459 * Our job is to fill in the initial request queue entry and 1460 * then to start allocating and filling in continuation entries 1461 * until we've covered the entire transfer. 
1462 */ 1463 1464 if (IS_FC(mp->isp)) { 1465 seglim = ISP_RQDSEG_T2; 1466 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 1467 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1468 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1469 } else { 1470 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1471 } 1472 } else { 1473 seglim = ISP_RQDSEG; 1474 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1475 rq->req_flags |= REQFLAG_DATA_IN; 1476 } else { 1477 rq->req_flags |= REQFLAG_DATA_OUT; 1478 } 1479 } 1480 1481 eseg = dm_segs + nseg; 1482 1483 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1484 if (IS_FC(mp->isp)) { 1485 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 1486 rq2->req_dataseg[rq2->req_seg_count].ds_base = 1487 dm_segs->ds_addr; 1488 rq2->req_dataseg[rq2->req_seg_count].ds_count = 1489 dm_segs->ds_len; 1490 } else { 1491 rq->req_dataseg[rq->req_seg_count].ds_base = 1492 dm_segs->ds_addr; 1493 rq->req_dataseg[rq->req_seg_count].ds_count = 1494 dm_segs->ds_len; 1495 } 1496 datalen -= dm_segs->ds_len; 1497 #if 0 1498 if (IS_FC(mp->isp)) { 1499 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 1500 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n", 1501 mp->isp->isp_name, rq->req_seg_count, 1502 rq2->req_dataseg[rq2->req_seg_count].ds_count, 1503 rq2->req_dataseg[rq2->req_seg_count].ds_base); 1504 } else { 1505 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n", 1506 mp->isp->isp_name, rq->req_seg_count, 1507 rq->req_dataseg[rq->req_seg_count].ds_count, 1508 rq->req_dataseg[rq->req_seg_count].ds_base); 1509 } 1510 #endif 1511 rq->req_seg_count++; 1512 dm_segs++; 1513 } 1514 1515 while (datalen > 0 && dm_segs != eseg) { 1516 crq = (ispcontreq_t *) 1517 ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp); 1518 *mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN); 1519 if (*mp->iptrp == mp->optr) { 1520 #if 0 1521 printf("%s: Request Queue Overflow++\n", 1522 mp->isp->isp_name); 1523 #endif 1524 mp->error = MUSHERR_NOQENTRIES; 1525 return; 1526 } 1527 
rq->req_header.rqs_entry_count++; 1528 bzero((void *)crq, sizeof (*crq)); 1529 crq->req_header.rqs_entry_count = 1; 1530 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1531 1532 seglim = 0; 1533 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 1534 crq->req_dataseg[seglim].ds_base = 1535 dm_segs->ds_addr; 1536 crq->req_dataseg[seglim].ds_count = 1537 dm_segs->ds_len; 1538 #if 0 1539 printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n", 1540 mp->isp->isp_name, rq->req_header.rqs_entry_count-1, 1541 seglim, crq->req_dataseg[seglim].ds_count, 1542 crq->req_dataseg[seglim].ds_base); 1543 #endif 1544 rq->req_seg_count++; 1545 dm_segs++; 1546 seglim++; 1547 datalen -= dm_segs->ds_len; 1548 } 1549 } 1550 } 1551 1552 static int 1553 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq, 1554 u_int16_t *iptrp, u_int16_t optr) 1555 { 1556 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 1557 bus_dmamap_t *dp = NULL; 1558 mush_t mush, *mp; 1559 void (*eptr) __P((void *, bus_dma_segment_t *, int, int)); 1560 1561 #ifdef ISP_TARGET_MODE 1562 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1563 if (IS_FC(isp)) { 1564 eptr = dma2_tgt_fc; 1565 } else { 1566 eptr = dma2_tgt; 1567 } 1568 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) { 1569 rq->req_seg_count = 1; 1570 mp = &mush; 1571 mp->isp = isp; 1572 mp->cmd_token = csio; 1573 mp->rq = rq; 1574 mp->iptrp = iptrp; 1575 mp->optr = optr; 1576 mp->error = 0; 1577 (*eptr)(mp, NULL, 0, 0); 1578 goto exit; 1579 } 1580 } else 1581 #endif 1582 eptr = dma2; 1583 1584 /* 1585 * NB: if we need to do request queue entry swizzling, 1586 * NB: this is where it would need to be done for cmds 1587 * NB: that move no data. For commands that move data, 1588 * NB: swizzling would take place in those functions. 
1589 */ 1590 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) { 1591 rq->req_seg_count = 1; 1592 return (CMD_QUEUED); 1593 } 1594 1595 /* 1596 * Do a virtual grapevine step to collect info for 1597 * the callback dma allocation that we have to use... 1598 */ 1599 mp = &mush; 1600 mp->isp = isp; 1601 mp->cmd_token = csio; 1602 mp->rq = rq; 1603 mp->iptrp = iptrp; 1604 mp->optr = optr; 1605 mp->error = 0; 1606 1607 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1608 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 1609 int error, s; 1610 dp = &pci->dmaps[rq->req_handle - 1]; 1611 s = splsoftvm(); 1612 error = bus_dmamap_load(pci->parent_dmat, *dp, 1613 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 1614 if (error == EINPROGRESS) { 1615 bus_dmamap_unload(pci->parent_dmat, *dp); 1616 mp->error = EINVAL; 1617 printf("%s: deferred dma allocation not " 1618 "supported\n", isp->isp_name); 1619 } else if (error && mp->error == 0) { 1620 #ifdef DIAGNOSTIC 1621 printf("%s: error %d in dma mapping code\n", 1622 isp->isp_name, error); 1623 #endif 1624 mp->error = error; 1625 } 1626 splx(s); 1627 } else { 1628 /* Pointer to physical buffer */ 1629 struct bus_dma_segment seg; 1630 seg.ds_addr = (bus_addr_t)csio->data_ptr; 1631 seg.ds_len = csio->dxfer_len; 1632 (*eptr)(mp, &seg, 1, 0); 1633 } 1634 } else { 1635 struct bus_dma_segment *segs; 1636 1637 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1638 printf("%s: Physical segment pointers unsupported", 1639 isp->isp_name); 1640 mp->error = EINVAL; 1641 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1642 printf("%s: Virtual segment addresses unsupported", 1643 isp->isp_name); 1644 mp->error = EINVAL; 1645 } else { 1646 /* Just use the segments provided */ 1647 segs = (struct bus_dma_segment *) csio->data_ptr; 1648 (*eptr)(mp, segs, csio->sglist_cnt, 0); 1649 } 1650 } 1651 #ifdef ISP_TARGET_MODE 1652 exit: 1653 #endif 1654 if (mp->error) { 1655 int retval = CMD_COMPLETE; 1656 if (mp->error == 
MUSHERR_NOQENTRIES) { 1657 retval = CMD_EAGAIN; 1658 } else if (mp->error == EFBIG) { 1659 XS_SETERR(csio, CAM_REQ_TOO_BIG); 1660 } else if (mp->error == EINVAL) { 1661 XS_SETERR(csio, CAM_REQ_INVALID); 1662 } else { 1663 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 1664 } 1665 return (retval); 1666 } else { 1667 /* 1668 * Check to see if we weren't cancelled while sleeping on 1669 * getting DMA resources... 1670 */ 1671 if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1672 if (dp) { 1673 bus_dmamap_unload(pci->parent_dmat, *dp); 1674 } 1675 return (CMD_COMPLETE); 1676 } 1677 return (CMD_QUEUED); 1678 } 1679 } 1680 1681 static void 1682 isp_pci_dmateardown(struct ispsoftc *isp, ISP_SCSI_XFER_T *xs, u_int32_t handle) 1683 { 1684 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 1685 bus_dmamap_t *dp = &pci->dmaps[handle - 1]; 1686 KASSERT((handle > 0 && handle <= isp->isp_maxcmds), 1687 ("bad handle in isp_pci_dmateardonw")); 1688 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1689 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD); 1690 } else { 1691 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE); 1692 } 1693 bus_dmamap_unload(pci->parent_dmat, *dp); 1694 } 1695 1696 1697 static void 1698 isp_pci_reset1(struct ispsoftc *isp) 1699 { 1700 /* Make sure the BIOS is disabled */ 1701 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 1702 } 1703 1704 static void 1705 isp_pci_dumpregs(struct ispsoftc *isp) 1706 { 1707 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 1708 printf("%s: PCI Status Command/Status=%lx\n", pci->pci_isp.isp_name, 1709 pci_conf_read(pci->pci_id, PCIR_COMMAND)); 1710 } 1711