/* $FreeBSD$ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 *---------------------------------------
 * Copyright (c) 1997, 1998, 1999 by Matthew Jacob
 * NASA/Ames Research Center
 * All rights reserved.
 *---------------------------------------
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <dev/isp/isp_freebsd.h>
#include <dev/isp/asm_pci.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/md_var.h>

static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
#ifndef ISP_DISABLE_1080_SUPPORT
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
#endif
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
	ispreq_t *, u_int16_t *, u_int16_t));
static void
isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t));

static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));

#ifndef	ISP_CODE_ORG
#define	ISP_CODE_ORG		0x1000
#endif
#ifndef	ISP_1040_RISC_CODE
#define	ISP_1040_RISC_CODE	NULL
#endif
#ifndef	ISP_1080_RISC_CODE
#define	ISP_1080_RISC_CODE	NULL
#endif
#ifndef	ISP_2100_RISC_CODE
#define	ISP_2100_RISC_CODE	NULL
#endif
#ifndef	ISP_2200_RISC_CODE
#define	ISP_2200_RISC_CODE	NULL
#endif

#ifndef	ISP_DISABLE_1020_SUPPORT
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1040_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef	ISP_DISABLE_1080_SUPPORT
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1080_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef	ISP_DISABLE_2100_SUPPORT
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2100_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif

#ifndef	ISP_DISABLE_2200_SUPPORT
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2200_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif
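/*
 * Note on the tables above: each ispmdvec acts as a per-chip-family
 * operation vector. The platform-independent core calls back through
 * these pointers for register access, DMA setup/teardown, reset and
 * register dumps, and takes the (optional) firmware image and its load
 * address from the trailing members. mdvec_1080 is shared by the
 * ISP1080, 1240 and 1280; those parts differ only in the parameters
 * chosen at attach time below.
 */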
#ifndef	SCSI_ISP_PREFER_MEM_MAP
#define	SCSI_ISP_PREFER_MEM_MAP	0
#endif

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
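/*
 * For reference: these composite values match the 32-bit word read from
 * PCI_ID_REG, which packs (device id << 16) | vendor id. For example, an
 * ISP1020 reports (0x1020 << 16) | 0x1077 == 0x10201077, which is exactly
 * what isp_pci_probe() below compares against PCI_QLOGIC_ISP.
 */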
#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static const char *isp_pci_probe __P((pcici_t tag, pcidi_t type));
static void isp_pci_attach __P((pcici_t config_d, int unit));

/* This distinguishing define is not right, but it does work */
#ifdef __alpha__
#define	IO_SPACE_MAPPING	ALPHA_BUS_SPACE_IO
#define	MEM_SPACE_MAPPING	ALPHA_BUS_SPACE_MEM
#else
#define	IO_SPACE_MAPPING	I386_BUS_SPACE_IO
#define	MEM_SPACE_MAPPING	I386_BUS_SPACE_MEM
#endif

struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	pcici_t				pci_id;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			parent_dmat;
	bus_dma_tag_t			cntrol_dmat;
	bus_dmamap_t			cntrol_dmap;
	bus_dmamap_t			*dmaps;
};

static u_long ispunit;

static struct pci_device isp_pci_driver = {
	"isp",
	isp_pci_probe,
	isp_pci_attach,
	&ispunit,
	NULL
};
COMPAT_PCI_DRIVER(isp_pci, isp_pci_driver);

static const char *
isp_pci_probe(pcici_t tag, pcidi_t type)
{
	static int oneshot = 1;
	char *x;

	switch (type) {
#ifndef	ISP_DISABLE_1020_SUPPORT
	case PCI_QLOGIC_ISP:
		x = "Qlogic ISP 1020/1040 PCI SCSI Adapter";
		break;
#endif
#ifndef	ISP_DISABLE_1080_SUPPORT
	case PCI_QLOGIC_ISP1080:
		x = "Qlogic ISP 1080 PCI SCSI Adapter";
		break;
	case PCI_QLOGIC_ISP1240:
		x = "Qlogic ISP 1240 PCI SCSI Adapter";
		break;
	case PCI_QLOGIC_ISP1280:
		x = "Qlogic ISP 1280 PCI SCSI Adapter";
		break;
#endif
#ifndef	ISP_DISABLE_2100_SUPPORT
	case PCI_QLOGIC_ISP2100:
		x = "Qlogic ISP 2100 PCI FC-AL Adapter";
		break;
#endif
#ifndef	ISP_DISABLE_2200_SUPPORT
	case PCI_QLOGIC_ISP2200:
		x = "Qlogic ISP 2200 PCI FC-AL Adapter";
		break;
#endif
	default:
		return (NULL);
	}
	if (oneshot) {
		oneshot = 0;
		CFGPRINTF("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	return (x);
}

static void
isp_pci_attach(pcici_t cfid, int unit)
{
#ifdef	SCSI_ISP_WWN
	const char *name = SCSI_ISP_WWN;
	char *vtp = NULL;
#endif
	int mapped, prefer_mem_map, bitmap;
	pci_port_t io_port;
	u_int32_t data, rev, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp;
	vm_offset_t vaddr, paddr;
	struct ispmdvec *mdvp;
	bus_size_t lim;
	ISP_LOCKVAL_DECL;

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
	if (pcs == NULL) {
		printf("isp%d: cannot allocate softc\n", unit);
		return;
	}
	bzero(pcs, sizeof (struct isp_pcisoftc));

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	if (getenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			printf("isp%d: not configuring\n", unit);
			free(pcs, M_DEVBUF);
			return;
		}
	}

	/*
	 * Figure out which we should try first - memory mapping or
	 * i/o mapping?
	 */
#if	SCSI_ISP_PREFER_MEM_MAP == 1
	prefer_mem_map = 1;
#else
	prefer_mem_map = 0;
#endif
	bitmap = 0;
	if (getenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit))
			prefer_mem_map = 1;
	}
	bitmap = 0;
	if (getenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit))
			prefer_mem_map = 0;
	}

	vaddr = paddr = NULL;
	mapped = 0;
	linesz = PCI_DFLT_LNSZ;
	/*
	 * Note that pci_conf_read is a 32 bit word aligned function.
	 */
	data = pci_conf_read(cfid, PCIR_COMMAND);
	if (prefer_mem_map) {
		if (data & PCI_COMMAND_MEM_ENABLE) {
			if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
				pcs->pci_st = MEM_SPACE_MAPPING;
				pcs->pci_sh = vaddr;
				mapped++;
			}
		}
		if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) {
			if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
				pcs->pci_st = IO_SPACE_MAPPING;
				pcs->pci_sh = io_port;
				mapped++;
			}
		}
	} else {
		if (data & PCI_COMMAND_IO_ENABLE) {
			if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
				pcs->pci_st = IO_SPACE_MAPPING;
				pcs->pci_sh = io_port;
				mapped++;
			}
		}
		if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) {
			if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
				pcs->pci_st = MEM_SPACE_MAPPING;
				pcs->pci_sh = vaddr;
				mapped++;
			}
		}
	}
	if (mapped == 0) {
		printf("isp%d: unable to map any ports!\n", unit);
		free(pcs, M_DEVBUF);
		return;
	}
	if (bootverbose)
		printf("isp%d: using %s space register mapping\n", unit,
		    pcs->pci_st == IO_SPACE_MAPPING? "I/O" : "Memory");

	data = pci_conf_read(cfid, PCI_ID_REG);
	rev = pci_conf_read(cfid, PCI_CLASS_REG) & 0xff;	/* revision */
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
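	/*
	 * A worked example of the pci_poff[] translation done by the
	 * register accessors further below: a hypothetical register
	 * offset such as (MBOX_BLOCK + 0x0a) is split into its block
	 * selector (MBOX_BLOCK), which indexes pci_poff[] to find that
	 * block's base within PCI register space, plus the low byte
	 * (0x0a) as the offset within the block.
	 */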
	/*
	 * GCC!
	 */
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	lim = BUS_SPACE_MAXSIZE_32BIT;
#ifndef	ISP_DISABLE_1020_SUPPORT
	if (data == PCI_QLOGIC_ISP) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
		lim = BUS_SPACE_MAXSIZE_24BIT;
	}
#endif
#ifndef	ISP_DISABLE_1080_SUPPORT
	if (data == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (data == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (data == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef	ISP_DISABLE_2100_SUPPORT
	if (data == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (rev < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
#endif
#ifndef	ISP_DISABLE_2200_SUPPORT
	if (data == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
#endif
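	/*
	 * Note: the ISP1240/1280 cases above reserve 2 * sizeof (sdparam)
	 * because those chips are dual SCSI bus parts; the core keeps one
	 * sdparam block per bus.
	 */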
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT);
	if (isp->isp_param == NULL) {
		printf("isp%d: cannot allocate parameter data\n", unit);
		free(pcs, M_DEVBUF);
		return;
	}
	bzero(isp->isp_param, psize);
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = rev;
	(void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit);
	isp->isp_osinfo.unit = unit;

	ISP_LOCK(isp);

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	data = pci_cfgread(cfid, PCIR_COMMAND, 2);
	data |= PCIM_CMD_SEREN		|
		PCIM_CMD_PERRESPEN	|
		PCIM_CMD_BUSMASTEREN	|
		PCIM_CMD_INVEN;
	pci_cfgwrite(cfid, PCIR_COMMAND, data, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_cfgread(cfid, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		CFGPRINTF("%s: set PCI line size to %d\n", isp->isp_name, data);
		pci_cfgwrite(cfid, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_cfgread(cfid, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		CFGPRINTF("%s: set PCI latency to %d\n", isp->isp_name, data);
		pci_cfgwrite(cfid, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_cfgread(cfid, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_cfgwrite(cfid, PCIR_ROMADDR, data, 4);
	ISP_UNLOCK(isp);

	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
	    255, lim, 0, &pcs->parent_dmat) != 0) {
		printf("%s: could not create master dma tag\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
		return;
	}
	if (pci_map_int(cfid, (void (*)(void *))isp_intr,
	    (void *)isp, &IMASK) == 0) {
		printf("%s: could not map interrupt\n", isp->isp_name);
		free(isp->isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
		return;
	}

	pcs->pci_id = cfid;
#ifdef	SCSI_ISP_NO_FWLOAD_MASK
	if (SCSI_ISP_NO_FWLOAD_MASK && (SCSI_ISP_NO_FWLOAD_MASK & (1 << unit)))
		isp->isp_confopts |= ISP_CFG_NORELOAD;
#endif
	if (getenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}

#ifdef	SCSI_ISP_NO_NVRAM_MASK
	if (SCSI_ISP_NO_NVRAM_MASK && (SCSI_ISP_NO_NVRAM_MASK & (1 << unit))) {
		printf("%s: ignoring NVRAM\n", isp->isp_name);
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
#endif
	if (getenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}

#ifdef	SCSI_ISP_FCDUPLEX
	if (IS_FC(isp)) {
		if (SCSI_ISP_FCDUPLEX && (SCSI_ISP_FCDUPLEX & (1 << unit))) {
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
		}
	}
#endif
	if (getenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	/*
	 * Look for overriding WWN. This is a Node WWN so it binds to
	 * all FC instances. A Port WWN will be constructed from it
	 * as appropriate.
	 */
#ifdef	SCSI_ISP_WWN
	isp->isp_osinfo.default_wwn = strtoq(name, &vtp, 16);
	if (vtp != name && *vtp == 0) {
		isp->isp_confopts |= ISP_CFG_OWNWWN;
	} else
#endif
	if (!getenv_quad("isp_wwn", (quad_t *) &isp->isp_osinfo.default_wwn)) {
		int i;
		u_int64_t seed = (u_int64_t) (intptr_t) isp;

		seed <<= 16;
		seed &= ((1LL << 48) - 1LL);
		/*
		 * This isn't very random, but it's the best we can do for
		 * the real edge case of cards that don't have WWNs. If
		 * you recompile a new vers.c, you'll get a different WWN.
		 */
		for (i = 0; version[i] != 0; i++) {
			seed += version[i];
		}
		/*
		 * Make sure the top nibble has something vaguely sensible.
		 */
		isp->isp_osinfo.default_wwn |= (4LL << 60) | seed;
	} else {
		isp->isp_confopts |= ISP_CFG_OWNWWN;
	}
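	/*
	 * Usage sketch (assuming a loader-style kernel environment): a
	 * node WWN can be supplied at boot time with, e.g., the
	 * hypothetical value
	 *
	 *	set isp_wwn=0x5000000012345678
	 *
	 * in which case getenv_quad() succeeds above, ISP_CFG_OWNWWN is
	 * set, and no WWN is fabricated from the softc address and
	 * version string.
	 */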
	(void) getenv_int("isp_debug", &isp_debug);
#ifdef	ISP_TARGET_MODE
	(void) getenv_int("isp_tdebug", &isp_tdebug);
#endif
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		(void) pci_unmap_int(cfid);
		ISP_UNLOCK(isp);
		free(pcs, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			(void) pci_unmap_int(cfid); /* Does nothing */
			ISP_UNLOCK(isp);
			free(pcs, M_DEVBUF);
			return;
		}
	}
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			(void) pci_unmap_int(cfid); /* Does nothing */
			ISP_UNLOCK(isp);
			free(pcs, M_DEVBUF);
			return;
		}
	}
	ISP_UNLOCK(isp);
}

static u_int16_t
isp_pci_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
}
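/*
 * Note on the BIU_CONF1 dance above: the SXP register block shares PCI
 * address space with other register blocks, so BIU_PCI_CONF1_SXP must be
 * set in BIU_CONF1 around SXP accesses and restored afterwards. The
 * 1080-specific accessors below do the same, but additionally bank-select
 * between SXP0/SXP1 (one per bus) and the DMA block.
 */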
#ifndef	ISP_DISABLE_1080_SUPPORT
static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		isp_pci_wr_reg(isp, BIU_CONF1, tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if (oc) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
}
#endif

static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));

struct imush {
	struct ispsoftc *isp;
	int error;
};

static void
isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		imushp->isp->isp_rquest_dma = segs->ds_addr;
	}
}

static void
isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		imushp->isp->isp_result_dma = segs->ds_addr;
	}
}

static void
isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		fcparam *fcp = imushp->isp->isp_param;
		fcp->isp_scdma = segs->ds_addr;
	}
}
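/*
 * A note on the callback convention above (a sketch of busdma behavior
 * as this driver relies on it, not a general guarantee): when loading a
 * single contiguous, already-allocated buffer, bus_dmamap_load() normally
 * runs the callback before returning, so isp_pci_mbxdma() below can check
 * the error and physical address left in its struct imush cookie as soon
 * as each load call returns.
 */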
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error;
	bus_size_t lim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	len = sizeof (ISP_SCSI_XFER_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		printf("%s: can't alloc xflist array\n", isp->isp_name);
		return (1);
	}
	bzero(isp->isp_xflist, len);
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pci->dmaps == NULL) {
		printf("%s: can't alloc dma maps\n", isp->isp_name);
		free(isp->isp_xflist, M_DEVBUF);
		return (1);
	}

	if (IS_FC(isp) || IS_ULTRA2(isp))
		lim = BUS_SPACE_MAXADDR + 1;
	else
		lim = BUS_SPACE_MAXADDR_24BIT + 1;

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}
	if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
		printf("%s: cannot create a dma tag for control spaces\n",
		    isp->isp_name);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}
	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
		printf("%s: cannot allocate %d bytes of CCB memory\n",
		    isp->isp_name, len);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}

	isp->isp_rquest = base;
	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, &im, 0);
	if (im.error) {
		printf("%s: error %d loading dma map for DMA request queue\n",
		    isp->isp_name, im.error);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		isp->isp_rquest = NULL;
		return (1);
	}
	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, &im, 0);
	if (im.error) {
		printf("%s: error %d loading dma map for DMA result queue\n",
		    isp->isp_name, im.error);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		isp->isp_rquest = NULL;
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
		if (error) {
			printf("%s: error %d creating per-cmd DMA maps\n",
			    isp->isp_name, error);
			free(isp->isp_xflist, M_DEVBUF);
			free(pci->dmaps, M_DEVBUF);
			isp->isp_rquest = NULL;
			return (1);
		}
	}

	if (IS_FC(isp)) {
		fcparam *fcp = (fcparam *) isp->isp_param;
		fcp->isp_scratch = base +
		    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN) +
		    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
		im.error = 0;
		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
		if (im.error) {
			printf("%s: error %d loading FC scratch area\n",
			    isp->isp_name, im.error);
			free(isp->isp_xflist, M_DEVBUF);
			free(pci->dmaps, M_DEVBUF);
			isp->isp_rquest = NULL;
			return (1);
		}
	}
	return (0);
}
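/*
 * Layout of the single control-space allocation made in isp_pci_mbxdma()
 * above:
 *
 *	base                                      request queue
 *	base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN)   result queue
 *	     + ISP_QUEUE_SIZE(RESULT_QUEUE_LEN)   FC scratch (FC cards only)
 */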
typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *iptrp;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void dma2_tgt __P((void *, bus_dma_segment_t *, int, int));
static void dma2_tgt_fc __P((void *, bus_dma_segment_t *, int, int));

static void
dma2_tgt(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	u_int8_t scsi_status, send_status;
	ct_entry_t *cto;
	u_int32_t handle;
	int nctios;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		printf("%s: bad segment count (%d)\n", mp->isp->isp_name, nseg);
		mp->error = EFAULT;
		return;
	}

	csio = mp->cmd_token;
	cto = mp->rq;

	/*
	 * Save handle, and potentially any SCSI status, which
	 * we'll reinsert on the last CTIO we're going to send.
	 */
	handle = cto->ct_reserved;
	cto->ct_reserved = 0;
	scsi_status = cto->ct_scsi_status;
	cto->ct_scsi_status = 0;
	send_status = cto->ct_flags & CT_SENDSTATUS;
	cto->ct_flags &= ~CT_SENDSTATUS;

	pci = (struct isp_pcisoftc *)mp->isp;
	dp = &pci->dmaps[handle - 1];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	cto->ct_xfrlen = 0;
	cto->ct_resid = 0;
	cto->ct_seg_count = 0;
	bzero(cto->ct_dataseg, sizeof (cto->ct_dataseg));
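	/*
	 * Worked example of the split below: a transfer that mapped to
	 * nseg segments needs howmany(nseg, ISP_RQDSEG) CTIOs; every
	 * CTIO but the last carries ISP_RQDSEG data segments, and only
	 * the final one gets the saved handle, SCSI status and
	 * CT_SENDSTATUS flag put back, so completion fires exactly once.
	 */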
	while (nctios--) {
		int seg, seglim;

		seglim = nseg;
		if (seglim > ISP_RQDSEG)
			seglim = ISP_RQDSEG;

		for (seg = 0; seg < seglim; seg++) {
			cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
			cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
			cto->ct_xfrlen += dm_segs->ds_len;
			dm_segs++;
		}
		nseg -= seglim;

		cto->ct_seg_count = seg;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			cto->ct_flags |= CT_DATA_IN;
		} else {
			cto->ct_flags |= CT_DATA_OUT;
		}

		if (nctios == 0) {
			/*
			 * We're the last in a sequence of CTIOs, so mark this
			 * CTIO and save the handle to the CCB such that when
			 * this CTIO completes we can free dma resources and
			 * do whatever else we need to do to finish the rest
			 * of the command.
			 */
			cto->ct_header.rqs_seqno = 1;
			cto->ct_reserved = handle;
			cto->ct_scsi_status = scsi_status;
			cto->ct_flags |= send_status;
			ISP_TDQE(mp->isp, "last dma2_tgt", *mp->iptrp, cto);
			if (isp_tdebug) {
				printf("%s:CTIO lun %d->iid%d flgs 0x%x sts "
				    "0x%x ssts 0x%x res %u\n",
				    mp->isp->isp_name, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_flags, cto->ct_status,
				    cto->ct_scsi_status, cto->ct_resid);
			}
		} else {
			ct_entry_t *octo = cto;
			cto->ct_reserved = 0;
			cto->ct_header.rqs_seqno = 0;
			ISP_TDQE(mp->isp, "dma2_tgt", *mp->iptrp, cto);
			if (isp_tdebug) {
				printf("%s:CTIO lun %d->iid%d flgs 0x%x res"
				    " %u\n", mp->isp->isp_name,
				    csio->ccb_h.target_lun, cto->ct_iid,
				    cto->ct_flags, cto->ct_resid);
			}
			cto = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
			*mp->iptrp =
			    ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN);
			if (*mp->iptrp == mp->optr) {
				printf("%s: Queue Overflow in dma2_tgt\n",
				    mp->isp->isp_name);
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}
			/*
			 * Fill in the new CTIO with info from the old one.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			/* ct_header.rqs_seqno && ct_reserved filled in later */
			cto->ct_lun = octo->ct_lun;
			cto->ct_iid = octo->ct_iid;
			cto->ct_reserved2 = octo->ct_reserved2;
			cto->ct_tgt = octo->ct_tgt;
			cto->ct_flags = octo->ct_flags & ~CT_DATAMASK;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_tag_val = octo->ct_tag_val;
			cto->ct_tag_type = octo->ct_tag_type;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_timeout = octo->ct_timeout;
			cto->ct_seg_count = 0;
			bzero(cto->ct_dataseg, sizeof (cto->ct_dataseg));
		}
	}
}
static void
dma2_tgt_fc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	ct2_entry_t *cto;
	u_int16_t scsi_status, send_status;
	u_int32_t handle, reloff;
	int nctios;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		printf("%s: bad segment count (%d)\n", mp->isp->isp_name, nseg);
		mp->error = EFAULT;
		return;
	}

	csio = mp->cmd_token;
	cto = mp->rq;
	/*
	 * Save handle, and potentially any SCSI status, which
	 * we'll reinsert on the last CTIO we're going to send.
	 */
	handle = cto->ct_reserved;
	cto->ct_reserved = 0;
	scsi_status = cto->rsp.m0.ct_scsi_status;
	cto->rsp.m0.ct_scsi_status = 0;
	send_status = cto->ct_flags & CT2_SENDSTATUS;
	cto->ct_flags &= ~CT2_SENDSTATUS;

	pci = (struct isp_pcisoftc *)mp->isp;
	dp = &pci->dmaps[handle - 1];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nctios = nseg / ISP_RQDSEG_T2;
	if (nseg % ISP_RQDSEG_T2) {
		nctios++;
	}

	cto->ct_resid = 0;
	cto->ct_seg_count = 0;
	cto->ct_reloff = reloff = 0;
	bzero(&cto->rsp, sizeof (cto->rsp));

	while (nctios--) {
		int seg, seglim;

		seglim = nseg;
		if (seglim > ISP_RQDSEG_T2)
			seglim = ISP_RQDSEG_T2;

		for (seg = 0; seg < seglim; seg++) {
			cto->rsp.m0.ct_dataseg[seg].ds_base = dm_segs->ds_addr;
			cto->rsp.m0.ct_dataseg[seg].ds_count = dm_segs->ds_len;
			cto->rsp.m0.ct_xfrlen += dm_segs->ds_len;
			reloff += dm_segs->ds_len;
			dm_segs++;
		}
		nseg -= seglim;

		cto->ct_seg_count = seg;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			cto->ct_flags |= CT2_DATA_IN;
		} else {
			cto->ct_flags |= CT2_DATA_OUT;
		}

		if (nctios == 0) {
			/*
			 * We're the last in a sequence of CTIOs, so mark this
			 * CTIO and save the handle to the CCB such that when
			 * this CTIO completes we can free dma resources and
			 * do whatever else we need to do to finish the rest
			 * of the command.
			 */
			cto->ct_header.rqs_seqno = 1;
			cto->ct_reserved = handle;
			cto->rsp.m0.ct_scsi_status = scsi_status;
			cto->ct_flags |= send_status;
			ISP_TDQE(mp->isp, "last dma2_tgt_fc", *mp->iptrp, cto);
			if (isp_tdebug) {
				printf("%s:CTIO2 RX_ID 0x%x lun %d->iid%d flgs "
				    "0x%x sts 0x%x ssts 0x%x roff %u res "
				    "%u\n", mp->isp->isp_name, cto->ct_rxid,
				    csio->ccb_h.target_lun, cto->ct_iid,
				    cto->ct_flags, cto->ct_status,
				    cto->rsp.m0.ct_scsi_status,
				    cto->ct_reloff, cto->ct_resid);
			}
		} else {
			ct2_entry_t *octo = cto;
			cto->ct_reserved = 0;
			cto->ct_header.rqs_seqno = 0;
			ISP_TDQE(mp->isp, "dma2_tgt_fc", *mp->iptrp, cto);
			if (isp_tdebug) {
				printf("%s:CTIO2 RX_ID 0x%x lun %d->iid%d flgs "
				    "0x%x ro %u res %u\n",
				    mp->isp->isp_name,
				    cto->ct_rxid, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_flags, cto->ct_reloff,
				    cto->ct_resid);
			}
			cto = (ct2_entry_t *)
			    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
			*mp->iptrp =
			    ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN);
			if (*mp->iptrp == mp->optr) {
				printf("%s: Queue Overflow in dma2_tgt_fc\n",
				    mp->isp->isp_name);
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}
			/*
			 * Fill in the new CTIO with info from the old one.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			/* ct_header.rqs_seqno && ct_reserved filled in later */
			cto->ct_lun = octo->ct_lun;
			cto->ct_iid = octo->ct_iid;
			cto->ct_rxid = octo->ct_rxid;
			cto->ct_flags = octo->ct_flags & ~CT2_DATAMASK;
			cto->ct_status = 0;
			cto->ct_resid = 0;
			cto->ct_timeout = octo->ct_timeout;
			cto->ct_seg_count = 0;
			cto->ct_reloff = reloff;
			bzero(&cto->rsp, sizeof (cto->rsp));
		}
	}
}
#endif
static void dma2 __P((void *, bus_dma_segment_t *, int, int));

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	ispcontreq_t *crq;
	int seglim, datalen;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		printf("%s: bad segment count (%d)\n", mp->isp->isp_name, nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	rq = mp->rq;
	pci = (struct isp_pcisoftc *)mp->isp;
	dp = &pci->dmaps[rq->req_handle - 1];

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
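	/*
	 * Sketch of the queue layout this produces for a transfer with
	 * more segments than fit in the initial entry:
	 *
	 *	rq  (ispreq_t/ispreqt2_t)   first seglim data segments
	 *	crq (ispcontreq_t)          next ISP_CDSEG segments
	 *	crq (ispcontreq_t)          ... until datalen is covered
	 *
	 * with rq->req_header.rqs_entry_count incremented once per
	 * continuation entry.
	 */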
	if (IS_FC(mp->isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		seglim = ISP_RQDSEG;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(mp->isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
#if 0
		if (IS_FC(mp->isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    mp->isp->isp_name, rq->req_seg_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
		} else {
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    mp->isp->isp_name, rq->req_seg_count,
			    rq->req_dataseg[rq->req_seg_count].ds_count,
			    rq->req_dataseg[rq->req_seg_count].ds_base);
		}
#endif
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		crq = (ispcontreq_t *)
		    ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp);
		*mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN);
		if (*mp->iptrp == mp->optr) {
#if 0
			printf("%s: Request Queue Overflow++\n",
			    mp->isp->isp_name);
#endif
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
#if 0
			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
			    mp->isp->isp_name,
			    rq->req_header.rqs_entry_count - 1,
			    seglim, crq->req_dataseg[seglim].ds_count,
			    crq->req_dataseg[seglim].ds_base);
#endif
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
	}
}

static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
	u_int16_t *iptrp, u_int16_t optr)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr) __P((void *, bus_dma_segment_t *, int, int));

	/*
	 * NB: if we need to do request queue entry swizzling,
	 * NB: this is where it would need to be done for cmds
	 * NB: that move no data. For commands that move data,
	 * NB: swizzling would take place in those functions.
	 */
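	/*
	 * Error plumbing, for reference: the busdma callbacks record
	 * failures in mush_t.error; MUSHERR_NOQENTRIES is mapped to
	 * CMD_EAGAIN (retryable) at the bottom of this function, EFBIG
	 * and EINVAL become CAM errors on the CCB, and anything else is
	 * reported as CAM_UNREC_HBA_ERROR.
	 */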
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		rq->req_seg_count = 1;
#ifdef	ISP_TARGET_MODE
		if (isp_tdebug && csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			if (IS_FC(isp)) {
				ct2_entry_t *cto = (ct2_entry_t *) rq;
				printf("%s:CTIO2 RX_ID 0x%x lun %d->iid%d flgs "
				    "0x%x sts 0x%x ssts 0x%x res %u\n",
				    isp->isp_name, cto->ct_rxid,
				    csio->ccb_h.target_lun, cto->ct_iid,
				    cto->ct_flags, cto->ct_status,
				    cto->rsp.m0.ct_scsi_status,
				    cto->ct_resid);
			} else {
				ct_entry_t *cto = (ct_entry_t *) rq;
				printf("%s:CTIO lun %d->iid%d flgs 0x%x sts "
				    "0x%x ssts 0x%x res %u\n",
				    isp->isp_name, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_flags, cto->ct_status,
				    cto->ct_scsi_status, cto->ct_resid);
			}
		}
#endif
		return (CMD_QUEUED);
	}

#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = dma2_tgt_fc;
		} else {
			eptr = dma2_tgt;
		}
	} else
#endif
	eptr = dma2;

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->iptrp = iptrp;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pci->dmaps[rq->req_handle - 1];
			s = splsoftvm();
			error = bus_dmamap_load(pci->parent_dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pci->parent_dmat, *dp);
				mp->error = EINVAL;
				printf("%s: deferred dma allocation not "
				    "supported\n", isp->isp_name);
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				printf("%s: error %d in dma mapping code\n",
				    isp->isp_name, error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			printf("%s: Physical segment pointers unsupported\n",
			    isp->isp_name);
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			printf("%s: Virtual segment addresses unsupported\n",
			    isp->isp_name);
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	} else {
		/*
		 * Check to see if we weren't cancelled while sleeping on
		 * getting DMA resources...
		 */
		if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			if (dp) {
				bus_dmamap_unload(pci->parent_dmat, *dp);
			}
			return (CMD_COMPLETE);
		}
		return (CMD_QUEUED);
	}
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, ISP_SCSI_XFER_T *xs,
	u_int32_t handle)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pci->dmaps[handle - 1];
	KASSERT((handle > 0 && handle <= isp->isp_maxcmds),
	    ("bad handle in isp_pci_dmateardown"));
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pci->parent_dmat, *dp);
}

static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	printf("%s: PCI Status Command/Status=%lx\n", pci->pci_isp.isp_name,
	    pci_conf_read(pci->pci_id, PCIR_COMMAND));
}