1 /* $FreeBSD$ */ 2 /* 3 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 4 * FreeBSD Version. 5 * 6 * Copyright (c) 1997, 1998, 1999, 2000 by Matthew Jacob 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice immediately at the beginning of the file, without modification, 13 * this list of conditions, and the following disclaimer. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/kernel.h> 33 #include <sys/module.h> 34 #include <sys/bus.h> 35 36 #include <pci/pcireg.h> 37 #include <pci/pcivar.h> 38 39 #include <machine/bus_memio.h> 40 #include <machine/bus_pio.h> 41 #include <machine/bus.h> 42 #include <machine/resource.h> 43 #include <sys/rman.h> 44 #include <sys/malloc.h> 45 46 #include <dev/isp/isp_freebsd.h> 47 48 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int)); 49 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t)); 50 static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int)); 51 static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t)); 52 static int isp_pci_mbxdma __P((struct ispsoftc *)); 53 static int isp_pci_dmasetup __P((struct ispsoftc *, XS_T *, 54 ispreq_t *, u_int16_t *, u_int16_t)); 55 static void 56 isp_pci_dmateardown __P((struct ispsoftc *, XS_T *, u_int32_t)); 57 58 static void isp_pci_reset1 __P((struct ispsoftc *)); 59 static void isp_pci_dumpregs __P((struct ispsoftc *, const char *)); 60 61 #ifndef ISP_CODE_ORG 62 #define ISP_CODE_ORG 0x1000 63 #endif 64 65 static struct ispmdvec mdvec = { 66 isp_pci_rd_reg, 67 isp_pci_wr_reg, 68 isp_pci_mbxdma, 69 isp_pci_dmasetup, 70 isp_pci_dmateardown, 71 NULL, 72 isp_pci_reset1, 73 isp_pci_dumpregs, 74 NULL, 75 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 76 }; 77 78 static struct ispmdvec mdvec_1080 = { 79 isp_pci_rd_reg_1080, 80 isp_pci_wr_reg_1080, 81 isp_pci_mbxdma, 82 isp_pci_dmasetup, 83 isp_pci_dmateardown, 84 NULL, 85 isp_pci_reset1, 86 isp_pci_dumpregs, 87 NULL, 88 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 89 }; 90 91 static struct ispmdvec mdvec_12160 = { 92 isp_pci_rd_reg_1080, 93 isp_pci_wr_reg_1080, 94 isp_pci_mbxdma, 95 isp_pci_dmasetup, 96 isp_pci_dmateardown, 97 NULL, 98 isp_pci_reset1, 99 isp_pci_dumpregs, 100 NULL, 101 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 102 }; 103 104 static struct ispmdvec mdvec_2100 = { 105 isp_pci_rd_reg, 106 isp_pci_wr_reg, 107 isp_pci_mbxdma, 108 isp_pci_dmasetup, 109 isp_pci_dmateardown, 110 NULL, 111 isp_pci_reset1, 112 isp_pci_dumpregs 113 }; 114 115 static struct ispmdvec mdvec_2200 = { 116 isp_pci_rd_reg, 117 isp_pci_wr_reg, 118 isp_pci_mbxdma, 119 isp_pci_dmasetup, 120 isp_pci_dmateardown, 121 NULL, 122 isp_pci_reset1, 123 isp_pci_dumpregs 124 }; 125 126 #ifndef PCIM_CMD_INVEN 127 #define PCIM_CMD_INVEN 0x10 128 #endif 129 #ifndef PCIM_CMD_BUSMASTEREN 130 #define PCIM_CMD_BUSMASTEREN 0x0004 131 #endif 132 #ifndef PCIM_CMD_PERRESPEN 133 #define PCIM_CMD_PERRESPEN 0x0040 134 #endif 135 #ifndef PCIM_CMD_SEREN 136 #define PCIM_CMD_SEREN 0x0100 137 #endif 138 139 #ifndef PCIR_COMMAND 140 #define PCIR_COMMAND 0x04 141 #endif 142 143 #ifndef PCIR_CACHELNSZ 144 #define PCIR_CACHELNSZ 0x0c 145 #endif 146 147 #ifndef PCIR_LATTIMER 148 #define PCIR_LATTIMER 0x0d 149 #endif 150 151 #ifndef PCIR_ROMADDR 152 #define PCIR_ROMADDR 0x30 153 #endif 154 155 #ifndef PCI_VENDOR_QLOGIC 156 #define PCI_VENDOR_QLOGIC 0x1077 157 #endif 158 159 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 160 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 161 #endif 162 163 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 164 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 165 #endif 166 167 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 168 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 169 #endif 170 171 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 172 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 173 #endif 174 175 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 176 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 177 #endif 178 179 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 180 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 181 #endif 182 183 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 184 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 185 #endif 186 187 #define PCI_QLOGIC_ISP1020 \ 188 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 189 190 #define PCI_QLOGIC_ISP1080 \ 191 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 192 193 #define PCI_QLOGIC_ISP12160 \ 194 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 195 196 #define PCI_QLOGIC_ISP1240 \ 197 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 198 199 #define PCI_QLOGIC_ISP1280 \ 200 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 201 202 #define PCI_QLOGIC_ISP2100 \ 203 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 204 205 #define PCI_QLOGIC_ISP2200 \ 206 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 207 208 /* 209 * Odd case for some AMI raid cards... We need to *not* attach to this. 210 */ 211 #define AMI_RAID_SUBVENDOR_ID 0x101e 212 213 #define IO_MAP_REG 0x10 214 #define MEM_MAP_REG 0x14 215 216 #define PCI_DFLT_LTNCY 0x40 217 #define PCI_DFLT_LNSZ 0x10 218 219 static int isp_pci_probe (device_t); 220 static int isp_pci_attach (device_t); 221 222 struct isp_pcisoftc { 223 struct ispsoftc pci_isp; 224 device_t pci_dev; 225 struct resource * pci_reg; 226 bus_space_tag_t pci_st; 227 bus_space_handle_t pci_sh; 228 void * ih; 229 int16_t pci_poff[_NREG_BLKS]; 230 bus_dma_tag_t parent_dmat; 231 bus_dma_tag_t cntrol_dmat; 232 bus_dmamap_t cntrol_dmap; 233 bus_dmamap_t *dmaps; 234 }; 235 ispfwfunc *isp_get_firmware_p = NULL; 236 237 static device_method_t isp_pci_methods[] = { 238 /* Device interface */ 239 DEVMETHOD(device_probe, isp_pci_probe), 240 DEVMETHOD(device_attach, isp_pci_attach), 241 { 0, 0 } 242 }; 243 static void isp_pci_intr __P((void *)); 244 245 static driver_t isp_pci_driver = { 246 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 247 }; 248 static devclass_t isp_devclass; 249 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 250 MODULE_VERSION(isp, 1); 251 252 static int 253 isp_pci_probe(device_t dev) 254 { 255 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 256 case PCI_QLOGIC_ISP1020: 257 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 258 break; 259 case PCI_QLOGIC_ISP1080: 260 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 261 break; 262 case PCI_QLOGIC_ISP1240: 263 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 264 break; 265 case PCI_QLOGIC_ISP1280: 266 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 267 break; 268 case PCI_QLOGIC_ISP12160: 269 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 270 return (ENXIO); 271 } 272 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 273 break; 274 case PCI_QLOGIC_ISP2100: 275 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 276 break; 277 case PCI_QLOGIC_ISP2200: 278 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 279 break; 280 default: 281 return (ENXIO); 282 } 283 if (device_get_unit(dev) == 0 && bootverbose) { 284 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 285 "Core Version %d.%d\n", 286 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 287 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 288 } 289 /* 290 * XXXX: Here is where we might load the f/w module 291 * XXXX: (or increase a reference count to it). 292 */ 293 return (0); 294 } 295 296 static int 297 isp_pci_attach(device_t dev) 298 { 299 struct resource *regs, *irq; 300 int unit, bitmap, rtp, rgd, iqd, m1, m2, isp_debug, role; 301 u_int32_t data, cmd, linesz, psize, basetype; 302 struct isp_pcisoftc *pcs; 303 struct ispsoftc *isp = NULL; 304 struct ispmdvec *mdvp; 305 bus_size_t lim; 306 #ifdef ISP_SMPLOCK 307 int locksetup = 0; 308 #endif 309 310 /* 311 * Figure out if we're supposed to skip this one. 312 * If we are, we actually go to ISP_ROLE_NONE. 313 */ 314 unit = device_get_unit(dev); 315 if (getenv_int("isp_disable", &bitmap)) { 316 if (bitmap & (1 << unit)) { 317 device_printf(dev, "device is disabled\n"); 318 /* but return 0 so the !$)$)*!$*) unit isn't reused */ 319 return (0); 320 } 321 } 322 #ifdef ISP_TARGET_MODE 323 role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET; 324 #else 325 role = ISP_DEFAULT_ROLES; 326 #endif 327 if (getenv_int("isp_none", &bitmap)) { 328 if (bitmap & (1 << unit)) { 329 device_printf(dev, "setting to ISP_ROLE_NONE\n"); 330 role = ISP_ROLE_NONE; 331 } 332 } 333 334 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO); 335 if (pcs == NULL) { 336 device_printf(dev, "cannot allocate softc\n"); 337 return (ENOMEM); 338 } 339 340 /* 341 * Figure out which we should try first - memory mapping or i/o mapping? 342 */ 343 #ifdef __alpha__ 344 m1 = PCIM_CMD_MEMEN; 345 m2 = PCIM_CMD_PORTEN; 346 #else 347 m1 = PCIM_CMD_PORTEN; 348 m2 = PCIM_CMD_MEMEN; 349 #endif 350 bitmap = 0; 351 if (getenv_int("isp_mem_map", &bitmap)) { 352 if (bitmap & (1 << unit)) { 353 m1 = PCIM_CMD_MEMEN; 354 m2 = PCIM_CMD_PORTEN; 355 } 356 } 357 bitmap = 0; 358 if (getenv_int("isp_io_map", &bitmap)) { 359 if (bitmap & (1 << unit)) { 360 m1 = PCIM_CMD_PORTEN; 361 m2 = PCIM_CMD_MEMEN; 362 } 363 } 364 365 linesz = PCI_DFLT_LNSZ; 366 irq = regs = NULL; 367 rgd = rtp = iqd = 0; 368 369 cmd = pci_read_config(dev, PCIR_COMMAND, 1); 370 if (cmd & m1) { 371 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 372 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 373 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE); 374 } 375 if (regs == NULL && (cmd & m2)) { 376 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 377 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 378 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE); 379 } 380 if (regs == NULL) { 381 device_printf(dev, "unable to map any ports\n"); 382 goto bad; 383 } 384 if (bootverbose) 385 device_printf(dev, "using %s space register mapping\n", 386 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 387 pcs->pci_dev = dev; 388 pcs->pci_reg = regs; 389 pcs->pci_st = rman_get_bustag(regs); 390 pcs->pci_sh = rman_get_bushandle(regs); 391 392 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 393 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 394 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 395 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 396 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 397 mdvp = &mdvec; 398 basetype = ISP_HA_SCSI_UNKNOWN; 399 psize = sizeof (sdparam); 400 lim = BUS_SPACE_MAXSIZE_32BIT; 401 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 402 mdvp = &mdvec; 403 basetype = ISP_HA_SCSI_UNKNOWN; 404 psize = sizeof (sdparam); 405 lim = BUS_SPACE_MAXSIZE_24BIT; 406 } 407 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 408 mdvp = &mdvec_1080; 409 basetype = ISP_HA_SCSI_1080; 410 psize = sizeof (sdparam); 411 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 412 ISP1080_DMA_REGS_OFF; 413 } 414 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 415 mdvp = &mdvec_1080; 416 basetype = ISP_HA_SCSI_1240; 417 psize = 2 * sizeof (sdparam); 418 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 419 ISP1080_DMA_REGS_OFF; 420 } 421 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 422 mdvp = &mdvec_1080; 423 basetype = ISP_HA_SCSI_1280; 424 psize = 2 * sizeof (sdparam); 425 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 426 ISP1080_DMA_REGS_OFF; 427 } 428 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 429 mdvp = &mdvec_12160; 430 basetype = ISP_HA_SCSI_12160; 431 psize = 2 * sizeof (sdparam); 432 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 433 ISP1080_DMA_REGS_OFF; 434 } 435 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 436 mdvp = &mdvec_2100; 437 basetype = ISP_HA_FC_2100; 438 psize = sizeof (fcparam); 439 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 440 PCI_MBOX_REGS2100_OFF; 441 if (pci_get_revid(dev) < 3) { 442 /* 443 * XXX: Need to get the actual revision 444 * XXX: number of the 2100 FB. At any rate, 445 * XXX: lower cache line size for early revision 446 * XXX; boards. 447 */ 448 linesz = 1; 449 } 450 } 451 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 452 mdvp = &mdvec_2200; 453 basetype = ISP_HA_FC_2200; 454 psize = sizeof (fcparam); 455 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 456 PCI_MBOX_REGS2100_OFF; 457 } 458 isp = &pcs->pci_isp; 459 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 460 if (isp->isp_param == NULL) { 461 device_printf(dev, "cannot allocate parameter data\n"); 462 goto bad; 463 } 464 isp->isp_mdvec = mdvp; 465 isp->isp_type = basetype; 466 isp->isp_revision = pci_get_revid(dev); 467 (void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit); 468 isp->isp_osinfo.unit = unit; 469 isp->isp_role = role; 470 471 /* 472 * Try and find firmware for this device. 473 */ 474 475 if (isp_get_firmware_p) { 476 int device = (int) pci_get_device(dev); 477 #ifdef ISP_TARGET_MODE 478 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 479 #else 480 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 481 #endif 482 } 483 484 /* 485 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 486 * are set. 487 */ 488 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 489 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 490 pci_write_config(dev, PCIR_COMMAND, cmd, 1); 491 492 /* 493 * Make sure the Cache Line Size register is set sensibly. 494 */ 495 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 496 if (data != linesz) { 497 data = PCI_DFLT_LNSZ; 498 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 499 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 500 } 501 502 /* 503 * Make sure the Latency Timer is sane. 504 */ 505 data = pci_read_config(dev, PCIR_LATTIMER, 1); 506 if (data < PCI_DFLT_LTNCY) { 507 data = PCI_DFLT_LTNCY; 508 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 509 pci_write_config(dev, PCIR_LATTIMER, data, 1); 510 } 511 512 /* 513 * Make sure we've disabled the ROM. 514 */ 515 data = pci_read_config(dev, PCIR_ROMADDR, 4); 516 data &= ~1; 517 pci_write_config(dev, PCIR_ROMADDR, data, 4); 518 519 520 if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, 521 BUS_SPACE_MAXADDR, NULL, NULL, lim + 1, 522 255, lim, 0, &pcs->parent_dmat) != 0) { 523 device_printf(dev, "could not create master dma tag\n"); 524 free(isp->isp_param, M_DEVBUF); 525 free(pcs, M_DEVBUF); 526 return (ENXIO); 527 } 528 529 iqd = 0; 530 irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0, 531 1, RF_ACTIVE | RF_SHAREABLE); 532 if (irq == NULL) { 533 device_printf(dev, "could not allocate interrupt\n"); 534 goto bad; 535 } 536 537 if (getenv_int("isp_no_fwload", &bitmap)) { 538 if (bitmap & (1 << unit)) 539 isp->isp_confopts |= ISP_CFG_NORELOAD; 540 } 541 if (getenv_int("isp_fwload", &bitmap)) { 542 if (bitmap & (1 << unit)) 543 isp->isp_confopts &= ~ISP_CFG_NORELOAD; 544 } 545 if (getenv_int("isp_no_nvram", &bitmap)) { 546 if (bitmap & (1 << unit)) 547 isp->isp_confopts |= ISP_CFG_NONVRAM; 548 } 549 if (getenv_int("isp_nvram", &bitmap)) { 550 if (bitmap & (1 << unit)) 551 isp->isp_confopts &= ~ISP_CFG_NONVRAM; 552 } 553 if (getenv_int("isp_fcduplex", &bitmap)) { 554 if (bitmap & (1 << unit)) 555 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 556 } 557 if (getenv_int("isp_no_fcduplex", &bitmap)) { 558 if (bitmap & (1 << unit)) 559 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX; 560 } 561 if (getenv_int("isp_nport", &bitmap)) { 562 if (bitmap & (1 << unit)) 563 isp->isp_confopts |= ISP_CFG_NPORT; 564 } 565 /* 566 * Look for overriding WWN. This is a Node WWN so it binds to 567 * all FC instances. A Port WWN will be constructed from it 568 * as appropriate. 569 */ 570 if (!getenv_quad("isp_wwn", (quad_t *) &isp->isp_osinfo.default_wwn)) { 571 int i; 572 u_int64_t seed = (u_int64_t) (intptr_t) isp; 573 574 seed <<= 16; 575 seed &= ((1LL << 48) - 1LL); 576 /* 577 * This isn't very random, but it's the best we can do for 578 * the real edge case of cards that don't have WWNs. If 579 * you recompile a new vers.c, you'll get a different WWN. 580 */ 581 for (i = 0; version[i] != 0; i++) { 582 seed += version[i]; 583 } 584 /* 585 * Make sure the top nibble has something vaguely sensible 586 * (NAA == Locally Administered) 587 */ 588 isp->isp_osinfo.default_wwn |= (3LL << 60) | seed; 589 } else { 590 isp->isp_confopts |= ISP_CFG_OWNWWN; 591 } 592 isp_debug = 0; 593 (void) getenv_int("isp_debug", &isp_debug); 594 595 #ifdef ISP_SMPLOCK 596 /* Make sure the lock is set up. */ 597 mtx_init(&isp->isp_osinfo.lock, "isp", MTX_DEF); 598 locksetup++; 599 600 if (bus_setup_intr(dev, irq, INTR_TYPE_CAM | INTR_MPSAFE, 601 isp_pci_intr, isp, &pcs->ih)) { 602 device_printf(dev, "could not setup interrupt\n"); 603 goto bad; 604 } 605 #else 606 if (bus_setup_intr(dev, irq, INTR_TYPE_CAM, 607 isp_pci_intr, isp, &pcs->ih)) { 608 device_printf(dev, "could not setup interrupt\n"); 609 goto bad; 610 } 611 #endif 612 613 /* 614 * Set up logging levels. 615 */ 616 if (isp_debug) { 617 isp->isp_dblev = isp_debug; 618 } else { 619 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 620 } 621 if (bootverbose) 622 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 623 624 /* 625 * Make sure we're in reset state. 626 */ 627 ISP_LOCK(isp); 628 isp_reset(isp); 629 if (isp->isp_state != ISP_RESETSTATE) { 630 ISP_UNLOCK(isp); 631 goto bad; 632 } 633 isp_init(isp); 634 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 635 isp_uninit(isp); 636 ISP_UNLOCK(isp); 637 goto bad; 638 } 639 isp_attach(isp); 640 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 641 isp_uninit(isp); 642 ISP_UNLOCK(isp); 643 goto bad; 644 } 645 /* 646 * XXXX: Here is where we might unload the f/w module 647 * XXXX: (or decrease the reference count to it). 648 */ 649 ISP_UNLOCK(isp); 650 return (0); 651 652 bad: 653 654 if (pcs && pcs->ih) { 655 (void) bus_teardown_intr(dev, irq, pcs->ih); 656 } 657 658 #ifdef ISP_SMPLOCK 659 if (locksetup && isp) { 660 mtx_destroy(&isp->isp_osinfo.lock); 661 } 662 #endif 663 664 if (irq) { 665 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 666 } 667 668 669 if (regs) { 670 (void) bus_release_resource(dev, rtp, rgd, regs); 671 } 672 673 if (pcs) { 674 if (pcs->pci_isp.isp_param) 675 free(pcs->pci_isp.isp_param, M_DEVBUF); 676 free(pcs, M_DEVBUF); 677 } 678 679 /* 680 * XXXX: Here is where we might unload the f/w module 681 * XXXX: (or decrease the reference count to it). 682 */ 683 return (ENXIO); 684 } 685 686 static void 687 isp_pci_intr(void *arg) 688 { 689 struct ispsoftc *isp = arg; 690 ISP_LOCK(isp); 691 (void) isp_intr(isp); 692 ISP_UNLOCK(isp); 693 } 694 695 static u_int16_t 696 isp_pci_rd_reg(isp, regoff) 697 struct ispsoftc *isp; 698 int regoff; 699 { 700 u_int16_t rv; 701 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 702 int offset, oldconf = 0; 703 704 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 705 /* 706 * We will assume that someone has paused the RISC processor. 707 */ 708 oldconf = isp_pci_rd_reg(isp, BIU_CONF1); 709 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP); 710 } 711 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; 712 offset += (regoff & 0xff); 713 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset); 714 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 715 isp_pci_wr_reg(isp, BIU_CONF1, oldconf); 716 } 717 return (rv); 718 } 719 720 static void 721 isp_pci_wr_reg(isp, regoff, val) 722 struct ispsoftc *isp; 723 int regoff; 724 u_int16_t val; 725 { 726 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 727 int offset, oldconf = 0; 728 729 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 730 /* 731 * We will assume that someone has paused the RISC processor. 732 */ 733 oldconf = isp_pci_rd_reg(isp, BIU_CONF1); 734 isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP); 735 } 736 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; 737 offset += (regoff & 0xff); 738 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val); 739 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 740 isp_pci_wr_reg(isp, BIU_CONF1, oldconf); 741 } 742 } 743 744 static u_int16_t 745 isp_pci_rd_reg_1080(isp, regoff) 746 struct ispsoftc *isp; 747 int regoff; 748 { 749 u_int16_t rv, oc = 0; 750 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 751 int offset; 752 753 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 754 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 755 u_int16_t tc; 756 /* 757 * We will assume that someone has paused the RISC processor. 758 */ 759 oc = isp_pci_rd_reg(isp, BIU_CONF1); 760 tc = oc & ~BIU_PCI1080_CONF1_DMA; 761 if (regoff & SXP_BANK1_SELECT) 762 tc |= BIU_PCI1080_CONF1_SXP1; 763 else 764 tc |= BIU_PCI1080_CONF1_SXP0; 765 isp_pci_wr_reg(isp, BIU_CONF1, tc); 766 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 767 oc = isp_pci_rd_reg(isp, BIU_CONF1); 768 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA); 769 } 770 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; 771 offset += (regoff & 0xff); 772 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset); 773 if (oc) { 774 isp_pci_wr_reg(isp, BIU_CONF1, oc); 775 } 776 return (rv); 777 } 778 779 static void 780 isp_pci_wr_reg_1080(isp, regoff, val) 781 struct ispsoftc *isp; 782 int regoff; 783 u_int16_t val; 784 { 785 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 786 int offset, oc = 0; 787 788 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 789 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 790 u_int16_t tc; 791 /* 792 * We will assume that someone has paused the RISC processor. 793 */ 794 oc = isp_pci_rd_reg(isp, BIU_CONF1); 795 tc = oc & ~BIU_PCI1080_CONF1_DMA; 796 if (regoff & SXP_BANK1_SELECT) 797 tc |= BIU_PCI1080_CONF1_SXP1; 798 else 799 tc |= BIU_PCI1080_CONF1_SXP0; 800 isp_pci_wr_reg(isp, BIU_CONF1, tc); 801 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 802 oc = isp_pci_rd_reg(isp, BIU_CONF1); 803 isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA); 804 } 805 offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; 806 offset += (regoff & 0xff); 807 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val); 808 if (oc) { 809 isp_pci_wr_reg(isp, BIU_CONF1, oc); 810 } 811 } 812 813 static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int)); 814 static void isp_map_result __P((void *, bus_dma_segment_t *, int, int)); 815 static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int)); 816 817 struct imush { 818 struct ispsoftc *isp; 819 int error; 820 }; 821 822 static void 823 isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) 824 { 825 struct imush *imushp = (struct imush *) arg; 826 if (error) { 827 imushp->error = error; 828 } else { 829 imushp->isp->isp_rquest_dma = segs->ds_addr; 830 } 831 } 832 833 static void 834 isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error) 835 { 836 struct imush *imushp = (struct imush *) arg; 837 if (error) { 838 imushp->error = error; 839 } else { 840 imushp->isp->isp_result_dma = segs->ds_addr; 841 } 842 } 843 844 static void 845 isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error) 846 { 847 struct imush *imushp = (struct imush *) arg; 848 if (error) { 849 imushp->error = error; 850 } else { 851 fcparam *fcp = imushp->isp->isp_param; 852 fcp->isp_scdma = segs->ds_addr; 853 } 854 } 855 856 static int 857 isp_pci_mbxdma(struct ispsoftc *isp) 858 { 859 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 860 caddr_t base; 861 u_int32_t len; 862 int i, error; 863 bus_size_t lim; 864 struct imush im; 865 866 867 /* 868 * Already been here? If so, leave... 869 */ 870 if (isp->isp_rquest) { 871 return (0); 872 } 873 874 len = sizeof (XS_T **) * isp->isp_maxcmds; 875 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 876 if (isp->isp_xflist == NULL) { 877 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 878 return (1); 879 } 880 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 881 pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 882 if (pci->dmaps == NULL) { 883 isp_prt(isp, ISP_LOGERR, "can't alloc dma maps"); 884 free(isp->isp_xflist, M_DEVBUF); 885 return (1); 886 } 887 888 if (IS_FC(isp) || IS_ULTRA2(isp)) 889 lim = BUS_SPACE_MAXADDR + 1; 890 else 891 lim = BUS_SPACE_MAXADDR_24BIT + 1; 892 893 /* 894 * Allocate and map the request, result queues, plus FC scratch area. 895 */ 896 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 897 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 898 if (IS_FC(isp)) { 899 len += ISP2100_SCRLEN; 900 } 901 if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim, 902 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, 903 BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) { 904 isp_prt(isp, ISP_LOGERR, 905 "cannot create a dma tag for control spaces"); 906 free(isp->isp_xflist, M_DEVBUF); 907 free(pci->dmaps, M_DEVBUF); 908 return (1); 909 } 910 if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base, 911 BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) { 912 isp_prt(isp, ISP_LOGERR, 913 "cannot allocate %d bytes of CCB memory", len); 914 free(isp->isp_xflist, M_DEVBUF); 915 free(pci->dmaps, M_DEVBUF); 916 return (1); 917 } 918 919 isp->isp_rquest = base; 920 im.isp = isp; 921 im.error = 0; 922 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest, 923 ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), isp_map_rquest, &im, 0); 924 if (im.error) { 925 isp_prt(isp, ISP_LOGERR, 926 "error %d loading dma map for DMA request queue", im.error); 927 free(isp->isp_xflist, M_DEVBUF); 928 free(pci->dmaps, M_DEVBUF); 929 isp->isp_rquest = NULL; 930 return (1); 931 } 932 isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 933 im.error = 0; 934 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result, 935 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)), isp_map_result, &im, 0); 936 if (im.error) { 937 isp_prt(isp, ISP_LOGERR, 938 "error %d loading dma map for DMA result queue", im.error); 939 free(isp->isp_xflist, M_DEVBUF); 940 free(pci->dmaps, M_DEVBUF); 941 isp->isp_rquest = NULL; 942 return (1); 943 } 944 945 for (i = 0; i < isp->isp_maxcmds; i++) { 946 error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]); 947 if (error) { 948 isp_prt(isp, ISP_LOGERR, 949 "error %d creating per-cmd DMA maps", error); 950 free(isp->isp_xflist, M_DEVBUF); 951 free(pci->dmaps, M_DEVBUF); 952 isp->isp_rquest = NULL; 953 return (1); 954 } 955 } 956 957 if (IS_FC(isp)) { 958 fcparam *fcp = (fcparam *) isp->isp_param; 959 fcp->isp_scratch = base + 960 ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) + 961 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 962 im.error = 0; 963 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, 964 fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0); 965 if (im.error) { 966 isp_prt(isp, ISP_LOGERR, 967 "error %d loading FC scratch area", im.error); 968 free(isp->isp_xflist, M_DEVBUF); 969 free(pci->dmaps, M_DEVBUF); 970 isp->isp_rquest = NULL; 971 return (1); 972 } 973 } 974 return (0); 975 } 976 977 typedef struct { 978 struct ispsoftc *isp; 979 void *cmd_token; 980 void *rq; 981 u_int16_t *iptrp; 982 u_int16_t optr; 983 u_int error; 984 } mush_t; 985 986 #define MUSHERR_NOQENTRIES -2 987 988 #ifdef ISP_TARGET_MODE 989 /* 990 * We need to handle DMA for target mode differently from initiator mode. 991 * 992 * DMA mapping and construction and submission of CTIO Request Entries 993 * and rendevous for completion are very tightly coupled because we start 994 * out by knowing (per platform) how much data we have to move, but we 995 * don't know, up front, how many DMA mapping segments will have to be used 996 * cover that data, so we don't know how many CTIO Request Entries we 997 * will end up using. Further, for performance reasons we may want to 998 * (on the last CTIO for Fibre Channel), send status too (if all went well). 999 * 1000 * The standard vector still goes through isp_pci_dmasetup, but the callback 1001 * for the DMA mapping routines comes here instead with the whole transfer 1002 * mapped and a pointer to a partially filled in already allocated request 1003 * queue entry. We finish the job. 1004 */ 1005 static void tdma_mk __P((void *, bus_dma_segment_t *, int, int)); 1006 static void tdma_mkfc __P((void *, bus_dma_segment_t *, int, int)); 1007 1008 static void 1009 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1010 { 1011 mush_t *mp; 1012 struct ccb_scsiio *csio; 1013 struct isp_pcisoftc *pci; 1014 bus_dmamap_t *dp; 1015 u_int8_t scsi_status; 1016 ct_entry_t *cto; 1017 u_int32_t handle, totxfr, sflags; 1018 int nctios, send_status; 1019 int32_t resid; 1020 1021 mp = (mush_t *) arg; 1022 if (error) { 1023 mp->error = error; 1024 return; 1025 } 1026 csio = mp->cmd_token; 1027 cto = mp->rq; 1028 1029 cto->ct_xfrlen = 0; 1030 cto->ct_seg_count = 0; 1031 cto->ct_header.rqs_entry_count = 1; 1032 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1033 1034 if (nseg == 0) { 1035 cto->ct_header.rqs_seqno = 1; 1036 ISP_TDQE(mp->isp, "tdma_mk[no data]", *mp->iptrp, cto); 1037 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1038 "CTIO lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x res %d", 1039 csio->ccb_h.target_lun, cto->ct_iid, cto->ct_flags, 1040 cto->ct_status, cto->ct_scsi_status, cto->ct_resid); 1041 ISP_SWIZ_CTIO(mp->isp, cto, cto); 1042 return; 1043 } 1044 1045 nctios = nseg / ISP_RQDSEG; 1046 if (nseg % ISP_RQDSEG) { 1047 nctios++; 1048 } 1049 1050 /* 1051 * Save handle, and potentially any SCSI status, which we'll reinsert 1052 * on the last CTIO we're going to send. 1053 */ 1054 handle = cto->ct_reserved; 1055 cto->ct_reserved = 0; 1056 cto->ct_header.rqs_seqno = 0; 1057 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1058 1059 if (send_status) { 1060 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1061 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1062 /* 1063 * Preserve residual. 1064 */ 1065 resid = cto->ct_resid; 1066 1067 /* 1068 * Save actual SCSI status. 1069 */ 1070 scsi_status = cto->ct_scsi_status; 1071 1072 /* 1073 * We can't do a status at the same time as a data CTIO, so 1074 * we need to synthesize an extra CTIO at this level. 1075 */ 1076 nctios++; 1077 } else { 1078 sflags = scsi_status = resid = 0; 1079 } 1080 1081 totxfr = cto->ct_resid = 0; 1082 cto->ct_scsi_status = 0; 1083 1084 pci = (struct isp_pcisoftc *)mp->isp; 1085 dp = &pci->dmaps[isp_handle_index(handle)]; 1086 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1087 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); 1088 } else { 1089 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); 1090 } 1091 1092 1093 while (nctios--) { 1094 int seglim; 1095 1096 seglim = nseg; 1097 if (seglim) { 1098 int seg; 1099 1100 if (seglim > ISP_RQDSEG) 1101 seglim = ISP_RQDSEG; 1102 1103 for (seg = 0; seg < seglim; seg++, nseg--) { 1104 /* 1105 * Unlike normal initiator commands, we don't 1106 * do any swizzling here. 1107 */ 1108 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1109 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1110 cto->ct_xfrlen += dm_segs->ds_len; 1111 totxfr += dm_segs->ds_len; 1112 dm_segs++; 1113 } 1114 cto->ct_seg_count = seg; 1115 } else { 1116 /* 1117 * This case should only happen when we're sending an 1118 * extra CTIO with final status. 1119 */ 1120 if (send_status == 0) { 1121 isp_prt(mp->isp, ISP_LOGWARN, 1122 "tdma_mk ran out of segments"); 1123 mp->error = EINVAL; 1124 return; 1125 } 1126 } 1127 1128 /* 1129 * At this point, the fields ct_lun, ct_iid, ct_tagval, 1130 * ct_tagtype, and ct_timeout have been carried over 1131 * unchanged from what our caller had set. 1132 * 1133 * The dataseg fields and the seg_count fields we just got 1134 * through setting. The data direction we've preserved all 1135 * along and only clear it if we're now sending status. 1136 */ 1137 1138 if (nctios == 0) { 1139 /* 1140 * We're the last in a sequence of CTIOs, so mark 1141 * this CTIO and save the handle to the CCB such that 1142 * when this CTIO completes we can free dma resources 1143 * and do whatever else we need to do to finish the 1144 * rest of the command. 1145 */ 1146 cto->ct_reserved = handle; 1147 cto->ct_header.rqs_seqno = 1; 1148 1149 if (send_status) { 1150 cto->ct_scsi_status = scsi_status; 1151 cto->ct_flags |= sflags | CT_NO_DATA;; 1152 cto->ct_resid = resid; 1153 } 1154 if (send_status) { 1155 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1156 "CTIO lun%d for ID %d ct_flags 0x%x scsi " 1157 "status %x resid %d", 1158 csio->ccb_h.target_lun, 1159 cto->ct_iid, cto->ct_flags, 1160 cto->ct_scsi_status, cto->ct_resid); 1161 } else { 1162 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1163 "CTIO lun%d for ID%d ct_flags 0x%x", 1164 csio->ccb_h.target_lun, 1165 cto->ct_iid, cto->ct_flags); 1166 } 1167 ISP_TDQE(mp->isp, "last tdma_mk", *mp->iptrp, cto); 1168 ISP_SWIZ_CTIO(mp->isp, cto, cto); 1169 } else { 1170 ct_entry_t *octo = cto; 1171 1172 /* 1173 * Make sure handle fields are clean 1174 */ 1175 cto->ct_reserved = 0; 1176 cto->ct_header.rqs_seqno = 0; 1177 1178 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1179 "CTIO lun%d for ID%d ct_flags 0x%x", 1180 csio->ccb_h.target_lun, cto->ct_iid, cto->ct_flags); 1181 ISP_TDQE(mp->isp, "tdma_mk", *mp->iptrp, cto); 1182 1183 /* 1184 * Get a new CTIO 1185 */ 1186 cto = (ct_entry_t *) 1187 ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp); 1188 *mp->iptrp = 1189 ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(isp)); 1190 if (*mp->iptrp == mp->optr) { 1191 isp_prt(mp->isp, ISP_LOGWARN, 1192 "Queue Overflow in tdma_mk"); 1193 mp->error = MUSHERR_NOQENTRIES; 1194 return; 1195 } 1196 /* 1197 * Fill in the new CTIO with info from the old one. 1198 */ 1199 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1200 cto->ct_header.rqs_entry_count = 1; 1201 cto->ct_header.rqs_flags = 0; 1202 cto->ct_lun = octo->ct_lun; 1203 cto->ct_iid = octo->ct_iid; 1204 cto->ct_reserved2 = octo->ct_reserved2; 1205 cto->ct_tgt = octo->ct_tgt; 1206 cto->ct_flags = octo->ct_flags; 1207 cto->ct_status = 0; 1208 cto->ct_scsi_status = 0; 1209 cto->ct_tag_val = octo->ct_tag_val; 1210 cto->ct_tag_type = octo->ct_tag_type; 1211 cto->ct_xfrlen = 0; 1212 cto->ct_resid = 0; 1213 cto->ct_timeout = octo->ct_timeout; 1214 cto->ct_seg_count = 0; 1215 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1216 /* 1217 * Now swizzle the old one for the consumption of the 1218 * chip. 1219 */ 1220 ISP_SWIZ_CTIO(mp->isp, octo, octo); 1221 } 1222 } 1223 } 1224 1225 static void 1226 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1227 { 1228 mush_t *mp; 1229 struct ccb_scsiio *csio; 1230 struct isp_pcisoftc *pci; 1231 bus_dmamap_t *dp; 1232 ct2_entry_t *cto; 1233 u_int16_t scsi_status, send_status, send_sense; 1234 u_int32_t handle, totxfr, datalen; 1235 u_int8_t sense[QLTM_SENSELEN]; 1236 int nctios; 1237 1238 mp = (mush_t *) arg; 1239 if (error) { 1240 mp->error = error; 1241 return; 1242 } 1243 1244 csio = mp->cmd_token; 1245 cto = mp->rq; 1246 1247 if (nseg == 0) { 1248 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 1249 isp_prt(mp->isp, ISP_LOGWARN, 1250 "dma2_tgt_fc, a status CTIO2 without MODE1 " 1251 "set (0x%x)", cto->ct_flags); 1252 mp->error = EINVAL; 1253 return; 1254 } 1255 cto->ct_header.rqs_entry_count = 1; 1256 cto->ct_header.rqs_seqno = 1; 1257 /* ct_reserved contains the handle set by caller */ 1258 /* 1259 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 1260 * flags to NO DATA and clear relative offset flags. 1261 * We preserve the ct_resid and the response area. 1262 */ 1263 cto->ct_flags |= CT2_NO_DATA; 1264 if (cto->ct_resid > 0) 1265 cto->ct_flags |= CT2_DATA_UNDER; 1266 else if (cto->ct_resid < 0) 1267 cto->ct_flags |= CT2_DATA_OVER; 1268 cto->ct_seg_count = 0; 1269 cto->ct_reloff = 0; 1270 ISP_TDQE(mp->isp, "dma2_tgt_fc[no data]", *mp->iptrp, cto); 1271 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1272 "CTIO2 RX_ID 0x%x lun %d->iid%d flgs 0x%x sts 0x%x ssts " 1273 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 1274 cto->ct_iid, cto->ct_flags, cto->ct_status, 1275 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1276 ISP_SWIZ_CTIO2(isp, cto, cto); 1277 return; 1278 } 1279 1280 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 1281 isp_prt(mp->isp, ISP_LOGWARN, 1282 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 1283 "(0x%x)", cto->ct_flags); 1284 mp->error = EINVAL; 1285 return; 1286 } 1287 1288 1289 nctios = nseg / ISP_RQDSEG_T2; 1290 if (nseg % ISP_RQDSEG_T2) { 1291 nctios++; 1292 } 1293 1294 /* 1295 * Save the handle, status, reloff, and residual. We'll reinsert the 1296 * handle into the last CTIO2 we're going to send, and reinsert status 1297 * and residual (and possibly sense data) if that's to be sent as well. 1298 * 1299 * We preserve ct_reloff and adjust it for each data CTIO2 we send past 1300 * the first one. This is needed so that the FCP DATA IUs being sent 1301 * out have the correct offset (they can arrive at the other end out 1302 * of order). 1303 */ 1304 1305 handle = cto->ct_reserved; 1306 cto->ct_reserved = 0; 1307 1308 if ((send_status = (cto->ct_flags & CT2_SENDSTATUS)) != 0) { 1309 cto->ct_flags &= ~CT2_SENDSTATUS; 1310 1311 /* 1312 * Preserve residual, which is actually the total count. 1313 */ 1314 datalen = cto->ct_resid; 1315 1316 /* 1317 * Save actual SCSI status. We'll reinsert the 1318 * CT2_SNSLEN_VALID later if appropriate. 1319 */ 1320 scsi_status = cto->rsp.m0.ct_scsi_status & 0xff; 1321 send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID; 1322 1323 /* 1324 * If we're sending status and have a CHECK CONDTION and 1325 * have sense data, we send one more CTIO2 with just the 1326 * status and sense data. The upper layers have stashed 1327 * the sense data in the dataseg structure for us. 1328 */ 1329 1330 if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND && 1331 send_sense) { 1332 bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN); 1333 nctios++; 1334 } 1335 } else { 1336 scsi_status = send_sense = datalen = 0; 1337 } 1338 1339 totxfr = cto->ct_resid = 0; 1340 cto->rsp.m0.ct_scsi_status = 0; 1341 bzero(&cto->rsp, sizeof (cto->rsp)); 1342 1343 pci = (struct isp_pcisoftc *)mp->isp; 1344 dp = &pci->dmaps[isp_handle_index(handle)]; 1345 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1346 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); 1347 } else { 1348 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); 1349 } 1350 1351 while (nctios--) { 1352 int seg, seglim; 1353 1354 seglim = nseg; 1355 if (seglim) { 1356 if (seglim > ISP_RQDSEG_T2) 1357 seglim = ISP_RQDSEG_T2; 1358 1359 for (seg = 0; seg < seglim; seg++) { 1360 cto->rsp.m0.ct_dataseg[seg].ds_base = 1361 dm_segs->ds_addr; 1362 cto->rsp.m0.ct_dataseg[seg].ds_count = 1363 dm_segs->ds_len; 1364 cto->rsp.m0.ct_xfrlen += dm_segs->ds_len; 1365 totxfr += dm_segs->ds_len; 1366 dm_segs++; 1367 } 1368 cto->ct_seg_count = seg; 1369 } else { 1370 /* 1371 * This case should only happen when we're sending a 1372 * synthesized MODE1 final status with sense data. 1373 */ 1374 if (send_sense == 0) { 1375 isp_prt(mp->isp, ISP_LOGWARN, 1376 "dma2_tgt_fc ran out of segments, " 1377 "no SENSE DATA"); 1378 mp->error = EINVAL; 1379 return; 1380 } 1381 } 1382 1383 /* 1384 * At this point, the fields ct_lun, ct_iid, ct_rxid, 1385 * ct_timeout have been carried over unchanged from what 1386 * our caller had set. 1387 * 1388 * The field ct_reloff is either what the caller set, or 1389 * what we've added to below. 1390 * 1391 * The dataseg fields and the seg_count fields we just got 1392 * through setting. The data direction we've preserved all 1393 * along and only clear it if we're sending a MODE1 status 1394 * as the last CTIO. 1395 * 1396 */ 1397 1398 if (nctios == 0) { 1399 1400 /* 1401 * We're the last in a sequence of CTIO2s, so mark this 1402 * CTIO2 and save the handle to the CCB such that when 1403 * this CTIO2 completes we can free dma resources and 1404 * do whatever else we need to do to finish the rest 1405 * of the command. 1406 */ 1407 1408 cto->ct_reserved = handle; 1409 cto->ct_header.rqs_seqno = 1; 1410 1411 if (send_status) { 1412 if (send_sense) { 1413 bcopy(sense, cto->rsp.m1.ct_resp, 1414 QLTM_SENSELEN); 1415 cto->rsp.m1.ct_senselen = 1416 QLTM_SENSELEN; 1417 scsi_status |= CT2_SNSLEN_VALID; 1418 cto->rsp.m1.ct_scsi_status = 1419 scsi_status; 1420 cto->ct_flags &= CT2_FLAG_MMASK; 1421 cto->ct_flags |= CT2_FLAG_MODE1 | 1422 CT2_NO_DATA| CT2_SENDSTATUS; 1423 } else { 1424 cto->rsp.m0.ct_scsi_status = 1425 scsi_status; 1426 cto->ct_flags |= CT2_SENDSTATUS; 1427 } 1428 /* 1429 * Get 'real' residual and set flags based 1430 * on it. 1431 */ 1432 cto->ct_resid = datalen - totxfr; 1433 if (cto->ct_resid > 0) 1434 cto->ct_flags |= CT2_DATA_UNDER; 1435 else if (cto->ct_resid < 0) 1436 cto->ct_flags |= CT2_DATA_OVER; 1437 } 1438 ISP_TDQE(mp->isp, "last dma2_tgt_fc", *mp->iptrp, cto); 1439 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1440 "CTIO2 RX_ID 0x%x lun %d->iid%d flgs 0x%x sts 0x%x" 1441 " ssts 0x%x res %d", cto->ct_rxid, 1442 csio->ccb_h.target_lun, (int) cto->ct_iid, 1443 cto->ct_flags, cto->ct_status, 1444 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1445 ISP_SWIZ_CTIO2(isp, cto, cto); 1446 } else { 1447 ct2_entry_t *octo = cto; 1448 1449 /* 1450 * Make sure handle fields are clean 1451 */ 1452 cto->ct_reserved = 0; 1453 cto->ct_header.rqs_seqno = 0; 1454 1455 ISP_TDQE(mp->isp, "dma2_tgt_fc", *mp->iptrp, cto); 1456 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1457 "CTIO2 RX_ID 0x%x lun %d->iid%d flgs 0x%x", 1458 cto->ct_rxid, csio->ccb_h.target_lun, 1459 (int) cto->ct_iid, cto->ct_flags); 1460 /* 1461 * Get a new CTIO2 1462 */ 1463 cto = (ct2_entry_t *) 1464 ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp); 1465 *mp->iptrp = 1466 ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(isp)); 1467 if (*mp->iptrp == mp->optr) { 1468 isp_prt(mp->isp, ISP_LOGWARN, 1469 "Queue Overflow in dma2_tgt_fc"); 1470 mp->error = MUSHERR_NOQENTRIES; 1471 return; 1472 } 1473 1474 /* 1475 * Fill in the new CTIO2 with info from the old one. 1476 */ 1477 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1478 cto->ct_header.rqs_entry_count = 1; 1479 cto->ct_header.rqs_flags = 0; 1480 /* ct_header.rqs_seqno && ct_reserved done later */ 1481 cto->ct_lun = octo->ct_lun; 1482 cto->ct_iid = octo->ct_iid; 1483 cto->ct_rxid = octo->ct_rxid; 1484 cto->ct_flags = octo->ct_flags; 1485 cto->ct_status = 0; 1486 cto->ct_resid = 0; 1487 cto->ct_timeout = octo->ct_timeout; 1488 cto->ct_seg_count = 0; 1489 /* 1490 * Adjust the new relative offset by the amount which 1491 * is recorded in the data segment of the old CTIO2 we 1492 * just finished filling out. 1493 */ 1494 cto->ct_reloff += octo->rsp.m0.ct_xfrlen; 1495 bzero(&cto->rsp, sizeof (cto->rsp)); 1496 ISP_SWIZ_CTIO2(isp, cto, cto); 1497 } 1498 } 1499 } 1500 #endif 1501 1502 static void dma2 __P((void *, bus_dma_segment_t *, int, int)); 1503 1504 static void 1505 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1506 { 1507 mush_t *mp; 1508 struct ccb_scsiio *csio; 1509 struct isp_pcisoftc *pci; 1510 bus_dmamap_t *dp; 1511 bus_dma_segment_t *eseg; 1512 ispreq_t *rq; 1513 ispcontreq_t *crq; 1514 int seglim, datalen; 1515 1516 mp = (mush_t *) arg; 1517 if (error) { 1518 mp->error = error; 1519 return; 1520 } 1521 1522 if (nseg < 1) { 1523 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 1524 mp->error = EFAULT; 1525 return; 1526 } 1527 csio = mp->cmd_token; 1528 rq = mp->rq; 1529 pci = (struct isp_pcisoftc *)mp->isp; 1530 dp = &pci->dmaps[isp_handle_index(rq->req_handle)]; 1531 1532 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1533 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); 1534 } else { 1535 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); 1536 } 1537 1538 datalen = XS_XFRLEN(csio); 1539 1540 /* 1541 * We're passed an initial partially filled in entry that 1542 * has most fields filled in except for data transfer 1543 * related values. 1544 * 1545 * Our job is to fill in the initial request queue entry and 1546 * then to start allocating and filling in continuation entries 1547 * until we've covered the entire transfer. 1548 */ 1549 1550 if (IS_FC(mp->isp)) { 1551 seglim = ISP_RQDSEG_T2; 1552 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 1553 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1554 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1555 } else { 1556 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1557 } 1558 } else { 1559 if (csio->cdb_len > 12) { 1560 seglim = 0; 1561 } else { 1562 seglim = ISP_RQDSEG; 1563 } 1564 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1565 rq->req_flags |= REQFLAG_DATA_IN; 1566 } else { 1567 rq->req_flags |= REQFLAG_DATA_OUT; 1568 } 1569 } 1570 1571 eseg = dm_segs + nseg; 1572 1573 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1574 if (IS_FC(mp->isp)) { 1575 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 1576 rq2->req_dataseg[rq2->req_seg_count].ds_base = 1577 dm_segs->ds_addr; 1578 rq2->req_dataseg[rq2->req_seg_count].ds_count = 1579 dm_segs->ds_len; 1580 } else { 1581 rq->req_dataseg[rq->req_seg_count].ds_base = 1582 dm_segs->ds_addr; 1583 rq->req_dataseg[rq->req_seg_count].ds_count = 1584 dm_segs->ds_len; 1585 } 1586 datalen -= dm_segs->ds_len; 1587 #if 0 1588 if (IS_FC(mp->isp)) { 1589 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 1590 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n", 1591 mp->isp->isp_name, rq->req_seg_count, 1592 rq2->req_dataseg[rq2->req_seg_count].ds_count, 1593 rq2->req_dataseg[rq2->req_seg_count].ds_base); 1594 } else { 1595 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n", 1596 mp->isp->isp_name, rq->req_seg_count, 1597 rq->req_dataseg[rq->req_seg_count].ds_count, 1598 rq->req_dataseg[rq->req_seg_count].ds_base); 1599 } 1600 #endif 1601 rq->req_seg_count++; 1602 dm_segs++; 1603 } 1604 1605 while (datalen > 0 && dm_segs != eseg) { 1606 crq = (ispcontreq_t *) 1607 ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp); 1608 *mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(isp)); 1609 if (*mp->iptrp == mp->optr) { 1610 #if 0 1611 printf("%s: Request Queue Overflow++\n", 1612 mp->isp->isp_name); 1613 #endif 1614 mp->error = MUSHERR_NOQENTRIES; 1615 return; 1616 } 1617 rq->req_header.rqs_entry_count++; 1618 bzero((void *)crq, sizeof (*crq)); 1619 crq->req_header.rqs_entry_count = 1; 1620 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1621 1622 seglim = 0; 1623 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 1624 crq->req_dataseg[seglim].ds_base = 1625 dm_segs->ds_addr; 1626 crq->req_dataseg[seglim].ds_count = 1627 dm_segs->ds_len; 1628 #if 0 1629 printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n", 1630 mp->isp->isp_name, rq->req_header.rqs_entry_count-1, 1631 seglim, crq->req_dataseg[seglim].ds_count, 1632 crq->req_dataseg[seglim].ds_base); 1633 #endif 1634 rq->req_seg_count++; 1635 dm_segs++; 1636 seglim++; 1637 datalen -= dm_segs->ds_len; 1638 } 1639 } 1640 } 1641 1642 static int 1643 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq, 1644 u_int16_t *iptrp, u_int16_t optr) 1645 { 1646 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 1647 bus_dmamap_t *dp = NULL; 1648 mush_t mush, *mp; 1649 void (*eptr) __P((void *, bus_dma_segment_t *, int, int)); 1650 1651 #ifdef ISP_TARGET_MODE 1652 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1653 if (IS_FC(isp)) { 1654 eptr = tdma_mkfc; 1655 } else { 1656 eptr = tdma_mk; 1657 } 1658 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1659 (csio->dxfer_len == 0)) { 1660 rq->req_seg_count = 1; 1661 mp = &mush; 1662 mp->isp = isp; 1663 mp->cmd_token = csio; 1664 mp->rq = rq; 1665 mp->iptrp = iptrp; 1666 mp->optr = optr; 1667 mp->error = 0; 1668 (*eptr)(mp, NULL, 0, 0); 1669 goto exit; 1670 } 1671 } else 1672 #endif 1673 eptr = dma2; 1674 1675 /* 1676 * NB: if we need to do request queue entry swizzling, 1677 * NB: this is where it would need to be done for cmds 1678 * NB: that move no data. For commands that move data, 1679 * NB: swizzling would take place in those functions. 1680 */ 1681 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1682 (csio->dxfer_len == 0)) { 1683 rq->req_seg_count = 1; 1684 return (CMD_QUEUED); 1685 } 1686 1687 /* 1688 * Do a virtual grapevine step to collect info for 1689 * the callback dma allocation that we have to use... 1690 */ 1691 mp = &mush; 1692 mp->isp = isp; 1693 mp->cmd_token = csio; 1694 mp->rq = rq; 1695 mp->iptrp = iptrp; 1696 mp->optr = optr; 1697 mp->error = 0; 1698 1699 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1700 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 1701 int error, s; 1702 dp = &pci->dmaps[isp_handle_index(rq->req_handle)]; 1703 s = splsoftvm(); 1704 error = bus_dmamap_load(pci->parent_dmat, *dp, 1705 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 1706 if (error == EINPROGRESS) { 1707 bus_dmamap_unload(pci->parent_dmat, *dp); 1708 mp->error = EINVAL; 1709 isp_prt(isp, ISP_LOGERR, 1710 "deferred dma allocation not supported"); 1711 } else if (error && mp->error == 0) { 1712 #ifdef DIAGNOSTIC 1713 printf("%s: error %d in dma mapping code\n", 1714 isp->isp_name, error); 1715 #endif 1716 mp->error = error; 1717 } 1718 splx(s); 1719 } else { 1720 /* Pointer to physical buffer */ 1721 struct bus_dma_segment seg; 1722 seg.ds_addr = (bus_addr_t)csio->data_ptr; 1723 seg.ds_len = csio->dxfer_len; 1724 (*eptr)(mp, &seg, 1, 0); 1725 } 1726 } else { 1727 struct bus_dma_segment *segs; 1728 1729 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1730 isp_prt(isp, ISP_LOGERR, 1731 "Physical segment pointers unsupported"); 1732 mp->error = EINVAL; 1733 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1734 isp_prt(isp, ISP_LOGERR, 1735 "Virtual segment addresses unsupported"); 1736 mp->error = EINVAL; 1737 } else { 1738 /* Just use the segments provided */ 1739 segs = (struct bus_dma_segment *) csio->data_ptr; 1740 (*eptr)(mp, segs, csio->sglist_cnt, 0); 1741 } 1742 } 1743 #ifdef ISP_TARGET_MODE 1744 exit: 1745 #endif 1746 if (mp->error) { 1747 int retval = CMD_COMPLETE; 1748 if (mp->error == MUSHERR_NOQENTRIES) { 1749 retval = CMD_EAGAIN; 1750 } else if (mp->error == EFBIG) { 1751 XS_SETERR(csio, CAM_REQ_TOO_BIG); 1752 } else if (mp->error == EINVAL) { 1753 XS_SETERR(csio, CAM_REQ_INVALID); 1754 } else { 1755 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 1756 } 1757 return (retval); 1758 } else { 1759 /* 1760 * Check to see if we weren't cancelled while sleeping on 1761 * getting DMA resources... 1762 */ 1763 if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1764 if (dp) { 1765 bus_dmamap_unload(pci->parent_dmat, *dp); 1766 } 1767 return (CMD_COMPLETE); 1768 } 1769 return (CMD_QUEUED); 1770 } 1771 } 1772 1773 static void 1774 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int32_t handle) 1775 { 1776 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 1777 bus_dmamap_t *dp = &pci->dmaps[isp_handle_index(handle)]; 1778 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1779 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD); 1780 } else { 1781 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE); 1782 } 1783 bus_dmamap_unload(pci->parent_dmat, *dp); 1784 } 1785 1786 1787 static void 1788 isp_pci_reset1(struct ispsoftc *isp) 1789 { 1790 /* Make sure the BIOS is disabled */ 1791 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 1792 /* and enable interrupts */ 1793 ENABLE_INTS(isp); 1794 } 1795 1796 static void 1797 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg) 1798 { 1799 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 1800 if (msg) 1801 printf("%s: %s\n", isp->isp_name, msg); 1802 if (IS_SCSI(isp)) 1803 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 1804 else 1805 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 1806 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 1807 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 1808 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 1809 1810 1811 if (IS_SCSI(isp)) { 1812 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 1813 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 1814 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 1815 ISP_READ(isp, CDMA_FIFO_STS)); 1816 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 1817 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 1818 ISP_READ(isp, DDMA_FIFO_STS)); 1819 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 1820 ISP_READ(isp, SXP_INTERRUPT), 1821 ISP_READ(isp, SXP_GROSS_ERR), 1822 ISP_READ(isp, SXP_PINS_CTRL)); 1823 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 1824 } 1825 printf(" mbox regs: %x %x %x %x %x\n", 1826 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 1827 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 1828 ISP_READ(isp, OUTMAILBOX4)); 1829 printf(" PCI Status Command/Status=%x\n", 1830 pci_read_config(pci->pci_dev, PCIR_COMMAND, 1)); 1831 } 1832