1 /* $FreeBSD$ */ 2 /* 3 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 4 * FreeBSD Version. 5 * 6 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice immediately at the beginning of the file, without modification, 13 * this list of conditions, and the following disclaimer. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/kernel.h> 33 #include <sys/module.h> 34 #include <sys/bus.h> 35 36 #include <pci/pcireg.h> 37 #include <pci/pcivar.h> 38 39 #include <machine/bus_memio.h> 40 #include <machine/bus_pio.h> 41 #include <machine/bus.h> 42 #include <machine/resource.h> 43 #include <sys/rman.h> 44 #include <sys/malloc.h> 45 46 #include <dev/isp/isp_freebsd.h> 47 48 static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int); 49 static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t); 50 static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int); 51 static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t); 52 static int 53 isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 54 static int 55 isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 56 static int isp_pci_mbxdma(struct ispsoftc *); 57 static int 58 isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t); 59 static void 60 isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t); 61 62 static void isp_pci_reset1(struct ispsoftc *); 63 static void isp_pci_dumpregs(struct ispsoftc *, const char *); 64 65 #ifndef ISP_CODE_ORG 66 #define ISP_CODE_ORG 0x1000 67 #endif 68 69 static struct ispmdvec mdvec = { 70 isp_pci_rd_isr, 71 isp_pci_rd_reg, 72 isp_pci_wr_reg, 73 isp_pci_mbxdma, 74 isp_pci_dmasetup, 75 isp_pci_dmateardown, 76 NULL, 77 isp_pci_reset1, 78 isp_pci_dumpregs, 79 NULL, 80 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 81 }; 82 83 static struct ispmdvec mdvec_1080 = { 84 isp_pci_rd_isr, 85 isp_pci_rd_reg_1080, 86 isp_pci_wr_reg_1080, 87 isp_pci_mbxdma, 88 isp_pci_dmasetup, 89 isp_pci_dmateardown, 90 NULL, 91 isp_pci_reset1, 92 isp_pci_dumpregs, 93 NULL, 94 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 95 }; 96 97 static struct ispmdvec mdvec_12160 = { 98 isp_pci_rd_isr, 99 isp_pci_rd_reg_1080, 100 isp_pci_wr_reg_1080, 101 isp_pci_mbxdma, 102 isp_pci_dmasetup, 103 isp_pci_dmateardown, 104 NULL, 105 isp_pci_reset1, 106 isp_pci_dumpregs, 107 NULL, 108 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 109 }; 110 111 static struct ispmdvec mdvec_2100 = { 112 isp_pci_rd_isr, 113 isp_pci_rd_reg, 114 isp_pci_wr_reg, 115 isp_pci_mbxdma, 116 isp_pci_dmasetup, 117 isp_pci_dmateardown, 118 NULL, 119 isp_pci_reset1, 120 isp_pci_dumpregs 121 }; 122 123 static struct ispmdvec mdvec_2200 = { 124 isp_pci_rd_isr, 125 isp_pci_rd_reg, 126 isp_pci_wr_reg, 127 isp_pci_mbxdma, 128 isp_pci_dmasetup, 129 isp_pci_dmateardown, 130 NULL, 131 isp_pci_reset1, 132 isp_pci_dumpregs 133 }; 134 135 static struct ispmdvec mdvec_2300 = { 136 isp_pci_rd_isr_2300, 137 isp_pci_rd_reg, 138 isp_pci_wr_reg, 139 isp_pci_mbxdma, 140 isp_pci_dmasetup, 141 isp_pci_dmateardown, 142 NULL, 143 isp_pci_reset1, 144 isp_pci_dumpregs 145 }; 146 147 #ifndef PCIM_CMD_INVEN 148 #define PCIM_CMD_INVEN 0x10 149 #endif 150 #ifndef PCIM_CMD_BUSMASTEREN 151 #define PCIM_CMD_BUSMASTEREN 0x0004 152 #endif 153 #ifndef PCIM_CMD_PERRESPEN 154 #define PCIM_CMD_PERRESPEN 0x0040 155 #endif 156 #ifndef PCIM_CMD_SEREN 157 #define PCIM_CMD_SEREN 0x0100 158 #endif 159 160 #ifndef PCIR_COMMAND 161 #define PCIR_COMMAND 0x04 162 #endif 163 164 #ifndef PCIR_CACHELNSZ 165 #define PCIR_CACHELNSZ 0x0c 166 #endif 167 168 #ifndef PCIR_LATTIMER 169 #define PCIR_LATTIMER 0x0d 170 #endif 171 172 #ifndef PCIR_ROMADDR 173 #define PCIR_ROMADDR 0x30 174 #endif 175 176 #ifndef PCI_VENDOR_QLOGIC 177 #define PCI_VENDOR_QLOGIC 0x1077 178 #endif 179 180 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 181 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 182 #endif 183 184 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 185 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 186 #endif 187 188 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 189 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 190 #endif 191 192 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 193 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 194 #endif 195 196 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 197 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 198 #endif 199 200 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 201 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 202 #endif 203 204 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 205 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 206 #endif 207 208 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 209 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 210 #endif 211 212 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 213 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 214 #endif 215 216 #define PCI_QLOGIC_ISP1020 \ 217 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 218 219 #define PCI_QLOGIC_ISP1080 \ 220 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 221 222 #define PCI_QLOGIC_ISP12160 \ 223 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 224 225 #define PCI_QLOGIC_ISP1240 \ 226 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 227 228 #define PCI_QLOGIC_ISP1280 \ 229 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 230 231 #define PCI_QLOGIC_ISP2100 \ 232 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 233 234 #define PCI_QLOGIC_ISP2200 \ 235 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 236 237 #define PCI_QLOGIC_ISP2300 \ 238 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 239 240 #define PCI_QLOGIC_ISP2312 \ 241 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 242 243 /* 244 * Odd case for some AMI raid cards... We need to *not* attach to this. 245 */ 246 #define AMI_RAID_SUBVENDOR_ID 0x101e 247 248 #define IO_MAP_REG 0x10 249 #define MEM_MAP_REG 0x14 250 251 #define PCI_DFLT_LTNCY 0x40 252 #define PCI_DFLT_LNSZ 0x10 253 254 static int isp_pci_probe (device_t); 255 static int isp_pci_attach (device_t); 256 257 258 struct isp_pcisoftc { 259 struct ispsoftc pci_isp; 260 device_t pci_dev; 261 struct resource * pci_reg; 262 bus_space_tag_t pci_st; 263 bus_space_handle_t pci_sh; 264 void * ih; 265 int16_t pci_poff[_NREG_BLKS]; 266 bus_dma_tag_t dmat; 267 bus_dmamap_t *dmaps; 268 }; 269 ispfwfunc *isp_get_firmware_p = NULL; 270 271 static device_method_t isp_pci_methods[] = { 272 /* Device interface */ 273 DEVMETHOD(device_probe, isp_pci_probe), 274 DEVMETHOD(device_attach, isp_pci_attach), 275 { 0, 0 } 276 }; 277 static void isp_pci_intr(void *); 278 279 static driver_t isp_pci_driver = { 280 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 281 }; 282 static devclass_t isp_devclass; 283 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 284 MODULE_VERSION(isp, 1); 285 286 static int 287 isp_pci_probe(device_t dev) 288 { 289 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 290 case PCI_QLOGIC_ISP1020: 291 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 292 break; 293 case PCI_QLOGIC_ISP1080: 294 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 295 break; 296 case PCI_QLOGIC_ISP1240: 297 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 298 break; 299 case PCI_QLOGIC_ISP1280: 300 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 301 break; 302 case PCI_QLOGIC_ISP12160: 303 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 304 return (ENXIO); 305 } 306 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 307 break; 308 case PCI_QLOGIC_ISP2100: 309 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 310 break; 311 case PCI_QLOGIC_ISP2200: 312 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 313 break; 314 case PCI_QLOGIC_ISP2300: 315 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 316 break; 317 case PCI_QLOGIC_ISP2312: 318 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 319 break; 320 default: 321 return (ENXIO); 322 } 323 if (device_get_unit(dev) == 0 && bootverbose) { 324 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 325 "Core Version %d.%d\n", 326 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 327 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 328 } 329 /* 330 * XXXX: Here is where we might load the f/w module 331 * XXXX: (or increase a reference count to it). 332 */ 333 return (0); 334 } 335 336 static int 337 isp_pci_attach(device_t dev) 338 { 339 struct resource *regs, *irq; 340 int tval, rtp, rgd, iqd, m1, m2, isp_debug, role; 341 u_int32_t data, cmd, linesz, psize, basetype; 342 struct isp_pcisoftc *pcs; 343 struct ispsoftc *isp = NULL; 344 struct ispmdvec *mdvp; 345 const char *sptr; 346 int locksetup = 0; 347 348 /* 349 * Figure out if we're supposed to skip this one. 350 * If we are, we actually go to ISP_ROLE_NONE. 351 */ 352 353 tval = 0; 354 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 355 "disable", &tval) == 0 && tval) { 356 device_printf(dev, "device is disabled\n"); 357 /* but return 0 so the !$)$)*!$*) unit isn't reused */ 358 return (0); 359 } 360 361 role = 0; 362 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 363 "role", &role) == 0 && 364 ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) { 365 device_printf(dev, "setting role to 0x%x\n", role); 366 } else { 367 #ifdef ISP_TARGET_MODE 368 role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET; 369 #else 370 role = ISP_DEFAULT_ROLES; 371 #endif 372 } 373 374 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO); 375 if (pcs == NULL) { 376 device_printf(dev, "cannot allocate softc\n"); 377 return (ENOMEM); 378 } 379 380 /* 381 * Figure out which we should try first - memory mapping or i/o mapping? 382 */ 383 #ifdef __alpha__ 384 m1 = PCIM_CMD_MEMEN; 385 m2 = PCIM_CMD_PORTEN; 386 #else 387 m1 = PCIM_CMD_PORTEN; 388 m2 = PCIM_CMD_MEMEN; 389 #endif 390 391 tval = 0; 392 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 393 "prefer_iomap", &tval) == 0 && tval != 0) { 394 m1 = PCIM_CMD_PORTEN; 395 m2 = PCIM_CMD_MEMEN; 396 } 397 tval = 0; 398 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 399 "prefer_memmap", &tval) == 0 && tval != 0) { 400 m1 = PCIM_CMD_MEMEN; 401 m2 = PCIM_CMD_PORTEN; 402 } 403 404 linesz = PCI_DFLT_LNSZ; 405 irq = regs = NULL; 406 rgd = rtp = iqd = 0; 407 408 cmd = pci_read_config(dev, PCIR_COMMAND, 1); 409 if (cmd & m1) { 410 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 411 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 412 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE); 413 } 414 if (regs == NULL && (cmd & m2)) { 415 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 416 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 417 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE); 418 } 419 if (regs == NULL) { 420 device_printf(dev, "unable to map any ports\n"); 421 goto bad; 422 } 423 if (bootverbose) 424 device_printf(dev, "using %s space register mapping\n", 425 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 426 pcs->pci_dev = dev; 427 pcs->pci_reg = regs; 428 pcs->pci_st = rman_get_bustag(regs); 429 pcs->pci_sh = rman_get_bushandle(regs); 430 431 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 432 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 433 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 434 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 435 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 436 mdvp = &mdvec; 437 basetype = ISP_HA_SCSI_UNKNOWN; 438 psize = sizeof (sdparam); 439 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 440 mdvp = &mdvec; 441 basetype = ISP_HA_SCSI_UNKNOWN; 442 psize = sizeof (sdparam); 443 } 444 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 445 mdvp = &mdvec_1080; 446 basetype = ISP_HA_SCSI_1080; 447 psize = sizeof (sdparam); 448 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 449 ISP1080_DMA_REGS_OFF; 450 } 451 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 452 mdvp = &mdvec_1080; 453 basetype = ISP_HA_SCSI_1240; 454 psize = 2 * sizeof (sdparam); 455 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 456 ISP1080_DMA_REGS_OFF; 457 } 458 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 459 mdvp = &mdvec_1080; 460 basetype = ISP_HA_SCSI_1280; 461 psize = 2 * sizeof (sdparam); 462 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 463 ISP1080_DMA_REGS_OFF; 464 } 465 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 466 mdvp = &mdvec_12160; 467 basetype = ISP_HA_SCSI_12160; 468 psize = 2 * sizeof (sdparam); 469 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 470 ISP1080_DMA_REGS_OFF; 471 } 472 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 473 mdvp = &mdvec_2100; 474 basetype = ISP_HA_FC_2100; 475 psize = sizeof (fcparam); 476 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 477 PCI_MBOX_REGS2100_OFF; 478 if (pci_get_revid(dev) < 3) { 479 /* 480 * XXX: Need to get the actual revision 481 * XXX: number of the 2100 FB. At any rate, 482 * XXX: lower cache line size for early revision 483 * XXX; boards. 484 */ 485 linesz = 1; 486 } 487 } 488 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 489 mdvp = &mdvec_2200; 490 basetype = ISP_HA_FC_2200; 491 psize = sizeof (fcparam); 492 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 493 PCI_MBOX_REGS2100_OFF; 494 } 495 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 496 mdvp = &mdvec_2300; 497 basetype = ISP_HA_FC_2300; 498 psize = sizeof (fcparam); 499 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 500 PCI_MBOX_REGS2300_OFF; 501 } 502 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) { 503 mdvp = &mdvec_2300; 504 basetype = ISP_HA_FC_2312; 505 psize = sizeof (fcparam); 506 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 507 PCI_MBOX_REGS2300_OFF; 508 } 509 isp = &pcs->pci_isp; 510 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 511 if (isp->isp_param == NULL) { 512 device_printf(dev, "cannot allocate parameter data\n"); 513 goto bad; 514 } 515 isp->isp_mdvec = mdvp; 516 isp->isp_type = basetype; 517 isp->isp_revision = pci_get_revid(dev); 518 isp->isp_role = role; 519 isp->isp_dev = dev; 520 521 /* 522 * Try and find firmware for this device. 523 */ 524 525 if (isp_get_firmware_p) { 526 int device = (int) pci_get_device(dev); 527 #ifdef ISP_TARGET_MODE 528 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 529 #else 530 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 531 #endif 532 } 533 534 /* 535 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 536 * are set. 537 */ 538 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 539 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 540 if (IS_2300(isp)) { /* per QLogic errata */ 541 cmd &= ~PCIM_CMD_INVEN; 542 } 543 if (IS_23XX(isp)) { 544 /* 545 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 546 */ 547 isp->isp_touched = 1; 548 549 } 550 pci_write_config(dev, PCIR_COMMAND, cmd, 1); 551 552 /* 553 * Make sure the Cache Line Size register is set sensibly. 554 */ 555 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 556 if (data != linesz) { 557 data = PCI_DFLT_LNSZ; 558 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 559 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 560 } 561 562 /* 563 * Make sure the Latency Timer is sane. 564 */ 565 data = pci_read_config(dev, PCIR_LATTIMER, 1); 566 if (data < PCI_DFLT_LTNCY) { 567 data = PCI_DFLT_LTNCY; 568 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 569 pci_write_config(dev, PCIR_LATTIMER, data, 1); 570 } 571 572 /* 573 * Make sure we've disabled the ROM. 574 */ 575 data = pci_read_config(dev, PCIR_ROMADDR, 4); 576 data &= ~1; 577 pci_write_config(dev, PCIR_ROMADDR, data, 4); 578 579 iqd = 0; 580 irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0, 581 1, RF_ACTIVE | RF_SHAREABLE); 582 if (irq == NULL) { 583 device_printf(dev, "could not allocate interrupt\n"); 584 goto bad; 585 } 586 587 tval = 0; 588 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 589 "fwload_disable", &tval) == 0 && tval != 0) { 590 isp->isp_confopts |= ISP_CFG_NORELOAD; 591 } 592 tval = 0; 593 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 594 "ignore_nvram", &tval) == 0 && tval != 0) { 595 isp->isp_confopts |= ISP_CFG_NONVRAM; 596 } 597 tval = 0; 598 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 599 "fullduplex", &tval) == 0 && tval != 0) { 600 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 601 } 602 #ifdef ISP_FW_CRASH_DUMP 603 tval = 0; 604 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 605 "fw_dump_enable", &tval) == 0 && tval != 0) { 606 size_t amt = 0; 607 if (IS_2200(isp)) { 608 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 609 } else if (IS_23XX(isp)) { 610 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 611 } 612 if (amt) { 613 FCPARAM(isp)->isp_dump_data = 614 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 615 } else { 616 device_printf(dev, 617 "f/w crash dumps not supported for this model\n"); 618 } 619 } 620 #endif 621 622 sptr = 0; 623 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 624 "topology", (const char **) &sptr) == 0 && sptr != 0) { 625 if (strcmp(sptr, "lport") == 0) { 626 isp->isp_confopts |= ISP_CFG_LPORT; 627 } else if (strcmp(sptr, "nport") == 0) { 628 isp->isp_confopts |= ISP_CFG_NPORT; 629 } else if (strcmp(sptr, "lport-only") == 0) { 630 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 631 } else if (strcmp(sptr, "nport-only") == 0) { 632 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 633 } 634 } 635 636 /* 637 * Because the resource_*_value functions can neither return 638 * 64 bit integer values, nor can they be directly coerced 639 * to interpret the right hand side of the assignment as 640 * you want them to interpret it, we have to force WWN 641 * hint replacement to specify WWN strings with a leading 642 * 'w' (e..g w50000000aaaa0001). Sigh. 643 */ 644 sptr = 0; 645 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 646 "portwwn", (const char **) &sptr); 647 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 648 char *eptr = 0; 649 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 650 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 651 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 652 isp->isp_osinfo.default_port_wwn = 0; 653 } else { 654 isp->isp_confopts |= ISP_CFG_OWNWWPN; 655 } 656 } 657 if (isp->isp_osinfo.default_port_wwn == 0) { 658 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 659 } 660 661 sptr = 0; 662 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 663 "nodewwn", (const char **) &sptr); 664 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 665 char *eptr = 0; 666 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 667 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 668 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 669 isp->isp_osinfo.default_node_wwn = 0; 670 } else { 671 isp->isp_confopts |= ISP_CFG_OWNWWNN; 672 } 673 } 674 if (isp->isp_osinfo.default_node_wwn == 0) { 675 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 676 } 677 678 isp_debug = 0; 679 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 680 "debug", &isp_debug); 681 682 /* Make sure the lock is set up. */ 683 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 684 locksetup++; 685 686 #ifdef ISP_SMPLOCK 687 #define INTR_FLAGS INTR_TYPE_CAM | INTR_MPSAFE | INTR_ENTROPY 688 #else 689 #define INTR_FLAGS INTR_TYPE_CAM | INTR_ENTROPY 690 #endif 691 if (bus_setup_intr(dev, irq, INTR_FLAGS, isp_pci_intr, isp, &pcs->ih)) { 692 device_printf(dev, "could not setup interrupt\n"); 693 goto bad; 694 } 695 696 /* 697 * Set up logging levels. 698 */ 699 if (isp_debug) { 700 isp->isp_dblev = isp_debug; 701 } else { 702 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 703 } 704 if (bootverbose) 705 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 706 707 /* 708 * Last minute checks... 709 */ 710 if (IS_2312(isp)) { 711 isp->isp_port = pci_get_function(dev); 712 } 713 714 /* 715 * Make sure we're in reset state. 716 */ 717 ISP_LOCK(isp); 718 isp_reset(isp); 719 if (isp->isp_state != ISP_RESETSTATE) { 720 ISP_UNLOCK(isp); 721 goto bad; 722 } 723 isp_init(isp); 724 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 725 isp_uninit(isp); 726 ISP_UNLOCK(isp); 727 goto bad; 728 } 729 isp_attach(isp); 730 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 731 isp_uninit(isp); 732 ISP_UNLOCK(isp); 733 goto bad; 734 } 735 /* 736 * XXXX: Here is where we might unload the f/w module 737 * XXXX: (or decrease the reference count to it). 738 */ 739 ISP_UNLOCK(isp); 740 return (0); 741 742 bad: 743 744 if (pcs && pcs->ih) { 745 (void) bus_teardown_intr(dev, irq, pcs->ih); 746 } 747 748 if (locksetup && isp) { 749 mtx_destroy(&isp->isp_osinfo.lock); 750 } 751 752 if (irq) { 753 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 754 } 755 756 757 if (regs) { 758 (void) bus_release_resource(dev, rtp, rgd, regs); 759 } 760 761 if (pcs) { 762 if (pcs->pci_isp.isp_param) 763 free(pcs->pci_isp.isp_param, M_DEVBUF); 764 free(pcs, M_DEVBUF); 765 } 766 767 /* 768 * XXXX: Here is where we might unload the f/w module 769 * XXXX: (or decrease the reference count to it). 770 */ 771 return (ENXIO); 772 } 773 774 static void 775 isp_pci_intr(void *arg) 776 { 777 struct ispsoftc *isp = arg; 778 u_int16_t isr, sema, mbox; 779 780 ISP_LOCK(isp); 781 isp->isp_intcnt++; 782 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 783 isp->isp_intbogus++; 784 } else { 785 int iok = isp->isp_osinfo.intsok; 786 isp->isp_osinfo.intsok = 0; 787 isp_intr(isp, isr, sema, mbox); 788 isp->isp_osinfo.intsok = iok; 789 } 790 ISP_UNLOCK(isp); 791 } 792 793 794 #define IspVirt2Off(a, x) \ 795 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 796 _BLK_REG_SHFT] + ((x) & 0xff)) 797 798 #define BXR2(pcs, off) \ 799 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 800 #define BXW2(pcs, off, v) \ 801 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 802 803 804 static INLINE int 805 isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp) 806 { 807 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 808 u_int16_t val0, val1; 809 int i = 0; 810 811 do { 812 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 813 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 814 } while (val0 != val1 && ++i < 1000); 815 if (val0 != val1) { 816 return (1); 817 } 818 *rp = val0; 819 return (0); 820 } 821 822 static int 823 isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp, 824 u_int16_t *semap, u_int16_t *mbp) 825 { 826 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 827 u_int16_t isr, sema; 828 829 if (IS_2100(isp)) { 830 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 831 return (0); 832 } 833 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 834 return (0); 835 } 836 } else { 837 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 838 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 839 } 840 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 841 isr &= INT_PENDING_MASK(isp); 842 sema &= BIU_SEMA_LOCK; 843 if (isr == 0 && sema == 0) { 844 return (0); 845 } 846 *isrp = isr; 847 if ((*semap = sema) != 0) { 848 if (IS_2100(isp)) { 849 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 850 return (0); 851 } 852 } else { 853 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 854 } 855 } 856 return (1); 857 } 858 859 static int 860 isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp, 861 u_int16_t *semap, u_int16_t *mbox0p) 862 { 863 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 864 u_int32_t r2hisr; 865 866 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 867 *isrp = 0; 868 return (0); 869 } 870 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh, 871 IspVirt2Off(pcs, BIU_R2HSTSLO)); 872 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 873 if ((r2hisr & BIU_R2HST_INTR) == 0) { 874 *isrp = 0; 875 return (0); 876 } 877 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 878 case ISPR2HST_ROM_MBX_OK: 879 case ISPR2HST_ROM_MBX_FAIL: 880 case ISPR2HST_MBX_OK: 881 case ISPR2HST_MBX_FAIL: 882 case ISPR2HST_ASYNC_EVENT: 883 case ISPR2HST_RIO_16: 884 case ISPR2HST_FPOST: 885 case ISPR2HST_FPOST_CTIO: 886 *isrp = r2hisr & 0xffff; 887 *mbox0p = (r2hisr >> 16); 888 *semap = 1; 889 return (1); 890 case ISPR2HST_RSPQ_UPDATE: 891 *isrp = r2hisr & 0xffff; 892 *mbox0p = 0; 893 *semap = 0; 894 return (1); 895 default: 896 return (0); 897 } 898 } 899 900 static u_int16_t 901 isp_pci_rd_reg(struct ispsoftc *isp, int regoff) 902 { 903 u_int16_t rv; 904 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 905 int oldconf = 0; 906 907 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 908 /* 909 * We will assume that someone has paused the RISC processor. 910 */ 911 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 912 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 913 oldconf | BIU_PCI_CONF1_SXP); 914 } 915 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 916 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 917 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 918 } 919 return (rv); 920 } 921 922 static void 923 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val) 924 { 925 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 926 int oldconf = 0; 927 928 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 929 /* 930 * We will assume that someone has paused the RISC processor. 931 */ 932 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 933 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 934 oldconf | BIU_PCI_CONF1_SXP); 935 } 936 BXW2(pcs, IspVirt2Off(isp, regoff), val); 937 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 938 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 939 } 940 } 941 942 static u_int16_t 943 isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff) 944 { 945 u_int16_t rv, oc = 0; 946 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 947 948 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 949 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 950 u_int16_t tc; 951 /* 952 * We will assume that someone has paused the RISC processor. 953 */ 954 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 955 tc = oc & ~BIU_PCI1080_CONF1_DMA; 956 if (regoff & SXP_BANK1_SELECT) 957 tc |= BIU_PCI1080_CONF1_SXP1; 958 else 959 tc |= BIU_PCI1080_CONF1_SXP0; 960 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 961 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 962 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 963 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 964 oc | BIU_PCI1080_CONF1_DMA); 965 } 966 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 967 if (oc) { 968 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 969 } 970 return (rv); 971 } 972 973 static void 974 isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val) 975 { 976 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 977 int oc = 0; 978 979 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 980 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 981 u_int16_t tc; 982 /* 983 * We will assume that someone has paused the RISC processor. 984 */ 985 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 986 tc = oc & ~BIU_PCI1080_CONF1_DMA; 987 if (regoff & SXP_BANK1_SELECT) 988 tc |= BIU_PCI1080_CONF1_SXP1; 989 else 990 tc |= BIU_PCI1080_CONF1_SXP0; 991 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 992 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 993 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 994 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 995 oc | BIU_PCI1080_CONF1_DMA); 996 } 997 BXW2(pcs, IspVirt2Off(isp, regoff), val); 998 if (oc) { 999 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1000 } 1001 } 1002 1003 1004 struct imush { 1005 struct ispsoftc *isp; 1006 int error; 1007 }; 1008 1009 static void imc(void *, bus_dma_segment_t *, int, int); 1010 1011 static void 1012 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1013 { 1014 struct imush *imushp = (struct imush *) arg; 1015 if (error) { 1016 imushp->error = error; 1017 } else { 1018 struct ispsoftc *isp =imushp->isp; 1019 bus_addr_t addr = segs->ds_addr; 1020 1021 isp->isp_rquest_dma = addr; 1022 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1023 isp->isp_result_dma = addr; 1024 if (IS_FC(isp)) { 1025 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1026 FCPARAM(isp)->isp_scdma = addr; 1027 } 1028 } 1029 } 1030 1031 /* 1032 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE 1033 */ 1034 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) 1035 1036 static int 1037 isp_pci_mbxdma(struct ispsoftc *isp) 1038 { 1039 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1040 caddr_t base; 1041 u_int32_t len; 1042 int i, error, ns; 1043 bus_size_t bl; 1044 struct imush im; 1045 1046 /* 1047 * Already been here? If so, leave... 1048 */ 1049 if (isp->isp_rquest) { 1050 return (0); 1051 } 1052 1053 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1054 bl = BUS_SPACE_UNRESTRICTED; 1055 } else { 1056 bl = BUS_SPACE_MAXADDR_24BIT; 1057 } 1058 1059 if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR, 1060 BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 1061 ISP_NSEGS, bl, 0, &pcs->dmat)) { 1062 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1063 return(1); 1064 } 1065 1066 1067 len = sizeof (XS_T **) * isp->isp_maxcmds; 1068 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1069 if (isp->isp_xflist == NULL) { 1070 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1071 return (1); 1072 } 1073 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1074 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1075 if (pcs->dmaps == NULL) { 1076 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1077 free(isp->isp_xflist, M_DEVBUF); 1078 return (1); 1079 } 1080 1081 /* 1082 * Allocate and map the request, result queues, plus FC scratch area. 1083 */ 1084 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1085 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1086 if (IS_FC(isp)) { 1087 len += ISP2100_SCRLEN; 1088 } 1089 1090 ns = (len / PAGE_SIZE) + 1; 1091 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, 0, BUS_SPACE_MAXADDR, 1092 BUS_SPACE_MAXADDR, NULL, NULL, len, ns, bl, 0, &isp->isp_cdmat)) { 1093 isp_prt(isp, ISP_LOGERR, 1094 "cannot create a dma tag for control spaces"); 1095 free(pcs->dmaps, M_DEVBUF); 1096 free(isp->isp_xflist, M_DEVBUF); 1097 return (1); 1098 } 1099 1100 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1101 &isp->isp_cdmap) != 0) { 1102 isp_prt(isp, ISP_LOGERR, 1103 "cannot allocate %d bytes of CCB memory", len); 1104 bus_dma_tag_destroy(isp->isp_cdmat); 1105 free(isp->isp_xflist, M_DEVBUF); 1106 free(pcs->dmaps, M_DEVBUF); 1107 return (1); 1108 } 1109 1110 for (i = 0; i < isp->isp_maxcmds; i++) { 1111 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1112 if (error) { 1113 isp_prt(isp, ISP_LOGERR, 1114 "error %d creating per-cmd DMA maps", error); 1115 while (--i >= 0) { 1116 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1117 } 1118 goto bad; 1119 } 1120 } 1121 1122 im.isp = isp; 1123 im.error = 0; 1124 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1125 if (im.error) { 1126 isp_prt(isp, ISP_LOGERR, 1127 "error %d loading dma map for control areas", im.error); 1128 goto bad; 1129 } 1130 1131 isp->isp_rquest = base; 1132 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1133 isp->isp_result = base; 1134 if (IS_FC(isp)) { 1135 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1136 FCPARAM(isp)->isp_scratch = base; 1137 } 1138 return (0); 1139 1140 bad: 1141 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1142 bus_dma_tag_destroy(isp->isp_cdmat); 1143 free(isp->isp_xflist, M_DEVBUF); 1144 free(pcs->dmaps, M_DEVBUF); 1145 isp->isp_rquest = NULL; 1146 return (1); 1147 } 1148 1149 typedef struct { 1150 struct ispsoftc *isp; 1151 void *cmd_token; 1152 void *rq; 1153 u_int16_t *nxtip; 1154 u_int16_t optr; 1155 u_int error; 1156 } mush_t; 1157 1158 #define MUSHERR_NOQENTRIES -2 1159 1160 #ifdef ISP_TARGET_MODE 1161 /* 1162 * We need to handle DMA for target mode differently from initiator mode. 1163 * 1164 * DMA mapping and construction and submission of CTIO Request Entries 1165 * and rendevous for completion are very tightly coupled because we start 1166 * out by knowing (per platform) how much data we have to move, but we 1167 * don't know, up front, how many DMA mapping segments will have to be used 1168 * cover that data, so we don't know how many CTIO Request Entries we 1169 * will end up using. Further, for performance reasons we may want to 1170 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1171 * 1172 * The standard vector still goes through isp_pci_dmasetup, but the callback 1173 * for the DMA mapping routines comes here instead with the whole transfer 1174 * mapped and a pointer to a partially filled in already allocated request 1175 * queue entry. We finish the job. 1176 */ 1177 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1178 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1179 1180 #define STATUS_WITH_DATA 1 1181 1182 static void 1183 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1184 { 1185 mush_t *mp; 1186 struct ccb_scsiio *csio; 1187 struct ispsoftc *isp; 1188 struct isp_pcisoftc *pcs; 1189 bus_dmamap_t *dp; 1190 ct_entry_t *cto, *qe; 1191 u_int8_t scsi_status; 1192 u_int16_t curi, nxti, handle; 1193 u_int32_t sflags; 1194 int32_t resid; 1195 int nth_ctio, nctios, send_status; 1196 1197 mp = (mush_t *) arg; 1198 if (error) { 1199 mp->error = error; 1200 return; 1201 } 1202 1203 isp = mp->isp; 1204 csio = mp->cmd_token; 1205 cto = mp->rq; 1206 curi = isp->isp_reqidx; 1207 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1208 1209 cto->ct_xfrlen = 0; 1210 cto->ct_seg_count = 0; 1211 cto->ct_header.rqs_entry_count = 1; 1212 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1213 1214 if (nseg == 0) { 1215 cto->ct_header.rqs_seqno = 1; 1216 isp_prt(isp, ISP_LOGTDEBUG1, 1217 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1218 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1219 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1220 cto->ct_scsi_status, cto->ct_resid); 1221 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1222 isp_put_ctio(isp, cto, qe); 1223 return; 1224 } 1225 1226 nctios = nseg / ISP_RQDSEG; 1227 if (nseg % ISP_RQDSEG) { 1228 nctios++; 1229 } 1230 1231 /* 1232 * Save syshandle, and potentially any SCSI status, which we'll 1233 * reinsert on the last CTIO we're going to send. 1234 */ 1235 1236 handle = cto->ct_syshandle; 1237 cto->ct_syshandle = 0; 1238 cto->ct_header.rqs_seqno = 0; 1239 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1240 1241 if (send_status) { 1242 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1243 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1244 /* 1245 * Preserve residual. 1246 */ 1247 resid = cto->ct_resid; 1248 1249 /* 1250 * Save actual SCSI status. 1251 */ 1252 scsi_status = cto->ct_scsi_status; 1253 1254 #ifndef STATUS_WITH_DATA 1255 sflags |= CT_NO_DATA; 1256 /* 1257 * We can't do a status at the same time as a data CTIO, so 1258 * we need to synthesize an extra CTIO at this level. 1259 */ 1260 nctios++; 1261 #endif 1262 } else { 1263 sflags = scsi_status = resid = 0; 1264 } 1265 1266 cto->ct_resid = 0; 1267 cto->ct_scsi_status = 0; 1268 1269 pcs = (struct isp_pcisoftc *)isp; 1270 dp = &pcs->dmaps[isp_handle_index(handle)]; 1271 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1272 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1273 } else { 1274 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1275 } 1276 1277 nxti = *mp->nxtip; 1278 1279 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 1280 int seglim; 1281 1282 seglim = nseg; 1283 if (seglim) { 1284 int seg; 1285 1286 if (seglim > ISP_RQDSEG) 1287 seglim = ISP_RQDSEG; 1288 1289 for (seg = 0; seg < seglim; seg++, nseg--) { 1290 /* 1291 * Unlike normal initiator commands, we don't 1292 * do any swizzling here. 1293 */ 1294 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1295 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1296 cto->ct_xfrlen += dm_segs->ds_len; 1297 dm_segs++; 1298 } 1299 cto->ct_seg_count = seg; 1300 } else { 1301 /* 1302 * This case should only happen when we're sending an 1303 * extra CTIO with final status. 1304 */ 1305 if (send_status == 0) { 1306 isp_prt(isp, ISP_LOGWARN, 1307 "tdma_mk ran out of segments"); 1308 mp->error = EINVAL; 1309 return; 1310 } 1311 } 1312 1313 /* 1314 * At this point, the fields ct_lun, ct_iid, ct_tagval, 1315 * ct_tagtype, and ct_timeout have been carried over 1316 * unchanged from what our caller had set. 1317 * 1318 * The dataseg fields and the seg_count fields we just got 1319 * through setting. The data direction we've preserved all 1320 * along and only clear it if we're now sending status. 1321 */ 1322 1323 if (nth_ctio == nctios - 1) { 1324 /* 1325 * We're the last in a sequence of CTIOs, so mark 1326 * this CTIO and save the handle to the CCB such that 1327 * when this CTIO completes we can free dma resources 1328 * and do whatever else we need to do to finish the 1329 * rest of the command. We *don't* give this to the 1330 * firmware to work on- the caller will do that. 1331 */ 1332 1333 cto->ct_syshandle = handle; 1334 cto->ct_header.rqs_seqno = 1; 1335 1336 if (send_status) { 1337 cto->ct_scsi_status = scsi_status; 1338 cto->ct_flags |= sflags; 1339 cto->ct_resid = resid; 1340 } 1341 if (send_status) { 1342 isp_prt(isp, ISP_LOGTDEBUG1, 1343 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 1344 "scsi status %x resid %d", 1345 cto->ct_fwhandle, csio->ccb_h.target_lun, 1346 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 1347 cto->ct_scsi_status, cto->ct_resid); 1348 } else { 1349 isp_prt(isp, ISP_LOGTDEBUG1, 1350 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 1351 cto->ct_fwhandle, csio->ccb_h.target_lun, 1352 cto->ct_iid, cto->ct_tag_val, 1353 cto->ct_flags); 1354 } 1355 isp_put_ctio(isp, cto, qe); 1356 ISP_TDQE(isp, "last tdma_mk", curi, cto); 1357 if (nctios > 1) { 1358 MEMORYBARRIER(isp, SYNC_REQUEST, 1359 curi, QENTRY_LEN); 1360 } 1361 } else { 1362 ct_entry_t *oqe = qe; 1363 1364 /* 1365 * Make sure syshandle fields are clean 1366 */ 1367 cto->ct_syshandle = 0; 1368 cto->ct_header.rqs_seqno = 0; 1369 1370 isp_prt(isp, ISP_LOGTDEBUG1, 1371 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 1372 cto->ct_fwhandle, csio->ccb_h.target_lun, 1373 cto->ct_iid, cto->ct_flags); 1374 1375 /* 1376 * Get a new CTIO 1377 */ 1378 qe = (ct_entry_t *) 1379 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1380 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 1381 if (nxti == mp->optr) { 1382 isp_prt(isp, ISP_LOGTDEBUG0, 1383 "Queue Overflow in tdma_mk"); 1384 mp->error = MUSHERR_NOQENTRIES; 1385 return; 1386 } 1387 1388 /* 1389 * Now that we're done with the old CTIO, 1390 * flush it out to the request queue. 1391 */ 1392 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 1393 isp_put_ctio(isp, cto, oqe); 1394 if (nth_ctio != 0) { 1395 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 1396 QENTRY_LEN); 1397 } 1398 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 1399 1400 /* 1401 * Reset some fields in the CTIO so we can reuse 1402 * for the next one we'll flush to the request 1403 * queue. 1404 */ 1405 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1406 cto->ct_header.rqs_entry_count = 1; 1407 cto->ct_header.rqs_flags = 0; 1408 cto->ct_status = 0; 1409 cto->ct_scsi_status = 0; 1410 cto->ct_xfrlen = 0; 1411 cto->ct_resid = 0; 1412 cto->ct_seg_count = 0; 1413 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1414 } 1415 } 1416 *mp->nxtip = nxti; 1417 } 1418 1419 /* 1420 * We don't have to do multiple CTIOs here. Instead, we can just do 1421 * continuation segments as needed. This greatly simplifies the code 1422 * improves performance. 1423 */ 1424 1425 static void 1426 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1427 { 1428 mush_t *mp; 1429 struct ccb_scsiio *csio; 1430 struct ispsoftc *isp; 1431 ct2_entry_t *cto, *qe; 1432 u_int16_t curi, nxti; 1433 int segcnt; 1434 1435 mp = (mush_t *) arg; 1436 if (error) { 1437 mp->error = error; 1438 return; 1439 } 1440 1441 isp = mp->isp; 1442 csio = mp->cmd_token; 1443 cto = mp->rq; 1444 1445 curi = isp->isp_reqidx; 1446 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1447 1448 if (nseg == 0) { 1449 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 1450 isp_prt(isp, ISP_LOGWARN, 1451 "dma2_tgt_fc, a status CTIO2 without MODE1 " 1452 "set (0x%x)", cto->ct_flags); 1453 mp->error = EINVAL; 1454 return; 1455 } 1456 /* 1457 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 1458 * flags to NO DATA and clear relative offset flags. 1459 * We preserve the ct_resid and the response area. 1460 */ 1461 cto->ct_header.rqs_seqno = 1; 1462 cto->ct_seg_count = 0; 1463 cto->ct_reloff = 0; 1464 isp_prt(isp, ISP_LOGTDEBUG1, 1465 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 1466 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 1467 cto->ct_iid, cto->ct_flags, cto->ct_status, 1468 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1469 isp_put_ctio2(isp, cto, qe); 1470 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 1471 return; 1472 } 1473 1474 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 1475 isp_prt(isp, ISP_LOGERR, 1476 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 1477 "(0x%x)", cto->ct_flags); 1478 mp->error = EINVAL; 1479 return; 1480 } 1481 1482 1483 nxti = *mp->nxtip; 1484 1485 /* 1486 * Set up the CTIO2 data segments. 1487 */ 1488 for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg; 1489 cto->ct_seg_count++, segcnt++) { 1490 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base = 1491 dm_segs[segcnt].ds_addr; 1492 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count = 1493 dm_segs[segcnt].ds_len; 1494 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1495 isp_prt(isp, ISP_LOGTDEBUG1, "isp_send_ctio2: ent0[%d]0x%x:%d", 1496 cto->ct_seg_count, dm_segs[segcnt].ds_addr, 1497 dm_segs[segcnt].ds_len); 1498 } 1499 1500 while (segcnt < nseg) { 1501 u_int16_t curip; 1502 int seg; 1503 ispcontreq_t local, *crq = &local, *qep; 1504 1505 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1506 curip = nxti; 1507 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 1508 if (nxti == mp->optr) { 1509 ISP_UNLOCK(isp); 1510 isp_prt(isp, ISP_LOGTDEBUG0, 1511 "tdma_mkfc: request queue overflow"); 1512 mp->error = MUSHERR_NOQENTRIES; 1513 return; 1514 } 1515 cto->ct_header.rqs_entry_count++; 1516 MEMZERO((void *)crq, sizeof (*crq)); 1517 crq->req_header.rqs_entry_count = 1; 1518 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1519 for (seg = 0; segcnt < nseg && seg < ISP_CDSEG; 1520 segcnt++, seg++) { 1521 crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr; 1522 crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len; 1523 isp_prt(isp, ISP_LOGTDEBUG1, 1524 "isp_send_ctio2: ent%d[%d]%x:%u", 1525 cto->ct_header.rqs_entry_count-1, seg, 1526 dm_segs[segcnt].ds_addr, dm_segs[segcnt].ds_len); 1527 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1528 cto->ct_seg_count++; 1529 } 1530 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 1531 isp_put_cont_req(isp, crq, qep); 1532 ISP_TDQE(isp, "cont entry", curi, qep); 1533 } 1534 1535 /* 1536 * No do final twiddling for the CTIO itself. 1537 */ 1538 cto->ct_header.rqs_seqno = 1; 1539 isp_prt(isp, ISP_LOGTDEBUG1, 1540 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 1541 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 1542 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 1543 cto->ct_resid); 1544 isp_put_ctio2(isp, cto, qe); 1545 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 1546 *mp->nxtip = nxti; 1547 } 1548 #endif 1549 1550 static void dma2(void *, bus_dma_segment_t *, int, int); 1551 1552 static void 1553 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1554 { 1555 mush_t *mp; 1556 struct ispsoftc *isp; 1557 struct ccb_scsiio *csio; 1558 struct isp_pcisoftc *pcs; 1559 bus_dmamap_t *dp; 1560 bus_dma_segment_t *eseg; 1561 ispreq_t *rq; 1562 int seglim, datalen; 1563 u_int16_t nxti; 1564 1565 mp = (mush_t *) arg; 1566 if (error) { 1567 mp->error = error; 1568 return; 1569 } 1570 1571 if (nseg < 1) { 1572 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 1573 mp->error = EFAULT; 1574 return; 1575 } 1576 csio = mp->cmd_token; 1577 isp = mp->isp; 1578 rq = mp->rq; 1579 pcs = (struct isp_pcisoftc *)mp->isp; 1580 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1581 nxti = *mp->nxtip; 1582 1583 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1584 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1585 } else { 1586 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1587 } 1588 1589 datalen = XS_XFRLEN(csio); 1590 1591 /* 1592 * We're passed an initial partially filled in entry that 1593 * has most fields filled in except for data transfer 1594 * related values. 1595 * 1596 * Our job is to fill in the initial request queue entry and 1597 * then to start allocating and filling in continuation entries 1598 * until we've covered the entire transfer. 1599 */ 1600 1601 if (IS_FC(isp)) { 1602 seglim = ISP_RQDSEG_T2; 1603 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 1604 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1605 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1606 } else { 1607 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1608 } 1609 } else { 1610 if (csio->cdb_len > 12) { 1611 seglim = 0; 1612 } else { 1613 seglim = ISP_RQDSEG; 1614 } 1615 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1616 rq->req_flags |= REQFLAG_DATA_IN; 1617 } else { 1618 rq->req_flags |= REQFLAG_DATA_OUT; 1619 } 1620 } 1621 1622 eseg = dm_segs + nseg; 1623 1624 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1625 if (IS_FC(isp)) { 1626 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 1627 rq2->req_dataseg[rq2->req_seg_count].ds_base = 1628 dm_segs->ds_addr; 1629 rq2->req_dataseg[rq2->req_seg_count].ds_count = 1630 dm_segs->ds_len; 1631 } else { 1632 rq->req_dataseg[rq->req_seg_count].ds_base = 1633 dm_segs->ds_addr; 1634 rq->req_dataseg[rq->req_seg_count].ds_count = 1635 dm_segs->ds_len; 1636 } 1637 datalen -= dm_segs->ds_len; 1638 rq->req_seg_count++; 1639 dm_segs++; 1640 } 1641 1642 while (datalen > 0 && dm_segs != eseg) { 1643 u_int16_t onxti; 1644 ispcontreq_t local, *crq = &local, *cqe; 1645 1646 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1647 onxti = nxti; 1648 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 1649 if (nxti == mp->optr) { 1650 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 1651 mp->error = MUSHERR_NOQENTRIES; 1652 return; 1653 } 1654 rq->req_header.rqs_entry_count++; 1655 MEMZERO((void *)crq, sizeof (*crq)); 1656 crq->req_header.rqs_entry_count = 1; 1657 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1658 1659 seglim = 0; 1660 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 1661 crq->req_dataseg[seglim].ds_base = 1662 dm_segs->ds_addr; 1663 crq->req_dataseg[seglim].ds_count = 1664 dm_segs->ds_len; 1665 rq->req_seg_count++; 1666 dm_segs++; 1667 seglim++; 1668 datalen -= dm_segs->ds_len; 1669 } 1670 isp_put_cont_req(isp, crq, cqe); 1671 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 1672 } 1673 *mp->nxtip = nxti; 1674 } 1675 1676 static int 1677 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq, 1678 u_int16_t *nxtip, u_int16_t optr) 1679 { 1680 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1681 ispreq_t *qep; 1682 bus_dmamap_t *dp = NULL; 1683 mush_t mush, *mp; 1684 void (*eptr)(void *, bus_dma_segment_t *, int, int); 1685 1686 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 1687 #ifdef ISP_TARGET_MODE 1688 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1689 if (IS_FC(isp)) { 1690 eptr = tdma_mkfc; 1691 } else { 1692 eptr = tdma_mk; 1693 } 1694 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1695 (csio->dxfer_len == 0)) { 1696 mp = &mush; 1697 mp->isp = isp; 1698 mp->cmd_token = csio; 1699 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 1700 mp->nxtip = nxtip; 1701 mp->optr = optr; 1702 mp->error = 0; 1703 (*eptr)(mp, NULL, 0, 0); 1704 goto mbxsync; 1705 } 1706 } else 1707 #endif 1708 eptr = dma2; 1709 1710 1711 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1712 (csio->dxfer_len == 0)) { 1713 rq->req_seg_count = 1; 1714 goto mbxsync; 1715 } 1716 1717 /* 1718 * Do a virtual grapevine step to collect info for 1719 * the callback dma allocation that we have to use... 1720 */ 1721 mp = &mush; 1722 mp->isp = isp; 1723 mp->cmd_token = csio; 1724 mp->rq = rq; 1725 mp->nxtip = nxtip; 1726 mp->optr = optr; 1727 mp->error = 0; 1728 1729 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1730 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 1731 int error, s; 1732 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1733 s = splsoftvm(); 1734 error = bus_dmamap_load(pcs->dmat, *dp, 1735 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 1736 if (error == EINPROGRESS) { 1737 bus_dmamap_unload(pcs->dmat, *dp); 1738 mp->error = EINVAL; 1739 isp_prt(isp, ISP_LOGERR, 1740 "deferred dma allocation not supported"); 1741 } else if (error && mp->error == 0) { 1742 #ifdef DIAGNOSTIC 1743 isp_prt(isp, ISP_LOGERR, 1744 "error %d in dma mapping code", error); 1745 #endif 1746 mp->error = error; 1747 } 1748 splx(s); 1749 } else { 1750 /* Pointer to physical buffer */ 1751 struct bus_dma_segment seg; 1752 seg.ds_addr = (bus_addr_t)csio->data_ptr; 1753 seg.ds_len = csio->dxfer_len; 1754 (*eptr)(mp, &seg, 1, 0); 1755 } 1756 } else { 1757 struct bus_dma_segment *segs; 1758 1759 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1760 isp_prt(isp, ISP_LOGERR, 1761 "Physical segment pointers unsupported"); 1762 mp->error = EINVAL; 1763 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1764 isp_prt(isp, ISP_LOGERR, 1765 "Virtual segment addresses unsupported"); 1766 mp->error = EINVAL; 1767 } else { 1768 /* Just use the segments provided */ 1769 segs = (struct bus_dma_segment *) csio->data_ptr; 1770 (*eptr)(mp, segs, csio->sglist_cnt, 0); 1771 } 1772 } 1773 if (mp->error) { 1774 int retval = CMD_COMPLETE; 1775 if (mp->error == MUSHERR_NOQENTRIES) { 1776 retval = CMD_EAGAIN; 1777 } else if (mp->error == EFBIG) { 1778 XS_SETERR(csio, CAM_REQ_TOO_BIG); 1779 } else if (mp->error == EINVAL) { 1780 XS_SETERR(csio, CAM_REQ_INVALID); 1781 } else { 1782 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 1783 } 1784 return (retval); 1785 } 1786 mbxsync: 1787 switch (rq->req_header.rqs_entry_type) { 1788 case RQSTYPE_REQUEST: 1789 isp_put_request(isp, rq, qep); 1790 break; 1791 case RQSTYPE_CMDONLY: 1792 isp_put_extended_request(isp, (ispextreq_t *)rq, 1793 (ispextreq_t *)qep); 1794 break; 1795 case RQSTYPE_T2RQS: 1796 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 1797 break; 1798 } 1799 return (CMD_QUEUED); 1800 } 1801 1802 static void 1803 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle) 1804 { 1805 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1806 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)]; 1807 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1808 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 1809 } else { 1810 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 1811 } 1812 bus_dmamap_unload(pcs->dmat, *dp); 1813 } 1814 1815 1816 static void 1817 isp_pci_reset1(struct ispsoftc *isp) 1818 { 1819 /* Make sure the BIOS is disabled */ 1820 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 1821 /* and enable interrupts */ 1822 ENABLE_INTS(isp); 1823 } 1824 1825 static void 1826 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg) 1827 { 1828 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1829 if (msg) 1830 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 1831 else 1832 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 1833 if (IS_SCSI(isp)) 1834 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 1835 else 1836 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 1837 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 1838 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 1839 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 1840 1841 1842 if (IS_SCSI(isp)) { 1843 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 1844 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 1845 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 1846 ISP_READ(isp, CDMA_FIFO_STS)); 1847 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 1848 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 1849 ISP_READ(isp, DDMA_FIFO_STS)); 1850 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 1851 ISP_READ(isp, SXP_INTERRUPT), 1852 ISP_READ(isp, SXP_GROSS_ERR), 1853 ISP_READ(isp, SXP_PINS_CTRL)); 1854 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 1855 } 1856 printf(" mbox regs: %x %x %x %x %x\n", 1857 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 1858 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 1859 ISP_READ(isp, OUTMAILBOX4)); 1860 printf(" PCI Status Command/Status=%x\n", 1861 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 1862 } 1863