1 /*- 2 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/kernel.h> 35 #include <sys/module.h> 36 #include <sys/bus.h> 37 38 #include <dev/pci/pcireg.h> 39 #include <dev/pci/pcivar.h> 40 41 #include <machine/bus_memio.h> 42 #include <machine/bus_pio.h> 43 #include <machine/bus.h> 44 #include <machine/resource.h> 45 #include <sys/rman.h> 46 #include <sys/malloc.h> 47 48 #include <dev/isp/isp_freebsd.h> 49 50 static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int); 51 static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t); 52 static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int); 53 static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t); 54 static int 55 isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 56 static int 57 isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 58 static int isp_pci_mbxdma(struct ispsoftc *); 59 static int 60 isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t); 61 static void 62 isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t); 63 64 static void isp_pci_reset1(struct ispsoftc *); 65 static void isp_pci_dumpregs(struct ispsoftc *, const char *); 66 67 static struct ispmdvec mdvec = { 68 isp_pci_rd_isr, 69 isp_pci_rd_reg, 70 isp_pci_wr_reg, 71 isp_pci_mbxdma, 72 isp_pci_dmasetup, 73 isp_pci_dmateardown, 74 NULL, 75 isp_pci_reset1, 76 isp_pci_dumpregs, 77 NULL, 78 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 79 }; 80 81 static struct ispmdvec mdvec_1080 = { 82 isp_pci_rd_isr, 83 isp_pci_rd_reg_1080, 84 isp_pci_wr_reg_1080, 85 isp_pci_mbxdma, 86 isp_pci_dmasetup, 87 isp_pci_dmateardown, 88 NULL, 89 isp_pci_reset1, 90 isp_pci_dumpregs, 91 NULL, 92 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 93 }; 94 95 static struct ispmdvec mdvec_12160 = { 96 isp_pci_rd_isr, 97 isp_pci_rd_reg_1080, 98 isp_pci_wr_reg_1080, 99 isp_pci_mbxdma, 100 isp_pci_dmasetup, 101 isp_pci_dmateardown, 102 NULL, 103 isp_pci_reset1, 104 isp_pci_dumpregs, 105 NULL, 106 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 107 }; 108 109 static struct ispmdvec mdvec_2100 = { 110 isp_pci_rd_isr, 111 isp_pci_rd_reg, 112 isp_pci_wr_reg, 113 isp_pci_mbxdma, 114 isp_pci_dmasetup, 115 isp_pci_dmateardown, 116 NULL, 117 isp_pci_reset1, 118 isp_pci_dumpregs 119 }; 120 121 static struct ispmdvec mdvec_2200 = { 122 isp_pci_rd_isr, 123 isp_pci_rd_reg, 124 isp_pci_wr_reg, 125 isp_pci_mbxdma, 126 isp_pci_dmasetup, 127 isp_pci_dmateardown, 128 NULL, 129 isp_pci_reset1, 130 isp_pci_dumpregs 131 }; 132 133 static struct ispmdvec mdvec_2300 = { 134 isp_pci_rd_isr_2300, 135 isp_pci_rd_reg, 136 isp_pci_wr_reg, 137 isp_pci_mbxdma, 138 isp_pci_dmasetup, 139 isp_pci_dmateardown, 140 NULL, 141 isp_pci_reset1, 142 isp_pci_dumpregs 143 }; 144 145 #ifndef PCIM_CMD_INVEN 146 #define PCIM_CMD_INVEN 0x10 147 #endif 148 #ifndef PCIM_CMD_BUSMASTEREN 149 #define PCIM_CMD_BUSMASTEREN 0x0004 150 #endif 151 #ifndef PCIM_CMD_PERRESPEN 152 #define PCIM_CMD_PERRESPEN 0x0040 153 #endif 154 #ifndef PCIM_CMD_SEREN 155 #define PCIM_CMD_SEREN 0x0100 156 #endif 157 158 #ifndef PCIR_COMMAND 159 #define PCIR_COMMAND 0x04 160 #endif 161 162 #ifndef PCIR_CACHELNSZ 163 #define PCIR_CACHELNSZ 0x0c 164 #endif 165 166 #ifndef PCIR_LATTIMER 167 #define PCIR_LATTIMER 0x0d 168 #endif 169 170 #ifndef PCIR_ROMADDR 171 #define PCIR_ROMADDR 0x30 172 #endif 173 174 #ifndef PCI_VENDOR_QLOGIC 175 #define PCI_VENDOR_QLOGIC 0x1077 176 #endif 177 178 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 179 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 180 #endif 181 182 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 183 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 184 #endif 185 186 #ifndef PCI_PRODUCT_QLOGIC_ISP10160 187 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 188 #endif 189 190 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 191 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 192 #endif 193 194 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 195 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 196 #endif 197 198 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 199 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 200 #endif 201 202 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 203 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 204 #endif 205 206 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 207 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 208 #endif 209 210 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 211 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 212 #endif 213 214 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 215 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 216 #endif 217 218 #define PCI_QLOGIC_ISP1020 \ 219 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 220 221 #define PCI_QLOGIC_ISP1080 \ 222 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 223 224 #define PCI_QLOGIC_ISP10160 \ 225 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 226 227 #define PCI_QLOGIC_ISP12160 \ 228 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 229 230 #define PCI_QLOGIC_ISP1240 \ 231 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 232 233 #define PCI_QLOGIC_ISP1280 \ 234 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 235 236 #define PCI_QLOGIC_ISP2100 \ 237 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 238 239 #define PCI_QLOGIC_ISP2200 \ 240 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 241 242 #define PCI_QLOGIC_ISP2300 \ 243 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 244 245 #define PCI_QLOGIC_ISP2312 \ 246 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 247 248 /* 249 * Odd case for some AMI raid cards... We need to *not* attach to this. 250 */ 251 #define AMI_RAID_SUBVENDOR_ID 0x101e 252 253 #define IO_MAP_REG 0x10 254 #define MEM_MAP_REG 0x14 255 256 #define PCI_DFLT_LTNCY 0x40 257 #define PCI_DFLT_LNSZ 0x10 258 259 static int isp_pci_probe (device_t); 260 static int isp_pci_attach (device_t); 261 262 263 struct isp_pcisoftc { 264 struct ispsoftc pci_isp; 265 device_t pci_dev; 266 struct resource * pci_reg; 267 bus_space_tag_t pci_st; 268 bus_space_handle_t pci_sh; 269 void * ih; 270 int16_t pci_poff[_NREG_BLKS]; 271 bus_dma_tag_t dmat; 272 bus_dmamap_t *dmaps; 273 }; 274 extern ispfwfunc *isp_get_firmware_p; 275 276 static device_method_t isp_pci_methods[] = { 277 /* Device interface */ 278 DEVMETHOD(device_probe, isp_pci_probe), 279 DEVMETHOD(device_attach, isp_pci_attach), 280 { 0, 0 } 281 }; 282 static void isp_pci_intr(void *); 283 284 static driver_t isp_pci_driver = { 285 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 286 }; 287 static devclass_t isp_devclass; 288 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 289 290 static int 291 isp_pci_probe(device_t dev) 292 { 293 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 294 case PCI_QLOGIC_ISP1020: 295 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 296 break; 297 case PCI_QLOGIC_ISP1080: 298 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 299 break; 300 case PCI_QLOGIC_ISP1240: 301 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 302 break; 303 case PCI_QLOGIC_ISP1280: 304 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 305 break; 306 case PCI_QLOGIC_ISP10160: 307 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 308 break; 309 case PCI_QLOGIC_ISP12160: 310 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 311 return (ENXIO); 312 } 313 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 314 break; 315 case PCI_QLOGIC_ISP2100: 316 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 317 break; 318 case PCI_QLOGIC_ISP2200: 319 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 320 break; 321 case PCI_QLOGIC_ISP2300: 322 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 323 break; 324 case PCI_QLOGIC_ISP2312: 325 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 326 break; 327 default: 328 return (ENXIO); 329 } 330 if (isp_announced == 0 && bootverbose) { 331 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 332 "Core Version %d.%d\n", 333 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 334 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 335 isp_announced++; 336 } 337 /* 338 * XXXX: Here is where we might load the f/w module 339 * XXXX: (or increase a reference count to it). 340 */ 341 return (0); 342 } 343 344 static int 345 isp_pci_attach(device_t dev) 346 { 347 struct resource *regs, *irq; 348 int tval, rtp, rgd, iqd, m1, m2, isp_debug, role; 349 u_int32_t data, cmd, linesz, psize, basetype; 350 struct isp_pcisoftc *pcs; 351 struct ispsoftc *isp = NULL; 352 struct ispmdvec *mdvp; 353 const char *sptr; 354 int locksetup = 0; 355 356 /* 357 * Figure out if we're supposed to skip this one. 358 */ 359 360 tval = 0; 361 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 362 "disable", &tval) == 0 && tval) { 363 device_printf(dev, "device is disabled\n"); 364 /* but return 0 so the !$)$)*!$*) unit isn't reused */ 365 return (0); 366 } 367 368 role = -1; 369 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 370 "role", &role) == 0 && role != -1) { 371 role &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 372 device_printf(dev, "setting role to 0x%x\n", role); 373 } else { 374 #ifdef ISP_TARGET_MODE 375 role = ISP_ROLE_TARGET; 376 #else 377 role = ISP_DEFAULT_ROLES; 378 #endif 379 } 380 381 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO); 382 if (pcs == NULL) { 383 device_printf(dev, "cannot allocate softc\n"); 384 return (ENOMEM); 385 } 386 387 /* 388 * Figure out which we should try first - memory mapping or i/o mapping? 389 */ 390 #ifdef __alpha__ 391 m1 = PCIM_CMD_MEMEN; 392 m2 = PCIM_CMD_PORTEN; 393 #else 394 m1 = PCIM_CMD_PORTEN; 395 m2 = PCIM_CMD_MEMEN; 396 #endif 397 398 tval = 0; 399 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 400 "prefer_iomap", &tval) == 0 && tval != 0) { 401 m1 = PCIM_CMD_PORTEN; 402 m2 = PCIM_CMD_MEMEN; 403 } 404 tval = 0; 405 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 406 "prefer_memmap", &tval) == 0 && tval != 0) { 407 m1 = PCIM_CMD_MEMEN; 408 m2 = PCIM_CMD_PORTEN; 409 } 410 411 linesz = PCI_DFLT_LNSZ; 412 irq = regs = NULL; 413 rgd = rtp = iqd = 0; 414 415 cmd = pci_read_config(dev, PCIR_COMMAND, 1); 416 if (cmd & m1) { 417 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 418 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 419 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 420 } 421 if (regs == NULL && (cmd & m2)) { 422 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 423 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 424 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 425 } 426 if (regs == NULL) { 427 device_printf(dev, "unable to map any ports\n"); 428 goto bad; 429 } 430 if (bootverbose) 431 device_printf(dev, "using %s space register mapping\n", 432 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 433 pcs->pci_dev = dev; 434 pcs->pci_reg = regs; 435 pcs->pci_st = rman_get_bustag(regs); 436 pcs->pci_sh = rman_get_bushandle(regs); 437 438 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 439 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 440 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 441 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 442 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 443 mdvp = &mdvec; 444 basetype = ISP_HA_SCSI_UNKNOWN; 445 psize = sizeof (sdparam); 446 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 447 mdvp = &mdvec; 448 basetype = ISP_HA_SCSI_UNKNOWN; 449 psize = sizeof (sdparam); 450 } 451 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 452 mdvp = &mdvec_1080; 453 basetype = ISP_HA_SCSI_1080; 454 psize = sizeof (sdparam); 455 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 456 ISP1080_DMA_REGS_OFF; 457 } 458 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 459 mdvp = &mdvec_1080; 460 basetype = ISP_HA_SCSI_1240; 461 psize = 2 * sizeof (sdparam); 462 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 463 ISP1080_DMA_REGS_OFF; 464 } 465 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 466 mdvp = &mdvec_1080; 467 basetype = ISP_HA_SCSI_1280; 468 psize = 2 * sizeof (sdparam); 469 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 470 ISP1080_DMA_REGS_OFF; 471 } 472 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 473 mdvp = &mdvec_12160; 474 basetype = ISP_HA_SCSI_10160; 475 psize = sizeof (sdparam); 476 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 477 ISP1080_DMA_REGS_OFF; 478 } 479 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 480 mdvp = &mdvec_12160; 481 basetype = ISP_HA_SCSI_12160; 482 psize = 2 * sizeof (sdparam); 483 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 484 ISP1080_DMA_REGS_OFF; 485 } 486 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 487 mdvp = &mdvec_2100; 488 basetype = ISP_HA_FC_2100; 489 psize = sizeof (fcparam); 490 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 491 PCI_MBOX_REGS2100_OFF; 492 if (pci_get_revid(dev) < 3) { 493 /* 494 * XXX: Need to get the actual revision 495 * XXX: number of the 2100 FB. At any rate, 496 * XXX: lower cache line size for early revision 497 * XXX; boards. 498 */ 499 linesz = 1; 500 } 501 } 502 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 503 mdvp = &mdvec_2200; 504 basetype = ISP_HA_FC_2200; 505 psize = sizeof (fcparam); 506 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 507 PCI_MBOX_REGS2100_OFF; 508 } 509 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 510 mdvp = &mdvec_2300; 511 basetype = ISP_HA_FC_2300; 512 psize = sizeof (fcparam); 513 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 514 PCI_MBOX_REGS2300_OFF; 515 } 516 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) { 517 mdvp = &mdvec_2300; 518 basetype = ISP_HA_FC_2312; 519 psize = sizeof (fcparam); 520 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 521 PCI_MBOX_REGS2300_OFF; 522 } 523 isp = &pcs->pci_isp; 524 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 525 if (isp->isp_param == NULL) { 526 device_printf(dev, "cannot allocate parameter data\n"); 527 goto bad; 528 } 529 isp->isp_mdvec = mdvp; 530 isp->isp_type = basetype; 531 isp->isp_revision = pci_get_revid(dev); 532 isp->isp_role = role; 533 isp->isp_dev = dev; 534 535 /* 536 * Try and find firmware for this device. 537 */ 538 539 if (isp_get_firmware_p) { 540 int device = (int) pci_get_device(dev); 541 #ifdef ISP_TARGET_MODE 542 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 543 #else 544 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 545 #endif 546 } 547 548 /* 549 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 550 * are set. 551 */ 552 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 553 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 554 if (IS_2300(isp)) { /* per QLogic errata */ 555 cmd &= ~PCIM_CMD_INVEN; 556 } 557 if (IS_23XX(isp)) { 558 /* 559 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 560 */ 561 isp->isp_touched = 1; 562 563 } 564 pci_write_config(dev, PCIR_COMMAND, cmd, 1); 565 566 /* 567 * Make sure the Cache Line Size register is set sensibly. 568 */ 569 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 570 if (data != linesz) { 571 data = PCI_DFLT_LNSZ; 572 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 573 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 574 } 575 576 /* 577 * Make sure the Latency Timer is sane. 578 */ 579 data = pci_read_config(dev, PCIR_LATTIMER, 1); 580 if (data < PCI_DFLT_LTNCY) { 581 data = PCI_DFLT_LTNCY; 582 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 583 pci_write_config(dev, PCIR_LATTIMER, data, 1); 584 } 585 586 /* 587 * Make sure we've disabled the ROM. 588 */ 589 data = pci_read_config(dev, PCIR_ROMADDR, 4); 590 data &= ~1; 591 pci_write_config(dev, PCIR_ROMADDR, data, 4); 592 593 iqd = 0; 594 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, 595 RF_ACTIVE | RF_SHAREABLE); 596 if (irq == NULL) { 597 device_printf(dev, "could not allocate interrupt\n"); 598 goto bad; 599 } 600 601 tval = 0; 602 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 603 "fwload_disable", &tval) == 0 && tval != 0) { 604 isp->isp_confopts |= ISP_CFG_NORELOAD; 605 } 606 tval = 0; 607 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 608 "ignore_nvram", &tval) == 0 && tval != 0) { 609 isp->isp_confopts |= ISP_CFG_NONVRAM; 610 } 611 tval = 0; 612 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 613 "fullduplex", &tval) == 0 && tval != 0) { 614 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 615 } 616 #ifdef ISP_FW_CRASH_DUMP 617 tval = 0; 618 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 619 "fw_dump_enable", &tval) == 0 && tval != 0) { 620 size_t amt = 0; 621 if (IS_2200(isp)) { 622 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 623 } else if (IS_23XX(isp)) { 624 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 625 } 626 if (amt) { 627 FCPARAM(isp)->isp_dump_data = 628 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 629 } else { 630 device_printf(dev, 631 "f/w crash dumps not supported for this model\n"); 632 } 633 } 634 #endif 635 636 sptr = 0; 637 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 638 "topology", (const char **) &sptr) == 0 && sptr != 0) { 639 if (strcmp(sptr, "lport") == 0) { 640 isp->isp_confopts |= ISP_CFG_LPORT; 641 } else if (strcmp(sptr, "nport") == 0) { 642 isp->isp_confopts |= ISP_CFG_NPORT; 643 } else if (strcmp(sptr, "lport-only") == 0) { 644 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 645 } else if (strcmp(sptr, "nport-only") == 0) { 646 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 647 } 648 } 649 650 /* 651 * Because the resource_*_value functions can neither return 652 * 64 bit integer values, nor can they be directly coerced 653 * to interpret the right hand side of the assignment as 654 * you want them to interpret it, we have to force WWN 655 * hint replacement to specify WWN strings with a leading 656 * 'w' (e..g w50000000aaaa0001). Sigh. 657 */ 658 sptr = 0; 659 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 660 "portwwn", (const char **) &sptr); 661 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 662 char *eptr = 0; 663 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 664 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 665 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 666 isp->isp_osinfo.default_port_wwn = 0; 667 } else { 668 isp->isp_confopts |= ISP_CFG_OWNWWPN; 669 } 670 } 671 if (isp->isp_osinfo.default_port_wwn == 0) { 672 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 673 } 674 675 sptr = 0; 676 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 677 "nodewwn", (const char **) &sptr); 678 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 679 char *eptr = 0; 680 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 681 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 682 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 683 isp->isp_osinfo.default_node_wwn = 0; 684 } else { 685 isp->isp_confopts |= ISP_CFG_OWNWWNN; 686 } 687 } 688 if (isp->isp_osinfo.default_node_wwn == 0) { 689 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 690 } 691 692 isp->isp_osinfo.default_id = -1; 693 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 694 "iid", &tval) == 0) { 695 isp->isp_osinfo.default_id = tval; 696 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 697 } 698 if (isp->isp_osinfo.default_id == -1) { 699 if (IS_FC(isp)) { 700 isp->isp_osinfo.default_id = 109; 701 } else { 702 isp->isp_osinfo.default_id = 7; 703 } 704 } 705 706 isp_debug = 0; 707 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 708 "debug", &isp_debug); 709 710 /* Make sure the lock is set up. */ 711 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 712 locksetup++; 713 714 if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) { 715 device_printf(dev, "could not setup interrupt\n"); 716 goto bad; 717 } 718 719 /* 720 * Set up logging levels. 721 */ 722 if (isp_debug) { 723 isp->isp_dblev = isp_debug; 724 } else { 725 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 726 } 727 if (bootverbose) 728 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 729 730 /* 731 * Last minute checks... 732 */ 733 if (IS_2312(isp)) { 734 isp->isp_port = pci_get_function(dev); 735 } 736 737 /* 738 * Make sure we're in reset state. 739 */ 740 ISP_LOCK(isp); 741 isp_reset(isp); 742 if (isp->isp_state != ISP_RESETSTATE) { 743 ISP_UNLOCK(isp); 744 goto bad; 745 } 746 isp_init(isp); 747 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 748 isp_uninit(isp); 749 ISP_UNLOCK(isp); 750 goto bad; 751 } 752 isp_attach(isp); 753 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 754 isp_uninit(isp); 755 ISP_UNLOCK(isp); 756 goto bad; 757 } 758 /* 759 * XXXX: Here is where we might unload the f/w module 760 * XXXX: (or decrease the reference count to it). 761 */ 762 ISP_UNLOCK(isp); 763 return (0); 764 765 bad: 766 767 if (pcs && pcs->ih) { 768 (void) bus_teardown_intr(dev, irq, pcs->ih); 769 } 770 771 if (locksetup && isp) { 772 mtx_destroy(&isp->isp_osinfo.lock); 773 } 774 775 if (irq) { 776 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 777 } 778 779 780 if (regs) { 781 (void) bus_release_resource(dev, rtp, rgd, regs); 782 } 783 784 if (pcs) { 785 if (pcs->pci_isp.isp_param) 786 free(pcs->pci_isp.isp_param, M_DEVBUF); 787 free(pcs, M_DEVBUF); 788 } 789 790 /* 791 * XXXX: Here is where we might unload the f/w module 792 * XXXX: (or decrease the reference count to it). 793 */ 794 return (ENXIO); 795 } 796 797 static void 798 isp_pci_intr(void *arg) 799 { 800 struct ispsoftc *isp = arg; 801 u_int16_t isr, sema, mbox; 802 803 ISP_LOCK(isp); 804 isp->isp_intcnt++; 805 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 806 isp->isp_intbogus++; 807 } else { 808 int iok = isp->isp_osinfo.intsok; 809 isp->isp_osinfo.intsok = 0; 810 isp_intr(isp, isr, sema, mbox); 811 isp->isp_osinfo.intsok = iok; 812 } 813 ISP_UNLOCK(isp); 814 } 815 816 817 #define IspVirt2Off(a, x) \ 818 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 819 _BLK_REG_SHFT] + ((x) & 0xff)) 820 821 #define BXR2(pcs, off) \ 822 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 823 #define BXW2(pcs, off, v) \ 824 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 825 826 827 static INLINE int 828 isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp) 829 { 830 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 831 u_int16_t val0, val1; 832 int i = 0; 833 834 do { 835 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 836 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 837 } while (val0 != val1 && ++i < 1000); 838 if (val0 != val1) { 839 return (1); 840 } 841 *rp = val0; 842 return (0); 843 } 844 845 static int 846 isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp, 847 u_int16_t *semap, u_int16_t *mbp) 848 { 849 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 850 u_int16_t isr, sema; 851 852 if (IS_2100(isp)) { 853 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 854 return (0); 855 } 856 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 857 return (0); 858 } 859 } else { 860 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 861 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 862 } 863 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 864 isr &= INT_PENDING_MASK(isp); 865 sema &= BIU_SEMA_LOCK; 866 if (isr == 0 && sema == 0) { 867 return (0); 868 } 869 *isrp = isr; 870 if ((*semap = sema) != 0) { 871 if (IS_2100(isp)) { 872 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 873 return (0); 874 } 875 } else { 876 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 877 } 878 } 879 return (1); 880 } 881 882 static int 883 isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp, 884 u_int16_t *semap, u_int16_t *mbox0p) 885 { 886 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 887 u_int32_t r2hisr; 888 889 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 890 *isrp = 0; 891 return (0); 892 } 893 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh, 894 IspVirt2Off(pcs, BIU_R2HSTSLO)); 895 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 896 if ((r2hisr & BIU_R2HST_INTR) == 0) { 897 *isrp = 0; 898 return (0); 899 } 900 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 901 case ISPR2HST_ROM_MBX_OK: 902 case ISPR2HST_ROM_MBX_FAIL: 903 case ISPR2HST_MBX_OK: 904 case ISPR2HST_MBX_FAIL: 905 case ISPR2HST_ASYNC_EVENT: 906 *isrp = r2hisr & 0xffff; 907 *mbox0p = (r2hisr >> 16); 908 *semap = 1; 909 return (1); 910 case ISPR2HST_RIO_16: 911 *isrp = r2hisr & 0xffff; 912 *mbox0p = ASYNC_RIO1; 913 *semap = 1; 914 return (1); 915 case ISPR2HST_FPOST: 916 *isrp = r2hisr & 0xffff; 917 *mbox0p = ASYNC_CMD_CMPLT; 918 *semap = 1; 919 return (1); 920 case ISPR2HST_FPOST_CTIO: 921 *isrp = r2hisr & 0xffff; 922 *mbox0p = ASYNC_CTIO_DONE; 923 *semap = 1; 924 return (1); 925 case ISPR2HST_RSPQ_UPDATE: 926 *isrp = r2hisr & 0xffff; 927 *mbox0p = 0; 928 *semap = 0; 929 return (1); 930 default: 931 return (0); 932 } 933 } 934 935 static u_int16_t 936 isp_pci_rd_reg(struct ispsoftc *isp, int regoff) 937 { 938 u_int16_t rv; 939 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 940 int oldconf = 0; 941 942 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 943 /* 944 * We will assume that someone has paused the RISC processor. 945 */ 946 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 947 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 948 oldconf | BIU_PCI_CONF1_SXP); 949 } 950 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 951 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 952 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 953 } 954 return (rv); 955 } 956 957 static void 958 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val) 959 { 960 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 961 int oldconf = 0; 962 963 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 964 /* 965 * We will assume that someone has paused the RISC processor. 966 */ 967 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 968 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 969 oldconf | BIU_PCI_CONF1_SXP); 970 } 971 BXW2(pcs, IspVirt2Off(isp, regoff), val); 972 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 973 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 974 } 975 } 976 977 static u_int16_t 978 isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff) 979 { 980 u_int16_t rv, oc = 0; 981 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 982 983 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 984 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 985 u_int16_t tc; 986 /* 987 * We will assume that someone has paused the RISC processor. 988 */ 989 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 990 tc = oc & ~BIU_PCI1080_CONF1_DMA; 991 if (regoff & SXP_BANK1_SELECT) 992 tc |= BIU_PCI1080_CONF1_SXP1; 993 else 994 tc |= BIU_PCI1080_CONF1_SXP0; 995 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 996 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 997 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 998 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 999 oc | BIU_PCI1080_CONF1_DMA); 1000 } 1001 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 1002 if (oc) { 1003 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1004 } 1005 return (rv); 1006 } 1007 1008 static void 1009 isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val) 1010 { 1011 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1012 int oc = 0; 1013 1014 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1015 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1016 u_int16_t tc; 1017 /* 1018 * We will assume that someone has paused the RISC processor. 1019 */ 1020 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1021 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1022 if (regoff & SXP_BANK1_SELECT) 1023 tc |= BIU_PCI1080_CONF1_SXP1; 1024 else 1025 tc |= BIU_PCI1080_CONF1_SXP0; 1026 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1027 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1028 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1029 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1030 oc | BIU_PCI1080_CONF1_DMA); 1031 } 1032 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1033 if (oc) { 1034 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1035 } 1036 } 1037 1038 1039 struct imush { 1040 struct ispsoftc *isp; 1041 int error; 1042 }; 1043 1044 static void imc(void *, bus_dma_segment_t *, int, int); 1045 1046 static void 1047 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1048 { 1049 struct imush *imushp = (struct imush *) arg; 1050 if (error) { 1051 imushp->error = error; 1052 } else { 1053 struct ispsoftc *isp =imushp->isp; 1054 bus_addr_t addr = segs->ds_addr; 1055 1056 isp->isp_rquest_dma = addr; 1057 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1058 isp->isp_result_dma = addr; 1059 if (IS_FC(isp)) { 1060 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1061 FCPARAM(isp)->isp_scdma = addr; 1062 } 1063 } 1064 } 1065 1066 /* 1067 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE 1068 */ 1069 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) 1070 1071 static int 1072 isp_pci_mbxdma(struct ispsoftc *isp) 1073 { 1074 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1075 caddr_t base; 1076 u_int32_t len; 1077 int i, error, ns; 1078 bus_size_t alim, slim; 1079 struct imush im; 1080 1081 /* 1082 * Already been here? If so, leave... 1083 */ 1084 if (isp->isp_rquest) { 1085 return (0); 1086 } 1087 1088 #ifdef ISP_DAC_SUPPORTED 1089 alim = BUS_SPACE_UNRESTRICTED; 1090 #else 1091 alim = BUS_SPACE_MAXADDR_32BIT; 1092 #endif 1093 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1094 slim = BUS_SPACE_MAXADDR_32BIT; 1095 } else { 1096 slim = BUS_SPACE_MAXADDR_24BIT; 1097 } 1098 1099 ISP_UNLOCK(isp); 1100 if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim, 1101 NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, 1102 busdma_lock_mutex, &Giant, &pcs->dmat)) { 1103 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1104 ISP_LOCK(isp); 1105 return(1); 1106 } 1107 1108 1109 len = sizeof (XS_T **) * isp->isp_maxcmds; 1110 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1111 if (isp->isp_xflist == NULL) { 1112 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1113 ISP_LOCK(isp); 1114 return (1); 1115 } 1116 #ifdef ISP_TARGET_MODE 1117 len = sizeof (void **) * isp->isp_maxcmds; 1118 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1119 if (isp->isp_tgtlist == NULL) { 1120 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1121 ISP_LOCK(isp); 1122 return (1); 1123 } 1124 #endif 1125 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1126 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1127 if (pcs->dmaps == NULL) { 1128 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1129 free(isp->isp_xflist, M_DEVBUF); 1130 #ifdef ISP_TARGET_MODE 1131 free(isp->isp_tgtlist, M_DEVBUF); 1132 #endif 1133 ISP_LOCK(isp); 1134 return (1); 1135 } 1136 1137 /* 1138 * Allocate and map the request, result queues, plus FC scratch area. 1139 */ 1140 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1141 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1142 if (IS_FC(isp)) { 1143 len += ISP2100_SCRLEN; 1144 } 1145 1146 ns = (len / PAGE_SIZE) + 1; 1147 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim, 1148 NULL, NULL, len, ns, slim, 0, busdma_lock_mutex, &Giant, 1149 &isp->isp_cdmat)) { 1150 isp_prt(isp, ISP_LOGERR, 1151 "cannot create a dma tag for control spaces"); 1152 free(pcs->dmaps, M_DEVBUF); 1153 free(isp->isp_xflist, M_DEVBUF); 1154 #ifdef ISP_TARGET_MODE 1155 free(isp->isp_tgtlist, M_DEVBUF); 1156 #endif 1157 ISP_LOCK(isp); 1158 return (1); 1159 } 1160 1161 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1162 &isp->isp_cdmap) != 0) { 1163 isp_prt(isp, ISP_LOGERR, 1164 "cannot allocate %d bytes of CCB memory", len); 1165 bus_dma_tag_destroy(isp->isp_cdmat); 1166 free(isp->isp_xflist, M_DEVBUF); 1167 #ifdef ISP_TARGET_MODE 1168 free(isp->isp_tgtlist, M_DEVBUF); 1169 #endif 1170 free(pcs->dmaps, M_DEVBUF); 1171 ISP_LOCK(isp); 1172 return (1); 1173 } 1174 1175 for (i = 0; i < isp->isp_maxcmds; i++) { 1176 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1177 if (error) { 1178 isp_prt(isp, ISP_LOGERR, 1179 "error %d creating per-cmd DMA maps", error); 1180 while (--i >= 0) { 1181 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1182 } 1183 goto bad; 1184 } 1185 } 1186 1187 im.isp = isp; 1188 im.error = 0; 1189 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1190 if (im.error) { 1191 isp_prt(isp, ISP_LOGERR, 1192 "error %d loading dma map for control areas", im.error); 1193 goto bad; 1194 } 1195 1196 isp->isp_rquest = base; 1197 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1198 isp->isp_result = base; 1199 if (IS_FC(isp)) { 1200 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1201 FCPARAM(isp)->isp_scratch = base; 1202 } 1203 ISP_LOCK(isp); 1204 return (0); 1205 1206 bad: 1207 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1208 bus_dma_tag_destroy(isp->isp_cdmat); 1209 free(isp->isp_xflist, M_DEVBUF); 1210 #ifdef ISP_TARGET_MODE 1211 free(isp->isp_tgtlist, M_DEVBUF); 1212 #endif 1213 free(pcs->dmaps, M_DEVBUF); 1214 ISP_LOCK(isp); 1215 isp->isp_rquest = NULL; 1216 return (1); 1217 } 1218 1219 typedef struct { 1220 struct ispsoftc *isp; 1221 void *cmd_token; 1222 void *rq; 1223 u_int16_t *nxtip; 1224 u_int16_t optr; 1225 u_int error; 1226 } mush_t; 1227 1228 #define MUSHERR_NOQENTRIES -2 1229 1230 #ifdef ISP_TARGET_MODE 1231 /* 1232 * We need to handle DMA for target mode differently from initiator mode. 1233 * 1234 * DMA mapping and construction and submission of CTIO Request Entries 1235 * and rendevous for completion are very tightly coupled because we start 1236 * out by knowing (per platform) how much data we have to move, but we 1237 * don't know, up front, how many DMA mapping segments will have to be used 1238 * cover that data, so we don't know how many CTIO Request Entries we 1239 * will end up using. Further, for performance reasons we may want to 1240 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1241 * 1242 * The standard vector still goes through isp_pci_dmasetup, but the callback 1243 * for the DMA mapping routines comes here instead with the whole transfer 1244 * mapped and a pointer to a partially filled in already allocated request 1245 * queue entry. We finish the job. 1246 */ 1247 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1248 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1249 1250 #define STATUS_WITH_DATA 1 1251 1252 static void 1253 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1254 { 1255 mush_t *mp; 1256 struct ccb_scsiio *csio; 1257 struct ispsoftc *isp; 1258 struct isp_pcisoftc *pcs; 1259 bus_dmamap_t *dp; 1260 ct_entry_t *cto, *qe; 1261 u_int8_t scsi_status; 1262 u_int16_t curi, nxti, handle; 1263 u_int32_t sflags; 1264 int32_t resid; 1265 int nth_ctio, nctios, send_status; 1266 1267 mp = (mush_t *) arg; 1268 if (error) { 1269 mp->error = error; 1270 return; 1271 } 1272 1273 isp = mp->isp; 1274 csio = mp->cmd_token; 1275 cto = mp->rq; 1276 curi = isp->isp_reqidx; 1277 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1278 1279 cto->ct_xfrlen = 0; 1280 cto->ct_seg_count = 0; 1281 cto->ct_header.rqs_entry_count = 1; 1282 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1283 1284 if (nseg == 0) { 1285 cto->ct_header.rqs_seqno = 1; 1286 isp_prt(isp, ISP_LOGTDEBUG1, 1287 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1288 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1289 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1290 cto->ct_scsi_status, cto->ct_resid); 1291 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1292 isp_put_ctio(isp, cto, qe); 1293 return; 1294 } 1295 1296 nctios = nseg / ISP_RQDSEG; 1297 if (nseg % ISP_RQDSEG) { 1298 nctios++; 1299 } 1300 1301 /* 1302 * Save syshandle, and potentially any SCSI status, which we'll 1303 * reinsert on the last CTIO we're going to send. 1304 */ 1305 1306 handle = cto->ct_syshandle; 1307 cto->ct_syshandle = 0; 1308 cto->ct_header.rqs_seqno = 0; 1309 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1310 1311 if (send_status) { 1312 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1313 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1314 /* 1315 * Preserve residual. 1316 */ 1317 resid = cto->ct_resid; 1318 1319 /* 1320 * Save actual SCSI status. 1321 */ 1322 scsi_status = cto->ct_scsi_status; 1323 1324 #ifndef STATUS_WITH_DATA 1325 sflags |= CT_NO_DATA; 1326 /* 1327 * We can't do a status at the same time as a data CTIO, so 1328 * we need to synthesize an extra CTIO at this level. 1329 */ 1330 nctios++; 1331 #endif 1332 } else { 1333 sflags = scsi_status = resid = 0; 1334 } 1335 1336 cto->ct_resid = 0; 1337 cto->ct_scsi_status = 0; 1338 1339 pcs = (struct isp_pcisoftc *)isp; 1340 dp = &pcs->dmaps[isp_handle_index(handle)]; 1341 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1342 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1343 } else { 1344 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1345 } 1346 1347 nxti = *mp->nxtip; 1348 1349 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 1350 int seglim; 1351 1352 seglim = nseg; 1353 if (seglim) { 1354 int seg; 1355 1356 if (seglim > ISP_RQDSEG) 1357 seglim = ISP_RQDSEG; 1358 1359 for (seg = 0; seg < seglim; seg++, nseg--) { 1360 /* 1361 * Unlike normal initiator commands, we don't 1362 * do any swizzling here. 1363 */ 1364 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1365 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1366 cto->ct_xfrlen += dm_segs->ds_len; 1367 dm_segs++; 1368 } 1369 cto->ct_seg_count = seg; 1370 } else { 1371 /* 1372 * This case should only happen when we're sending an 1373 * extra CTIO with final status. 1374 */ 1375 if (send_status == 0) { 1376 isp_prt(isp, ISP_LOGWARN, 1377 "tdma_mk ran out of segments"); 1378 mp->error = EINVAL; 1379 return; 1380 } 1381 } 1382 1383 /* 1384 * At this point, the fields ct_lun, ct_iid, ct_tagval, 1385 * ct_tagtype, and ct_timeout have been carried over 1386 * unchanged from what our caller had set. 1387 * 1388 * The dataseg fields and the seg_count fields we just got 1389 * through setting. The data direction we've preserved all 1390 * along and only clear it if we're now sending status. 1391 */ 1392 1393 if (nth_ctio == nctios - 1) { 1394 /* 1395 * We're the last in a sequence of CTIOs, so mark 1396 * this CTIO and save the handle to the CCB such that 1397 * when this CTIO completes we can free dma resources 1398 * and do whatever else we need to do to finish the 1399 * rest of the command. We *don't* give this to the 1400 * firmware to work on- the caller will do that. 1401 */ 1402 1403 cto->ct_syshandle = handle; 1404 cto->ct_header.rqs_seqno = 1; 1405 1406 if (send_status) { 1407 cto->ct_scsi_status = scsi_status; 1408 cto->ct_flags |= sflags; 1409 cto->ct_resid = resid; 1410 } 1411 if (send_status) { 1412 isp_prt(isp, ISP_LOGTDEBUG1, 1413 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 1414 "scsi status %x resid %d", 1415 cto->ct_fwhandle, csio->ccb_h.target_lun, 1416 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 1417 cto->ct_scsi_status, cto->ct_resid); 1418 } else { 1419 isp_prt(isp, ISP_LOGTDEBUG1, 1420 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 1421 cto->ct_fwhandle, csio->ccb_h.target_lun, 1422 cto->ct_iid, cto->ct_tag_val, 1423 cto->ct_flags); 1424 } 1425 isp_put_ctio(isp, cto, qe); 1426 ISP_TDQE(isp, "last tdma_mk", curi, cto); 1427 if (nctios > 1) { 1428 MEMORYBARRIER(isp, SYNC_REQUEST, 1429 curi, QENTRY_LEN); 1430 } 1431 } else { 1432 ct_entry_t *oqe = qe; 1433 1434 /* 1435 * Make sure syshandle fields are clean 1436 */ 1437 cto->ct_syshandle = 0; 1438 cto->ct_header.rqs_seqno = 0; 1439 1440 isp_prt(isp, ISP_LOGTDEBUG1, 1441 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 1442 cto->ct_fwhandle, csio->ccb_h.target_lun, 1443 cto->ct_iid, cto->ct_flags); 1444 1445 /* 1446 * Get a new CTIO 1447 */ 1448 qe = (ct_entry_t *) 1449 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1450 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 1451 if (nxti == mp->optr) { 1452 isp_prt(isp, ISP_LOGTDEBUG0, 1453 "Queue Overflow in tdma_mk"); 1454 mp->error = MUSHERR_NOQENTRIES; 1455 return; 1456 } 1457 1458 /* 1459 * Now that we're done with the old CTIO, 1460 * flush it out to the request queue. 1461 */ 1462 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 1463 isp_put_ctio(isp, cto, oqe); 1464 if (nth_ctio != 0) { 1465 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 1466 QENTRY_LEN); 1467 } 1468 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 1469 1470 /* 1471 * Reset some fields in the CTIO so we can reuse 1472 * for the next one we'll flush to the request 1473 * queue. 1474 */ 1475 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1476 cto->ct_header.rqs_entry_count = 1; 1477 cto->ct_header.rqs_flags = 0; 1478 cto->ct_status = 0; 1479 cto->ct_scsi_status = 0; 1480 cto->ct_xfrlen = 0; 1481 cto->ct_resid = 0; 1482 cto->ct_seg_count = 0; 1483 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1484 } 1485 } 1486 *mp->nxtip = nxti; 1487 } 1488 1489 /* 1490 * We don't have to do multiple CTIOs here. Instead, we can just do 1491 * continuation segments as needed. This greatly simplifies the code 1492 * improves performance. 1493 */ 1494 1495 static void 1496 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1497 { 1498 mush_t *mp; 1499 struct ccb_scsiio *csio; 1500 struct ispsoftc *isp; 1501 ct2_entry_t *cto, *qe; 1502 u_int16_t curi, nxti; 1503 int segcnt; 1504 1505 mp = (mush_t *) arg; 1506 if (error) { 1507 mp->error = error; 1508 return; 1509 } 1510 1511 isp = mp->isp; 1512 csio = mp->cmd_token; 1513 cto = mp->rq; 1514 1515 curi = isp->isp_reqidx; 1516 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1517 1518 if (nseg == 0) { 1519 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 1520 isp_prt(isp, ISP_LOGWARN, 1521 "dma2_tgt_fc, a status CTIO2 without MODE1 " 1522 "set (0x%x)", cto->ct_flags); 1523 mp->error = EINVAL; 1524 return; 1525 } 1526 /* 1527 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 1528 * flags to NO DATA and clear relative offset flags. 1529 * We preserve the ct_resid and the response area. 1530 */ 1531 cto->ct_header.rqs_seqno = 1; 1532 cto->ct_seg_count = 0; 1533 cto->ct_reloff = 0; 1534 isp_prt(isp, ISP_LOGTDEBUG1, 1535 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 1536 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 1537 cto->ct_iid, cto->ct_flags, cto->ct_status, 1538 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1539 isp_put_ctio2(isp, cto, qe); 1540 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 1541 return; 1542 } 1543 1544 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 1545 isp_prt(isp, ISP_LOGERR, 1546 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 1547 "(0x%x)", cto->ct_flags); 1548 mp->error = EINVAL; 1549 return; 1550 } 1551 1552 1553 nxti = *mp->nxtip; 1554 1555 /* 1556 * Set up the CTIO2 data segments. 1557 */ 1558 for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg; 1559 cto->ct_seg_count++, segcnt++) { 1560 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base = 1561 dm_segs[segcnt].ds_addr; 1562 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count = 1563 dm_segs[segcnt].ds_len; 1564 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1565 isp_prt(isp, ISP_LOGTDEBUG1, 1566 "isp_send_ctio2: ent0[%d]0x%llx:%lld", 1567 cto->ct_seg_count, (long long)dm_segs[segcnt].ds_addr, 1568 (long long)dm_segs[segcnt].ds_len); 1569 } 1570 1571 while (segcnt < nseg) { 1572 u_int16_t curip; 1573 int seg; 1574 ispcontreq_t local, *crq = &local, *qep; 1575 1576 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1577 curip = nxti; 1578 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 1579 if (nxti == mp->optr) { 1580 ISP_UNLOCK(isp); 1581 isp_prt(isp, ISP_LOGTDEBUG0, 1582 "tdma_mkfc: request queue overflow"); 1583 mp->error = MUSHERR_NOQENTRIES; 1584 return; 1585 } 1586 cto->ct_header.rqs_entry_count++; 1587 MEMZERO((void *)crq, sizeof (*crq)); 1588 crq->req_header.rqs_entry_count = 1; 1589 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1590 for (seg = 0; segcnt < nseg && seg < ISP_CDSEG; 1591 segcnt++, seg++) { 1592 crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr; 1593 crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len; 1594 isp_prt(isp, ISP_LOGTDEBUG1, 1595 "isp_send_ctio2: ent%d[%d]0x%llx:%lld", 1596 cto->ct_header.rqs_entry_count-1, seg, 1597 (long long) dm_segs[segcnt].ds_addr, 1598 (long long) dm_segs[segcnt].ds_len); 1599 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1600 cto->ct_seg_count++; 1601 } 1602 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 1603 isp_put_cont_req(isp, crq, qep); 1604 ISP_TDQE(isp, "cont entry", curi, qep); 1605 } 1606 1607 /* 1608 * No do final twiddling for the CTIO itself. 1609 */ 1610 cto->ct_header.rqs_seqno = 1; 1611 isp_prt(isp, ISP_LOGTDEBUG1, 1612 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 1613 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 1614 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 1615 cto->ct_resid); 1616 isp_put_ctio2(isp, cto, qe); 1617 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 1618 *mp->nxtip = nxti; 1619 } 1620 #endif 1621 1622 static void dma2(void *, bus_dma_segment_t *, int, int); 1623 1624 static void 1625 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1626 { 1627 mush_t *mp; 1628 struct ispsoftc *isp; 1629 struct ccb_scsiio *csio; 1630 struct isp_pcisoftc *pcs; 1631 bus_dmamap_t *dp; 1632 bus_dma_segment_t *eseg; 1633 ispreq_t *rq; 1634 int seglim, datalen; 1635 u_int16_t nxti; 1636 1637 mp = (mush_t *) arg; 1638 if (error) { 1639 mp->error = error; 1640 return; 1641 } 1642 1643 if (nseg < 1) { 1644 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 1645 mp->error = EFAULT; 1646 return; 1647 } 1648 csio = mp->cmd_token; 1649 isp = mp->isp; 1650 rq = mp->rq; 1651 pcs = (struct isp_pcisoftc *)mp->isp; 1652 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1653 nxti = *mp->nxtip; 1654 1655 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1656 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1657 } else { 1658 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1659 } 1660 1661 datalen = XS_XFRLEN(csio); 1662 1663 /* 1664 * We're passed an initial partially filled in entry that 1665 * has most fields filled in except for data transfer 1666 * related values. 1667 * 1668 * Our job is to fill in the initial request queue entry and 1669 * then to start allocating and filling in continuation entries 1670 * until we've covered the entire transfer. 1671 */ 1672 1673 if (IS_FC(isp)) { 1674 seglim = ISP_RQDSEG_T2; 1675 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 1676 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1677 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1678 } else { 1679 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1680 } 1681 } else { 1682 if (csio->cdb_len > 12) { 1683 seglim = 0; 1684 } else { 1685 seglim = ISP_RQDSEG; 1686 } 1687 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1688 rq->req_flags |= REQFLAG_DATA_IN; 1689 } else { 1690 rq->req_flags |= REQFLAG_DATA_OUT; 1691 } 1692 } 1693 1694 eseg = dm_segs + nseg; 1695 1696 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1697 if (IS_FC(isp)) { 1698 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 1699 rq2->req_dataseg[rq2->req_seg_count].ds_base = 1700 dm_segs->ds_addr; 1701 rq2->req_dataseg[rq2->req_seg_count].ds_count = 1702 dm_segs->ds_len; 1703 } else { 1704 rq->req_dataseg[rq->req_seg_count].ds_base = 1705 dm_segs->ds_addr; 1706 rq->req_dataseg[rq->req_seg_count].ds_count = 1707 dm_segs->ds_len; 1708 } 1709 datalen -= dm_segs->ds_len; 1710 rq->req_seg_count++; 1711 dm_segs++; 1712 } 1713 1714 while (datalen > 0 && dm_segs != eseg) { 1715 u_int16_t onxti; 1716 ispcontreq_t local, *crq = &local, *cqe; 1717 1718 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1719 onxti = nxti; 1720 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 1721 if (nxti == mp->optr) { 1722 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 1723 mp->error = MUSHERR_NOQENTRIES; 1724 return; 1725 } 1726 rq->req_header.rqs_entry_count++; 1727 MEMZERO((void *)crq, sizeof (*crq)); 1728 crq->req_header.rqs_entry_count = 1; 1729 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1730 1731 seglim = 0; 1732 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 1733 crq->req_dataseg[seglim].ds_base = 1734 dm_segs->ds_addr; 1735 crq->req_dataseg[seglim].ds_count = 1736 dm_segs->ds_len; 1737 rq->req_seg_count++; 1738 dm_segs++; 1739 seglim++; 1740 datalen -= dm_segs->ds_len; 1741 } 1742 isp_put_cont_req(isp, crq, cqe); 1743 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 1744 } 1745 *mp->nxtip = nxti; 1746 } 1747 1748 static int 1749 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq, 1750 u_int16_t *nxtip, u_int16_t optr) 1751 { 1752 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1753 ispreq_t *qep; 1754 bus_dmamap_t *dp = NULL; 1755 mush_t mush, *mp; 1756 void (*eptr)(void *, bus_dma_segment_t *, int, int); 1757 1758 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 1759 #ifdef ISP_TARGET_MODE 1760 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1761 if (IS_FC(isp)) { 1762 eptr = tdma_mkfc; 1763 } else { 1764 eptr = tdma_mk; 1765 } 1766 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1767 (csio->dxfer_len == 0)) { 1768 mp = &mush; 1769 mp->isp = isp; 1770 mp->cmd_token = csio; 1771 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 1772 mp->nxtip = nxtip; 1773 mp->optr = optr; 1774 mp->error = 0; 1775 (*eptr)(mp, NULL, 0, 0); 1776 goto mbxsync; 1777 } 1778 } else 1779 #endif 1780 eptr = dma2; 1781 1782 1783 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1784 (csio->dxfer_len == 0)) { 1785 rq->req_seg_count = 1; 1786 goto mbxsync; 1787 } 1788 1789 /* 1790 * Do a virtual grapevine step to collect info for 1791 * the callback dma allocation that we have to use... 1792 */ 1793 mp = &mush; 1794 mp->isp = isp; 1795 mp->cmd_token = csio; 1796 mp->rq = rq; 1797 mp->nxtip = nxtip; 1798 mp->optr = optr; 1799 mp->error = 0; 1800 1801 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1802 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 1803 int error, s; 1804 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1805 s = splsoftvm(); 1806 error = bus_dmamap_load(pcs->dmat, *dp, 1807 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 1808 if (error == EINPROGRESS) { 1809 bus_dmamap_unload(pcs->dmat, *dp); 1810 mp->error = EINVAL; 1811 isp_prt(isp, ISP_LOGERR, 1812 "deferred dma allocation not supported"); 1813 } else if (error && mp->error == 0) { 1814 #ifdef DIAGNOSTIC 1815 isp_prt(isp, ISP_LOGERR, 1816 "error %d in dma mapping code", error); 1817 #endif 1818 mp->error = error; 1819 } 1820 splx(s); 1821 } else { 1822 /* Pointer to physical buffer */ 1823 struct bus_dma_segment seg; 1824 seg.ds_addr = (bus_addr_t)csio->data_ptr; 1825 seg.ds_len = csio->dxfer_len; 1826 (*eptr)(mp, &seg, 1, 0); 1827 } 1828 } else { 1829 struct bus_dma_segment *segs; 1830 1831 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1832 isp_prt(isp, ISP_LOGERR, 1833 "Physical segment pointers unsupported"); 1834 mp->error = EINVAL; 1835 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1836 isp_prt(isp, ISP_LOGERR, 1837 "Virtual segment addresses unsupported"); 1838 mp->error = EINVAL; 1839 } else { 1840 /* Just use the segments provided */ 1841 segs = (struct bus_dma_segment *) csio->data_ptr; 1842 (*eptr)(mp, segs, csio->sglist_cnt, 0); 1843 } 1844 } 1845 if (mp->error) { 1846 int retval = CMD_COMPLETE; 1847 if (mp->error == MUSHERR_NOQENTRIES) { 1848 retval = CMD_EAGAIN; 1849 } else if (mp->error == EFBIG) { 1850 XS_SETERR(csio, CAM_REQ_TOO_BIG); 1851 } else if (mp->error == EINVAL) { 1852 XS_SETERR(csio, CAM_REQ_INVALID); 1853 } else { 1854 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 1855 } 1856 return (retval); 1857 } 1858 mbxsync: 1859 switch (rq->req_header.rqs_entry_type) { 1860 case RQSTYPE_REQUEST: 1861 isp_put_request(isp, rq, qep); 1862 break; 1863 case RQSTYPE_CMDONLY: 1864 isp_put_extended_request(isp, (ispextreq_t *)rq, 1865 (ispextreq_t *)qep); 1866 break; 1867 case RQSTYPE_T2RQS: 1868 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 1869 break; 1870 } 1871 return (CMD_QUEUED); 1872 } 1873 1874 static void 1875 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle) 1876 { 1877 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1878 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)]; 1879 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1880 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 1881 } else { 1882 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 1883 } 1884 bus_dmamap_unload(pcs->dmat, *dp); 1885 } 1886 1887 1888 static void 1889 isp_pci_reset1(struct ispsoftc *isp) 1890 { 1891 /* Make sure the BIOS is disabled */ 1892 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 1893 /* and enable interrupts */ 1894 ENABLE_INTS(isp); 1895 } 1896 1897 static void 1898 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg) 1899 { 1900 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1901 if (msg) 1902 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 1903 else 1904 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 1905 if (IS_SCSI(isp)) 1906 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 1907 else 1908 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 1909 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 1910 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 1911 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 1912 1913 1914 if (IS_SCSI(isp)) { 1915 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 1916 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 1917 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 1918 ISP_READ(isp, CDMA_FIFO_STS)); 1919 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 1920 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 1921 ISP_READ(isp, DDMA_FIFO_STS)); 1922 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 1923 ISP_READ(isp, SXP_INTERRUPT), 1924 ISP_READ(isp, SXP_GROSS_ERR), 1925 ISP_READ(isp, SXP_PINS_CTRL)); 1926 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 1927 } 1928 printf(" mbox regs: %x %x %x %x %x\n", 1929 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 1930 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 1931 ISP_READ(isp, OUTMAILBOX4)); 1932 printf(" PCI Status Command/Status=%x\n", 1933 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 1934 } 1935