/* $FreeBSD$ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);

#ifndef	ISP_CODE_ORG
#define	ISP_CODE_ORG	0x1000
#endif

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN		0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN	0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN	0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN		0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND	0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ	0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER	0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR	0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
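/*
 * Note: each PCI_QLOGIC_* value above is laid out to match what
 * pci_get_devid() returns - device id in the upper 16 bits, vendor id
 * in the lower 16. For example, PCI_QLOGIC_ISP1020 works out to
 * (0x1020 << 16) | 0x1077 == 0x10201077.
 */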
/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);

struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			parent_dmat;
	bus_dma_tag_t			cntrol_dmat;
	bus_dmamap_t			cntrol_dmap;
	bus_dmamap_t			*dmaps;
};
ispfwfunc *isp_get_firmware_p = NULL;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,	 isp_pci_probe),
	DEVMETHOD(device_attach, isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_VERSION(isp, 1);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (device_get_unit(dev) == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int tval, rtp, rgd, iqd, m1, m2, isp_debug, role;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	bus_size_t lim;
	const char *sptr;
	int locksetup = 0;

	/*
	 * Figure out if we're supposed to skip this one.
	 * If we are, we actually go to ISP_ROLE_NONE.
	 */
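	/*
	 * The integer hints consulted below ("disable", "role",
	 * "prefer_iomap", "prefer_memmap", etc.) are fetched with
	 * resource_int_value(9); on a hints-based system they would
	 * typically be supplied via /boot/device.hints, e.g.
	 * (hypothetical values):
	 *
	 *	hint.isp.0.disable="1"
	 *	hint.isp.0.role="2"
	 *	hint.isp.0.prefer_iomap="1"
	 */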
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "disable", &tval) == 0 && tval) {
		device_printf(dev, "device is disabled\n");
		/* but return 0 so the !$)$)*!$*) unit isn't reused */
		return (0);
	}

	role = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "role", &role) == 0 &&
	    ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) {
		device_printf(dev, "setting role to 0x%x\n", role);
	} else {
#ifdef	ISP_TARGET_MODE
		role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET;
#else
		role = ISP_DEFAULT_ROLES;
#endif
	}

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (pcs == NULL) {
		device_printf(dev, "cannot allocate softc\n");
		return (ENOMEM);
	}

	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 */
#ifdef	__alpha__
	m1 = PCIM_CMD_MEMEN;
	m2 = PCIM_CMD_PORTEN;
#else
	m1 = PCIM_CMD_PORTEN;
	m2 = PCIM_CMD_MEMEN;
#endif

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &tval) == 0 && tval != 0) {
		m1 = PCIM_CMD_PORTEN;
		m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_memmap", &tval) == 0 && tval != 0) {
		m1 = PCIM_CMD_MEMEN;
		m2 = PCIM_CMD_PORTEN;
	}

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 1);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose)
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	lim = BUS_SPACE_MAXSIZE_32BIT;
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
		lim = BUS_SPACE_MAXSIZE_24BIT;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	isp->isp_role = role;
	isp->isp_dev = dev;
	/*
	 * Try and find firmware for this device. The isp_get_firmware_p
	 * hook is non-NULL only if a separate f/w module has loaded and
	 * filled it in.
	 */
	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 1);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
	    255, lim, 0, &pcs->parent_dmat) != 0) {
		device_printf(dev, "could not create master dma tag\n");
		free(isp->isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
		return (ENXIO);
	}

	iqd = 0;
	irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
	    1, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
#ifdef	ISP_FW_CRASH_DUMP
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fw_dump_enable", &tval) == 0 && tval != 0) {
		size_t amt = 0;
		if (IS_2200(isp)) {
			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
		} else if (IS_23XX(isp)) {
			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
		}
		if (amt) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
		} else {
			device_printf(dev,
			    "f/w crash dumps not supported for this model\n");
		}
	}
#endif

	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
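	/*
	 * For example (hypothetical values):
	 *
	 *	hint.isp.0.portwwn="w50000000aaaa0001"
	 *	hint.isp.0.nodewwn="w50000000aaaa0002"
	 */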
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWPN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWNN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp_debug = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &isp_debug);

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", MTX_DEF);
	locksetup++;

#ifdef	ISP_SMPLOCK
#define	INTR_FLAGS	INTR_TYPE_CAM | INTR_MPSAFE | INTR_ENTROPY
#else
#define	INTR_FLAGS	INTR_TYPE_CAM | INTR_ENTROPY
#endif
	if (bus_setup_intr(dev, irq, INTR_FLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

	/*
	 * Last minute checks...
	 */
	if (IS_2312(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}

	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param)
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}

#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)

static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
	case ISPR2HST_RIO_16:
	case ISPR2HST_FPOST:
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}
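/*
 * Register accessors. IspVirt2Off() translates one of the driver's
 * virtual register offsets into a PCI bus space offset: the block bits
 * select an entry in the per-chip pci_poff[] table set up at attach
 * time, and the low byte is the offset within that block. The SXP and
 * DMA register banks sit behind select bits in BIU_CONF1, so the
 * routines below flip those bits around the access and then restore
 * the previous BIU_CONF1 contents.
 */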
static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}

static void isp_map_rquest(void *, bus_dma_segment_t *, int, int);
static void isp_map_result(void *, bus_dma_segment_t *, int, int);
static void isp_map_fcscrt(void *, bus_dma_segment_t *, int, int);

struct imush {
	struct ispsoftc *isp;
	int error;
};

static void
isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		imushp->isp->isp_rquest_dma = segs->ds_addr;
	}
}

static void
isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		imushp->isp->isp_result_dma = segs->ds_addr;
	}
}

static void
isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		fcparam *fcp = imushp->isp->isp_param;
		fcp->isp_scdma = segs->ds_addr;
	}
}
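/*
 * A note on the callbacks above (a sketch of the bus_dma usage here,
 * not a general guarantee): loading a map for memory obtained from
 * bus_dmamem_alloc() completes immediately, with the callback invoked
 * before bus_dmamap_load() returns, so isp_pci_mbxdma() below can
 * simply inspect im.error right after each load.
 */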
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error;
	bus_size_t lim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pci->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma maps");
		free(isp->isp_xflist, M_DEVBUF);
		return (1);
	}

	if (IS_FC(isp) || IS_ULTRA2(isp))
		lim = BUS_SPACE_MAXADDR + 1;
	else
		lim = BUS_SPACE_MAXADDR_24BIT + 1;

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}
	if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}
	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}

	isp->isp_rquest = base;
	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), isp_map_rquest, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for DMA request queue", im.error);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		isp->isp_rquest = NULL;
		return (1);
	}
	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)), isp_map_result, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for DMA result queue", im.error);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		isp->isp_rquest = NULL;
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			free(isp->isp_xflist, M_DEVBUF);
			free(pci->dmaps, M_DEVBUF);
			isp->isp_rquest = NULL;
			return (1);
		}
	}

	if (IS_FC(isp)) {
		fcparam *fcp = (fcparam *) isp->isp_param;
		fcp->isp_scratch = base +
		    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) +
		    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		im.error = 0;
		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
		if (im.error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d loading FC scratch area", im.error);
			free(isp->isp_xflist, M_DEVBUF);
			free(pci->dmaps, M_DEVBUF);
			isp->isp_rquest = NULL;
			return (1);
		}
	}
	return (0);
}
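/*
 * For reference, the control space set up above is a single contiguous
 * allocation laid out as:
 *
 *	base						request queue
 *	base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp))	result queue
 *	base + both queue sizes				FC scratch (FC only)
 */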
typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *nxtip;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	u_int8_t scsi_status;
	u_int16_t curi, nxti, handle;
	u_int32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */
	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pci = (struct isp_pcisoftc *)isp;
	dp = &pci->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;
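	/*
	 * Build one CTIO for (up to) each ISP_RQDSEG segments of the
	 * transfer. Only the last CTIO of the sequence carries the saved
	 * syshandle (and, if requested, the saved status); the earlier
	 * ones are flushed to the request queue as we go.
	 */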
	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */
			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	u_int8_t sense[QLTM_SENSELEN];
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	ct2_entry_t *cto, *qe;
	u_int16_t scsi_status, send_status, send_sense, handle;
	u_int16_t curi, nxti;
	int32_t resid;
	int nth_ctio, nctios;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		cto->ct_header.rqs_entry_count = 1;
		cto->ct_header.rqs_seqno = 1;
		/* ct_syshandle contains the handle set by caller */
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_flags |= CT2_NO_DATA;
		if (cto->ct_resid > 0)
			cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER;
		else if (cto->ct_resid < 0)
			cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGWARN,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}

	nctios = nseg / ISP_RQDSEG_T2;
	if (nseg % ISP_RQDSEG_T2) {
		nctios++;
	}

	/*
	 * Save the handle, status, reloff, and residual. We'll reinsert the
	 * handle into the last CTIO2 we're going to send, and reinsert status
	 * and residual (and possibly sense data) if that's to be sent as well.
	 *
	 * We preserve ct_reloff and adjust it for each data CTIO2 we send past
	 * the first one. This is needed so that the FCP DATA IUs being sent
	 * out have the correct offset (they can arrive at the other end out
	 * of order).
	 */
	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	send_status = (cto->ct_flags & CT2_SENDSTATUS) != 0;

	if (send_status) {
		cto->ct_flags &= ~(CT2_SENDSTATUS|CT2_CCINCR);

		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status. We'll reinsert the
		 * CT2_SNSLEN_VALID later if appropriate.
		 */
		scsi_status = cto->rsp.m0.ct_scsi_status & 0xff;
		send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID;

		/*
		 * If we're sending status and have a CHECK CONDITION and
		 * have sense data, we send one more CTIO2 with just the
		 * status and sense data. The upper layers have stashed
		 * the sense data in the dataseg structure for us.
		 */
		if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND &&
		    send_sense) {
			bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN);
			nctios++;
		}
	} else {
		scsi_status = send_sense = resid = 0;
	}

	cto->ct_resid = 0;
	cto->rsp.m0.ct_scsi_status = 0;
	MEMZERO(&cto->rsp, sizeof (cto->rsp));

	pci = (struct isp_pcisoftc *)isp;
	dp = &pci->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		u_int32_t oxfrlen;
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;
			if (seglim > ISP_RQDSEG_T2)
				seglim = ISP_RQDSEG_T2;
			for (seg = 0; seg < seglim; seg++, nseg--) {
				cto->rsp.m0.ct_dataseg[seg].ds_base =
				    dm_segs->ds_addr;
				cto->rsp.m0.ct_dataseg[seg].ds_count =
				    dm_segs->ds_len;
				cto->rsp.m0.ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
			oxfrlen = cto->rsp.m0.ct_xfrlen;
		} else {
			/*
			 * This case should only happen when we're sending a
			 * synthesized MODE1 final status with sense data.
			 */
			if (send_sense == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "dma2_tgt_fc ran out of segments, "
				    "no SENSE DATA");
				mp->error = EINVAL;
				return;
			}
			oxfrlen = 0;
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_rxid,
		 * ct_timeout have been carried over unchanged from what
		 * our caller had set.
		 *
		 * The field ct_reloff is either what the caller set, or
		 * what we've added to it below.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're sending a MODE1 status
		 * as the last CTIO.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIO2s, so mark this
			 * CTIO2 and save the handle to the CCB such that when
			 * this CTIO2 completes we can free dma resources and
			 * do whatever else we need to do to finish the rest
			 * of the command.
			 */
			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				/*
				 * Get 'real' residual and set flags based
				 * on it.
				 */
				cto->ct_resid = resid;
				if (send_sense) {
					MEMCPY(cto->rsp.m1.ct_resp, sense,
					    QLTM_SENSELEN);
					cto->rsp.m1.ct_senselen =
					    QLTM_SENSELEN;
					scsi_status |= CT2_SNSLEN_VALID;
					cto->rsp.m1.ct_scsi_status =
					    scsi_status;
					cto->ct_flags &= CT2_FLAG_MMASK;
					cto->ct_flags |= CT2_FLAG_MODE1 |
					    CT2_NO_DATA | CT2_SENDSTATUS |
					    CT2_CCINCR;
					if (cto->ct_resid > 0)
						cto->rsp.m1.ct_scsi_status |=
						    CT2_DATA_UNDER;
					else if (cto->ct_resid < 0)
						cto->rsp.m1.ct_scsi_status |=
						    CT2_DATA_OVER;
				} else {
					cto->rsp.m0.ct_scsi_status =
					    scsi_status;
					cto->ct_flags |=
					    CT2_SENDSTATUS | CT2_CCINCR;
					if (cto->ct_resid > 0)
						cto->rsp.m0.ct_scsi_status |=
						    CT2_DATA_UNDER;
					else if (cto->ct_resid < 0)
						cto->rsp.m0.ct_scsi_status |=
						    CT2_DATA_OVER;
				}
			}
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x"
			    " ssts 0x%x res %d", cto->ct_rxid,
			    csio->ccb_h.target_lun, (int) cto->ct_iid,
			    cto->ct_flags, cto->ct_status,
			    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
			isp_put_ctio2(isp, cto, qe);
			ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct2_entry_t *oqe = qe;

			/*
			 * Make sure handle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO2[%x] lun %d->iid%d flgs 0x%x",
			    cto->ct_rxid, csio->ccb_h.target_lun,
			    (int) cto->ct_iid, cto->ct_flags);
			/*
			 * Get a new CTIO2 entry from the request queue.
			 */
			qe = (ct2_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGWARN,
				    "Queue Overflow in dma2_tgt_fc");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO2,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "tdma_mkfc", curi, cto);
			isp_put_ctio2(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO2 so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			/*
			 * Adjust the new relative offset by the amount which
			 * is recorded in the data segment of the old CTIO2 we
			 * just finished filling out.
			 */
			cto->ct_reloff += oxfrlen;
			MEMZERO(&cto->rsp, sizeof (cto->rsp));
		}
	}
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pci = (struct isp_pcisoftc *)mp->isp;
	dp = &pci->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}
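	/*
	 * Whatever did not fit into the request entry's own dataseg
	 * array spills into RQSTYPE_DATASEG continuation entries, each
	 * holding up to ISP_CDSEG more segments, allocated from the
	 * request queue here.
	 */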
	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}

static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pci->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pci->parent_dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pci->parent_dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
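	/*
	 * The isp_put_* routines below copy the host copy of the request
	 * into the live queue entry, applying whatever byte order
	 * conversion the wire format requires.
	 */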
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pci->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pci->parent_dmat, *dp);
}

static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pci->pci_dev, PCIR_COMMAND, 1));
}