/* $FreeBSD$ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);

#ifndef ISP_CODE_ORG
#define ISP_CODE_ORG            0x1000
#endif

static struct ispmdvec mdvec = {
        isp_pci_rd_isr,
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        NULL,
        BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
        isp_pci_rd_isr,
        isp_pci_rd_reg_1080,
        isp_pci_wr_reg_1080,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        NULL,
        BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};
static struct ispmdvec mdvec_12160 = {
        isp_pci_rd_isr,
        isp_pci_rd_reg_1080,
        isp_pci_wr_reg_1080,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        NULL,
        BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
        isp_pci_rd_isr,
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
        isp_pci_rd_isr,
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
        isp_pci_rd_isr_2300,
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs
};

#ifndef PCIM_CMD_INVEN
#define PCIM_CMD_INVEN                  0x10
#endif
#ifndef PCIM_CMD_BUSMASTEREN
#define PCIM_CMD_BUSMASTEREN            0x0004
#endif
#ifndef PCIM_CMD_PERRESPEN
#define PCIM_CMD_PERRESPEN              0x0040
#endif
#ifndef PCIM_CMD_SEREN
#define PCIM_CMD_SEREN                  0x0100
#endif

#ifndef PCIR_COMMAND
#define PCIR_COMMAND                    0x04
#endif

#ifndef PCIR_CACHELNSZ
#define PCIR_CACHELNSZ                  0x0c
#endif

#ifndef PCIR_LATTIMER
#define PCIR_LATTIMER                   0x0d
#endif

#ifndef PCIR_ROMADDR
#define PCIR_ROMADDR                    0x30
#endif

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC               0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020      0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080      0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define PCI_PRODUCT_QLOGIC_ISP12160     0x1216
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240      0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define PCI_PRODUCT_QLOGIC_ISP1280      0x1280
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100      0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200      0x2200
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2300
#define PCI_PRODUCT_QLOGIC_ISP2300      0x2300
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2312
#define PCI_PRODUCT_QLOGIC_ISP2312      0x2312
#endif

#define PCI_QLOGIC_ISP1020      \
        ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080      \
        ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP12160     \
        ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240      \
        ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1280      \
        ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100      \
        ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200      \
        ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2300      \
        ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2312      \
        ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
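
/*
 * Each PCI_QLOGIC_* key above is laid out the way pci_get_devid()
 * returns the combined ID -- device ID in the high 16 bits, vendor ID
 * in the low 16 bits -- so, for example, PCI_QLOGIC_ISP1020 works out
 * to 0x10201077.  isp_pci_probe() below composes the same key by hand
 * from pci_get_device() and pci_get_vendor().
 */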
/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define AMI_RAID_SUBVENDOR_ID   0x101e

#define IO_MAP_REG      0x10
#define MEM_MAP_REG     0x14

#define PCI_DFLT_LTNCY  0x40
#define PCI_DFLT_LNSZ   0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
        struct ispsoftc                 pci_isp;
        device_t                        pci_dev;
        struct resource *               pci_reg;
        bus_space_tag_t                 pci_st;
        bus_space_handle_t              pci_sh;
        void *                          ih;
        int16_t                         pci_poff[_NREG_BLKS];
        bus_dma_tag_t                   dmat;
        bus_dmamap_t                    *dmaps;
};
ispfwfunc *isp_get_firmware_p = NULL;

static device_method_t isp_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,  isp_pci_probe),
        DEVMETHOD(device_attach, isp_pci_attach),
        { 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
        "isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_VERSION(isp, 1);

static int
isp_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_ISP1020:
                device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
                break;
        case PCI_QLOGIC_ISP1080:
                device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
                break;
        case PCI_QLOGIC_ISP1240:
                device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
                break;
        case PCI_QLOGIC_ISP1280:
                device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
                break;
        case PCI_QLOGIC_ISP12160:
                if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
                        return (ENXIO);
                }
                device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
                break;
        case PCI_QLOGIC_ISP2100:
                device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
                break;
        case PCI_QLOGIC_ISP2200:
                device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
                break;
        case PCI_QLOGIC_ISP2300:
                device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
                break;
        case PCI_QLOGIC_ISP2312:
                device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
                break;
        default:
                return (ENXIO);
        }
        if (device_get_unit(dev) == 0 && bootverbose) {
                printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
                    "Core Version %d.%d\n",
                    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
                    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
        }
        /*
         * XXXX: Here is where we might load the f/w module
         * XXXX: (or increase a reference count to it).
         */
        return (0);
}
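
/*
 * isp_pci_attach() below consults a number of per-unit tunables via
 * resource_int_value()/resource_string_value(); these come from the
 * hints mechanism, e.g. (hypothetical entries in /boot/device.hints):
 *
 *      hint.isp.0.disable="1"
 *      hint.isp.0.prefer_iomap="1"
 */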
static int
isp_pci_attach(device_t dev)
{
        struct resource *regs, *irq;
        int tval, rtp, rgd, iqd, m1, m2, isp_debug, role;
        u_int32_t data, cmd, linesz, psize, basetype;
        struct isp_pcisoftc *pcs;
        struct ispsoftc *isp = NULL;
        struct ispmdvec *mdvp;
        const char *sptr;
        int locksetup = 0;

        /*
         * Figure out if we're supposed to skip this one.
         * If we are, we actually go to ISP_ROLE_NONE.
         */

        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "disable", &tval) == 0 && tval) {
                device_printf(dev, "device is disabled\n");
                /* but return 0 so the !$)$)*!$*) unit isn't reused */
                return (0);
        }

        role = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "role", &role) == 0 &&
            ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) {
                device_printf(dev, "setting role to 0x%x\n", role);
        } else {
#ifdef ISP_TARGET_MODE
                role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET;
#else
                role = ISP_DEFAULT_ROLES;
#endif
        }

        pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (pcs == NULL) {
                device_printf(dev, "cannot allocate softc\n");
                return (ENOMEM);
        }

        /*
         * Figure out which we should try first - memory mapping or
         * i/o mapping?
         */
#ifdef __alpha__
        m1 = PCIM_CMD_MEMEN;
        m2 = PCIM_CMD_PORTEN;
#else
        m1 = PCIM_CMD_PORTEN;
        m2 = PCIM_CMD_MEMEN;
#endif

        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "prefer_iomap", &tval) == 0 && tval != 0) {
                m1 = PCIM_CMD_PORTEN;
                m2 = PCIM_CMD_MEMEN;
        }
        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "prefer_memmap", &tval) == 0 && tval != 0) {
                m1 = PCIM_CMD_MEMEN;
                m2 = PCIM_CMD_PORTEN;
        }

        linesz = PCI_DFLT_LNSZ;
        irq = regs = NULL;
        rgd = rtp = iqd = 0;

        /* the PCI command register is 16 bits wide */
        cmd = pci_read_config(dev, PCIR_COMMAND, 2);
        if (cmd & m1) {
                rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
                rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
                regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
        }
        if (regs == NULL && (cmd & m2)) {
                rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
                rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
                regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE);
        }
        if (regs == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto bad;
        }
        if (bootverbose)
                device_printf(dev, "using %s space register mapping\n",
                    (rgd == IO_MAP_REG)? "I/O" : "Memory");
        pcs->pci_dev = dev;
        pcs->pci_reg = regs;
        pcs->pci_st = rman_get_bustag(regs);
        pcs->pci_sh = rman_get_bushandle(regs);

        pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
        pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
        pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
        mdvp = &mdvec;
        basetype = ISP_HA_SCSI_UNKNOWN;
        psize = sizeof (sdparam);
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
                mdvp = &mdvec;
                basetype = ISP_HA_SCSI_UNKNOWN;
                psize = sizeof (sdparam);
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
                mdvp = &mdvec_1080;
                basetype = ISP_HA_SCSI_1080;
                psize = sizeof (sdparam);
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
                mdvp = &mdvec_1080;
                basetype = ISP_HA_SCSI_1240;
                psize = 2 * sizeof (sdparam);
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
                mdvp = &mdvec_1080;
                basetype = ISP_HA_SCSI_1280;
                psize = 2 * sizeof (sdparam);
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
                mdvp = &mdvec_12160;
                basetype = ISP_HA_SCSI_12160;
                psize = 2 * sizeof (sdparam);
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
                mdvp = &mdvec_2100;
                basetype = ISP_HA_FC_2100;
                psize = sizeof (fcparam);
                pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
                    PCI_MBOX_REGS2100_OFF;
                if (pci_get_revid(dev) < 3) {
                        /*
                         * XXX: Need to get the actual revision
                         * XXX: number of the 2100 FB. At any rate,
                         * XXX: lower cache line size for early revision
                         * XXX: boards.
                         */
                        linesz = 1;
                }
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
                mdvp = &mdvec_2200;
                basetype = ISP_HA_FC_2200;
                psize = sizeof (fcparam);
                pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
                    PCI_MBOX_REGS2100_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
                mdvp = &mdvec_2300;
                basetype = ISP_HA_FC_2300;
                psize = sizeof (fcparam);
                pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
                    PCI_MBOX_REGS2300_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) {
                mdvp = &mdvec_2300;
                basetype = ISP_HA_FC_2312;
                psize = sizeof (fcparam);
                pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
                    PCI_MBOX_REGS2300_OFF;
        }
        isp = &pcs->pci_isp;
        isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
        if (isp->isp_param == NULL) {
                device_printf(dev, "cannot allocate parameter data\n");
                goto bad;
        }
        isp->isp_mdvec = mdvp;
        isp->isp_type = basetype;
        isp->isp_revision = pci_get_revid(dev);
        isp->isp_role = role;
        isp->isp_dev = dev;

        /*
         * Try and find firmware for this device.
         */

        if (isp_get_firmware_p) {
                int device = (int) pci_get_device(dev);
#ifdef ISP_TARGET_MODE
                (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
                (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
        }
        /*
         * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
         * are set.
         */
        cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
            PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
        if (IS_2300(isp)) {     /* per QLogic errata */
                cmd &= ~PCIM_CMD_INVEN;
        }
        pci_write_config(dev, PCIR_COMMAND, cmd, 2);

        /*
         * Make sure the Cache Line Size register is set sensibly.
         */
        data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
        if (data != linesz) {
                data = PCI_DFLT_LNSZ;
                isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
                pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
        }

        /*
         * Make sure the Latency Timer is sane.
         */
        data = pci_read_config(dev, PCIR_LATTIMER, 1);
        if (data < PCI_DFLT_LTNCY) {
                data = PCI_DFLT_LTNCY;
                isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
                pci_write_config(dev, PCIR_LATTIMER, data, 1);
        }

        /*
         * Make sure we've disabled the ROM.
         */
        data = pci_read_config(dev, PCIR_ROMADDR, 4);
        data &= ~1;
        pci_write_config(dev, PCIR_ROMADDR, data, 4);

        iqd = 0;
        irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0,
            1, RF_ACTIVE | RF_SHAREABLE);
        if (irq == NULL) {
                device_printf(dev, "could not allocate interrupt\n");
                goto bad;
        }

        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "fwload_disable", &tval) == 0 && tval != 0) {
                isp->isp_confopts |= ISP_CFG_NORELOAD;
        }
        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "ignore_nvram", &tval) == 0 && tval != 0) {
                isp->isp_confopts |= ISP_CFG_NONVRAM;
        }
        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "fullduplex", &tval) == 0 && tval != 0) {
                isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
        }
#ifdef ISP_FW_CRASH_DUMP
        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "fw_dump_enable", &tval) == 0 && tval != 0) {
                size_t amt = 0;
                if (IS_2200(isp)) {
                        amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
                } else if (IS_23XX(isp)) {
                        amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
                }
                if (amt) {
                        FCPARAM(isp)->isp_dump_data =
                            malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
                } else {
                        device_printf(dev,
                            "f/w crash dumps not supported for this model\n");
                }
        }
#endif

        sptr = 0;
        if (resource_string_value(device_get_name(dev), device_get_unit(dev),
            "topology", (const char **) &sptr) == 0 && sptr != 0) {
                if (strcmp(sptr, "lport") == 0) {
                        isp->isp_confopts |= ISP_CFG_LPORT;
                } else if (strcmp(sptr, "nport") == 0) {
                        isp->isp_confopts |= ISP_CFG_NPORT;
                } else if (strcmp(sptr, "lport-only") == 0) {
                        isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
                } else if (strcmp(sptr, "nport-only") == 0) {
                        isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
                }
        }

        /*
         * Because the resource_*_value functions can neither return
         * 64 bit integer values nor be directly coerced to interpret
         * the right hand side of the assignment as you want them to
         * interpret it, we have to force WWN hint replacement to
         * specify WWN strings with a leading 'w' (e.g.
         * w50000000aaaa0001). Sigh.
         */
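        /*
         * For example, a (hypothetical) hints entry:
         *
         *      hint.isp.0.portwwn="w50000000aaaa0001"
         */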
        sptr = 0;
        tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
            "portwwn", (const char **) &sptr);
        if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
                char *eptr = 0;
                isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
                if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
                        device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
                        isp->isp_osinfo.default_port_wwn = 0;
                } else {
                        isp->isp_confopts |= ISP_CFG_OWNWWPN;
                }
        }
        if (isp->isp_osinfo.default_port_wwn == 0) {
                isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
        }

        sptr = 0;
        tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
            "nodewwn", (const char **) &sptr);
        if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
                char *eptr = 0;
                isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
                if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
                        device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
                        isp->isp_osinfo.default_node_wwn = 0;
                } else {
                        isp->isp_confopts |= ISP_CFG_OWNWWNN;
                }
        }
        if (isp->isp_osinfo.default_node_wwn == 0) {
                isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
        }

        isp_debug = 0;
        (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
            "debug", &isp_debug);

        /* Make sure the lock is set up. */
        mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
        locksetup++;

#ifdef ISP_SMPLOCK
#define INTR_FLAGS      INTR_TYPE_CAM | INTR_MPSAFE | INTR_ENTROPY
#else
#define INTR_FLAGS      INTR_TYPE_CAM | INTR_ENTROPY
#endif
        if (bus_setup_intr(dev, irq, INTR_FLAGS, isp_pci_intr, isp, &pcs->ih)) {
                device_printf(dev, "could not setup interrupt\n");
                goto bad;
        }

        /*
         * Set up logging levels.
         */
        if (isp_debug) {
                isp->isp_dblev = isp_debug;
        } else {
                isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
        }
        if (bootverbose)
                isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

        /*
         * Last minute checks...
         */
        if (IS_2312(isp)) {
                isp->isp_port = pci_get_function(dev);
        }

        /*
         * Make sure we're in reset state.
         */
        ISP_LOCK(isp);
        isp_reset(isp);
        if (isp->isp_state != ISP_RESETSTATE) {
                ISP_UNLOCK(isp);
                goto bad;
        }
        isp_init(isp);
        if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
                isp_uninit(isp);
                ISP_UNLOCK(isp);
                goto bad;
        }
        isp_attach(isp);
        if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
                isp_uninit(isp);
                ISP_UNLOCK(isp);
                goto bad;
        }
        /*
         * XXXX: Here is where we might unload the f/w module
         * XXXX: (or decrease the reference count to it).
         */
        ISP_UNLOCK(isp);
        return (0);

bad:

        if (pcs && pcs->ih) {
                (void) bus_teardown_intr(dev, irq, pcs->ih);
        }

        if (locksetup && isp) {
                mtx_destroy(&isp->isp_osinfo.lock);
        }

        if (irq) {
                (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
        }


        if (regs) {
                (void) bus_release_resource(dev, rtp, rgd, regs);
        }

        if (pcs) {
                if (pcs->pci_isp.isp_param)
                        free(pcs->pci_isp.isp_param, M_DEVBUF);
                free(pcs, M_DEVBUF);
        }

        /*
         * XXXX: Here is where we might unload the f/w module
         * XXXX: (or decrease the reference count to it).
         */
        return (ENXIO);
}
static void
isp_pci_intr(void *arg)
{
        struct ispsoftc *isp = arg;
        u_int16_t isr, sema, mbox;

        ISP_LOCK(isp);
        isp->isp_intcnt++;
        if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
                isp->isp_intbogus++;
        } else {
                int iok = isp->isp_osinfo.intsok;
                isp->isp_osinfo.intsok = 0;
                isp_intr(isp, isr, sema, mbox);
                isp->isp_osinfo.intsok = iok;
        }
        ISP_UNLOCK(isp);
}


#define IspVirt2Off(a, x)       \
        (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
        _BLK_REG_SHFT] + ((x) & 0xff))

#define BXR2(pcs, off)          \
        bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define BXW2(pcs, off, v)       \
        bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)


static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        u_int16_t val0, val1;
        int i = 0;

        /*
         * Read the register repeatedly until two consecutive reads
         * agree, giving up after 1000 attempts.
         */
        do {
                val0 = BXR2(pcs, IspVirt2Off(isp, off));
                val1 = BXR2(pcs, IspVirt2Off(isp, off));
        } while (val0 != val1 && ++i < 1000);
        if (val0 != val1) {
                return (1);
        }
        *rp = val0;
        return (0);
}

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        u_int16_t isr, sema;

        if (IS_2100(isp)) {
                if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
                        return (0);
                }
                if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
                        return (0);
                }
        } else {
                isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
                sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
        }
        isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
        isr &= INT_PENDING_MASK(isp);
        sema &= BIU_SEMA_LOCK;
        if (isr == 0 && sema == 0) {
                return (0);
        }
        *isrp = isr;
        if ((*semap = sema) != 0) {
                if (IS_2100(isp)) {
                        if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
                                return (0);
                        }
                } else {
                        *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
                }
        }
        return (1);
}

static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        u_int32_t r2hisr;

        if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
                *isrp = 0;
                return (0);
        }
        r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
            IspVirt2Off(pcs, BIU_R2HSTSLO));
        isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
        if ((r2hisr & BIU_R2HST_INTR) == 0) {
                *isrp = 0;
                return (0);
        }
        switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
        case ISPR2HST_ROM_MBX_OK:
        case ISPR2HST_ROM_MBX_FAIL:
        case ISPR2HST_MBX_OK:
        case ISPR2HST_MBX_FAIL:
        case ISPR2HST_ASYNC_EVENT:
        case ISPR2HST_RIO_16:
        case ISPR2HST_FPOST:
        case ISPR2HST_FPOST_CTIO:
                *isrp = r2hisr & 0xffff;
                *mbox0p = (r2hisr >> 16);
                *semap = 1;
                return (1);
        case ISPR2HST_RSPQ_UPDATE:
                *isrp = r2hisr & 0xffff;
                *mbox0p = 0;
                *semap = 0;
                return (1);
        default:
                return (0);
        }
}
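
/*
 * SXP registers share PCI offsets with other register blocks; the
 * accessors below page them in by setting BIU_PCI_CONF1_SXP in the
 * BIU configuration register around the access and restoring the old
 * contents afterwards.  The caller is assumed to have paused the RISC
 * processor first.
 */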
static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
        u_int16_t rv;
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        int oldconf = 0;

        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
                    oldconf | BIU_PCI_CONF1_SXP);
        }
        rv = BXR2(pcs, IspVirt2Off(isp, regoff));
        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
        }
        return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        int oldconf = 0;

        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
                    oldconf | BIU_PCI_CONF1_SXP);
        }
        BXW2(pcs, IspVirt2Off(isp, regoff), val);
        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
        }
}
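
/*
 * The 1080/1240/1280/12160 variants have two SXP banks and a separate
 * DMA register block; CONF1 selects among them (SXP bank 0, SXP bank 1,
 * or the DMA block) rather than a single SXP page.
 */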
static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
        u_int16_t rv, oc = 0;
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
            (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
                u_int16_t tc;
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                tc = oc & ~BIU_PCI1080_CONF1_DMA;
                if (regoff & SXP_BANK1_SELECT)
                        tc |= BIU_PCI1080_CONF1_SXP1;
                else
                        tc |= BIU_PCI1080_CONF1_SXP0;
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
        } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
                oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
                    oc | BIU_PCI1080_CONF1_DMA);
        }
        rv = BXR2(pcs, IspVirt2Off(isp, regoff));
        if (oc) {
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
        }
        return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        int oc = 0;

        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
            (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
                u_int16_t tc;
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                tc = oc & ~BIU_PCI1080_CONF1_DMA;
                if (regoff & SXP_BANK1_SELECT)
                        tc |= BIU_PCI1080_CONF1_SXP1;
                else
                        tc |= BIU_PCI1080_CONF1_SXP0;
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
        } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
                oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
                    oc | BIU_PCI1080_CONF1_DMA);
        }
        BXW2(pcs, IspVirt2Off(isp, regoff), val);
        if (oc) {
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
        }
}


struct imush {
        struct ispsoftc *isp;
        int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct imush *imushp = (struct imush *) arg;
        if (error) {
                imushp->error = error;
        } else {
                struct ispsoftc *isp = imushp->isp;
                bus_addr_t addr = segs->ds_addr;

                isp->isp_rquest_dma = addr;
                addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
                isp->isp_result_dma = addr;
                if (IS_FC(isp)) {
                        addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
                        FCPARAM(isp)->isp_scdma = addr;
                }
        }
}
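
/*
 * imc() above is the bus_dmamap_load() callback for the control area
 * allocated in isp_pci_mbxdma() below.  It carves the single contiguous
 * load up in the same order the area was sized: request queue first,
 * then result queue, then (on Fibre Channel cards) the scratch area.
 */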
/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS       ((MAXPHYS / PAGE_SIZE) + 1)

static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        caddr_t base;
        u_int32_t len;
        int i, error, ns;
        bus_size_t bl;
        struct imush im;

        /*
         * Already been here? If so, leave...
         */
        if (isp->isp_rquest) {
                return (0);
        }

        if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
                bl = BUS_SPACE_UNRESTRICTED;
        } else {
                bl = BUS_SPACE_MAXADDR_24BIT;
        }

        if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
            ISP_NSEGS, bl, 0, &pcs->dmat)) {
                isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
                return (1);
        }


        len = sizeof (XS_T **) * isp->isp_maxcmds;
        isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
        if (isp->isp_xflist == NULL) {
                isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
                return (1);
        }
        len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
        pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
        if (pcs->dmaps == NULL) {
                isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
                free(isp->isp_xflist, M_DEVBUF);
                return (1);
        }

        /*
         * Allocate and map the request, result queues, plus FC scratch area.
         */
        len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
        len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
        if (IS_FC(isp)) {
                len += ISP2100_SCRLEN;
        }

        ns = (len / PAGE_SIZE) + 1;
        if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, 0, BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR, NULL, NULL, len, ns, bl, 0, &isp->isp_cdmat)) {
                isp_prt(isp, ISP_LOGERR,
                    "cannot create a dma tag for control spaces");
                free(pcs->dmaps, M_DEVBUF);
                free(isp->isp_xflist, M_DEVBUF);
                return (1);
        }

        if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
            &isp->isp_cdmap) != 0) {
                isp_prt(isp, ISP_LOGERR,
                    "cannot allocate %d bytes of CCB memory", len);
                bus_dma_tag_destroy(isp->isp_cdmat);
                free(isp->isp_xflist, M_DEVBUF);
                free(pcs->dmaps, M_DEVBUF);
                return (1);
        }

        for (i = 0; i < isp->isp_maxcmds; i++) {
                error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
                if (error) {
                        isp_prt(isp, ISP_LOGERR,
                            "error %d creating per-cmd DMA maps", error);
                        while (--i >= 0) {
                                bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
                        }
                        goto bad;
                }
        }

        im.isp = isp;
        im.error = 0;
        bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
        if (im.error) {
                isp_prt(isp, ISP_LOGERR,
                    "error %d loading dma map for control areas", im.error);
                goto bad;
        }

        isp->isp_rquest = base;
        base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
        isp->isp_result = base;
        if (IS_FC(isp)) {
                base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
                FCPARAM(isp)->isp_scratch = base;
        }
        return (0);

bad:
        bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
        bus_dma_tag_destroy(isp->isp_cdmat);
        free(isp->isp_xflist, M_DEVBUF);
        free(pcs->dmaps, M_DEVBUF);
        isp->isp_rquest = NULL;
        return (1);
}

typedef struct {
        struct ispsoftc *isp;
        void *cmd_token;
        void *rq;
        u_int16_t *nxtip;
        u_int16_t optr;
        u_int error;
} mush_t;

#define MUSHERR_NOQENTRIES      -2

#ifdef ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
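/*
 * For example: with ISP_RQDSEG data segments fitting in each CTIO, a
 * transfer that maps to nseg segments needs
 *
 *      nctios = nseg / ISP_RQDSEG + ((nseg % ISP_RQDSEG) ? 1 : 0)
 *
 * CTIOs (so, e.g., 9 segments at 4 per CTIO would mean 3 CTIOs), plus
 * possibly one more if status has to go out in a CTIO of its own.
 */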
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define STATUS_WITH_DATA        1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
        mush_t *mp;
        struct ccb_scsiio *csio;
        struct ispsoftc *isp;
        struct isp_pcisoftc *pcs;
        bus_dmamap_t *dp;
        ct_entry_t *cto, *qe;
        u_int8_t scsi_status;
        u_int16_t curi, nxti, handle;
        u_int32_t sflags;
        int32_t resid;
        int nth_ctio, nctios, send_status;

        mp = (mush_t *) arg;
        if (error) {
                mp->error = error;
                return;
        }

        isp = mp->isp;
        csio = mp->cmd_token;
        cto = mp->rq;
        curi = isp->isp_reqidx;
        qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

        cto->ct_xfrlen = 0;
        cto->ct_seg_count = 0;
        cto->ct_header.rqs_entry_count = 1;
        MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

        if (nseg == 0) {
                cto->ct_header.rqs_seqno = 1;
                isp_prt(isp, ISP_LOGTDEBUG1,
                    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
                    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
                    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
                    cto->ct_scsi_status, cto->ct_resid);
                ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
                isp_put_ctio(isp, cto, qe);
                return;
        }

        nctios = nseg / ISP_RQDSEG;
        if (nseg % ISP_RQDSEG) {
                nctios++;
        }

        /*
         * Save syshandle, and potentially any SCSI status, which we'll
         * reinsert on the last CTIO we're going to send.
         */

        handle = cto->ct_syshandle;
        cto->ct_syshandle = 0;
        cto->ct_header.rqs_seqno = 0;
        send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

        if (send_status) {
                sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
                cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
                /*
                 * Preserve residual.
                 */
                resid = cto->ct_resid;

                /*
                 * Save actual SCSI status.
                 */
                scsi_status = cto->ct_scsi_status;

#ifndef STATUS_WITH_DATA
                sflags |= CT_NO_DATA;
                /*
                 * We can't do a status at the same time as a data CTIO, so
                 * we need to synthesize an extra CTIO at this level.
                 */
                nctios++;
#endif
        } else {
                sflags = scsi_status = resid = 0;
        }

        cto->ct_resid = 0;
        cto->ct_scsi_status = 0;

        pcs = (struct isp_pcisoftc *)isp;
        dp = &pcs->dmaps[isp_handle_index(handle)];
        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
        } else {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
        }
        nxti = *mp->nxtip;

        for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
                int seglim;

                seglim = nseg;
                if (seglim) {
                        int seg;

                        if (seglim > ISP_RQDSEG)
                                seglim = ISP_RQDSEG;

                        for (seg = 0; seg < seglim; seg++, nseg--) {
                                /*
                                 * Unlike normal initiator commands, we don't
                                 * do any swizzling here.
                                 */
                                cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
                                cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
                                cto->ct_xfrlen += dm_segs->ds_len;
                                dm_segs++;
                        }
                        cto->ct_seg_count = seg;
                } else {
                        /*
                         * This case should only happen when we're sending an
                         * extra CTIO with final status.
                         */
                        if (send_status == 0) {
                                isp_prt(isp, ISP_LOGWARN,
                                    "tdma_mk ran out of segments");
                                mp->error = EINVAL;
                                return;
                        }
                }

                /*
                 * At this point, the fields ct_lun, ct_iid, ct_tagval,
                 * ct_tagtype, and ct_timeout have been carried over
                 * unchanged from what our caller had set.
                 *
                 * The dataseg fields and the seg_count fields we just got
                 * through setting. The data direction we've preserved all
                 * along and only clear it if we're now sending status.
                 */

                if (nth_ctio == nctios - 1) {
                        /*
                         * We're the last in a sequence of CTIOs, so mark
                         * this CTIO and save the handle to the CCB such that
                         * when this CTIO completes we can free dma resources
                         * and do whatever else we need to do to finish the
                         * rest of the command. We *don't* give this to the
                         * firmware to work on- the caller will do that.
                         */

                        cto->ct_syshandle = handle;
                        cto->ct_header.rqs_seqno = 1;

                        if (send_status) {
                                cto->ct_scsi_status = scsi_status;
                                cto->ct_flags |= sflags;
                                cto->ct_resid = resid;
                        }
                        if (send_status) {
                                isp_prt(isp, ISP_LOGTDEBUG1,
                                    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
                                    "scsi status %x resid %d",
                                    cto->ct_fwhandle, csio->ccb_h.target_lun,
                                    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
                                    cto->ct_scsi_status, cto->ct_resid);
                        } else {
                                isp_prt(isp, ISP_LOGTDEBUG1,
                                    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
                                    cto->ct_fwhandle, csio->ccb_h.target_lun,
                                    cto->ct_iid, cto->ct_tag_val,
                                    cto->ct_flags);
                        }
                        isp_put_ctio(isp, cto, qe);
                        ISP_TDQE(isp, "last tdma_mk", curi, cto);
                        if (nctios > 1) {
                                MEMORYBARRIER(isp, SYNC_REQUEST,
                                    curi, QENTRY_LEN);
                        }
                } else {
                        ct_entry_t *oqe = qe;

                        /*
                         * Make sure syshandle fields are clean
                         */
                        cto->ct_syshandle = 0;
                        cto->ct_header.rqs_seqno = 0;

                        isp_prt(isp, ISP_LOGTDEBUG1,
                            "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
                            cto->ct_fwhandle, csio->ccb_h.target_lun,
                            cto->ct_iid, cto->ct_flags);

                        /*
                         * Get a new CTIO
                         */
                        qe = (ct_entry_t *)
                            ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
                        nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
                        if (nxti == mp->optr) {
                                isp_prt(isp, ISP_LOGTDEBUG0,
                                    "Queue Overflow in tdma_mk");
                                mp->error = MUSHERR_NOQENTRIES;
                                return;
                        }

                        /*
                         * Now that we're done with the old CTIO,
                         * flush it out to the request queue.
                         */
                        ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
                        isp_put_ctio(isp, cto, oqe);
                        if (nth_ctio != 0) {
                                MEMORYBARRIER(isp, SYNC_REQUEST, curi,
                                    QENTRY_LEN);
                        }
                        curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

                        /*
                         * Reset some fields in the CTIO so we can reuse
                         * for the next one we'll flush to the request
                         * queue.
                         */
                        cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
                        cto->ct_header.rqs_entry_count = 1;
                        cto->ct_header.rqs_flags = 0;
                        cto->ct_status = 0;
                        cto->ct_scsi_status = 0;
                        cto->ct_xfrlen = 0;
                        cto->ct_resid = 0;
                        cto->ct_seg_count = 0;
                        MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
                }
        }
        *mp->nxtip = nxti;
}
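
/*
 * tdma_mkfc is the Fibre Channel counterpart of tdma_mk: it builds
 * CTIO2 entries (ISP_RQDSEG_T2 segments apiece), keeping ct_reloff
 * current so the FCP DATA IUs carry the right relative offsets, and
 * using a MODE1 CTIO2 when status (and possibly sense data) has to go
 * out by itself.
 */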
static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
        mush_t *mp;
        u_int8_t sense[QLTM_SENSELEN];
        struct ccb_scsiio *csio;
        struct ispsoftc *isp;
        struct isp_pcisoftc *pcs;
        bus_dmamap_t *dp;
        ct2_entry_t *cto, *qe;
        u_int16_t scsi_status, send_status, send_sense, handle;
        u_int16_t curi, nxti;
        int32_t resid;
        int nth_ctio, nctios;

        mp = (mush_t *) arg;
        if (error) {
                mp->error = error;
                return;
        }

        isp = mp->isp;
        csio = mp->cmd_token;
        cto = mp->rq;
        curi = isp->isp_reqidx;
        qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

        if (nseg == 0) {
                if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
                        isp_prt(isp, ISP_LOGWARN,
                            "dma2_tgt_fc, a status CTIO2 without MODE1 "
                            "set (0x%x)", cto->ct_flags);
                        mp->error = EINVAL;
                        return;
                }
                cto->ct_header.rqs_entry_count = 1;
                cto->ct_header.rqs_seqno = 1;
                /* ct_syshandle contains the handle set by caller */
                /*
                 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
                 * flags to NO DATA and clear relative offset flags.
                 * We preserve the ct_resid and the response area.
                 */
                cto->ct_flags |= CT2_NO_DATA;
                if (cto->ct_resid > 0)
                        cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER;
                else if (cto->ct_resid < 0)
                        cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER;
                cto->ct_seg_count = 0;
                cto->ct_reloff = 0;
                isp_prt(isp, ISP_LOGTDEBUG1,
                    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
                    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
                    cto->ct_iid, cto->ct_flags, cto->ct_status,
                    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
                isp_put_ctio2(isp, cto, qe);
                ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
                return;
        }

        if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
                isp_prt(isp, ISP_LOGWARN,
                    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
                    "(0x%x)", cto->ct_flags);
                mp->error = EINVAL;
                return;
        }

        nctios = nseg / ISP_RQDSEG_T2;
        if (nseg % ISP_RQDSEG_T2) {
                nctios++;
        }

        /*
         * Save the handle, status, reloff, and residual. We'll reinsert the
         * handle into the last CTIO2 we're going to send, and reinsert status
         * and residual (and possibly sense data) if that's to be sent as well.
         *
         * We preserve ct_reloff and adjust it for each data CTIO2 we send past
         * the first one. This is needed so that the FCP DATA IUs being sent
         * out have the correct offset (they can arrive at the other end out
         * of order).
         */

        handle = cto->ct_syshandle;
        cto->ct_syshandle = 0;
        send_status = (cto->ct_flags & CT2_SENDSTATUS) != 0;

        if (send_status) {
                cto->ct_flags &= ~(CT2_SENDSTATUS|CT2_CCINCR);

                /*
                 * Preserve residual.
                 */
                resid = cto->ct_resid;

                /*
                 * Save actual SCSI status. We'll reinsert the
                 * CT2_SNSLEN_VALID later if appropriate.
                 */
                scsi_status = cto->rsp.m0.ct_scsi_status & 0xff;
                send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID;

                /*
                 * If we're sending status and have a CHECK CONDITION and
                 * have sense data, we send one more CTIO2 with just the
                 * status and sense data. The upper layers have stashed
                 * the sense data in the dataseg structure for us.
                 */

                if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND &&
                    send_sense) {
                        bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN);
                        nctios++;
                }
        } else {
                scsi_status = send_sense = resid = 0;
        }

        cto->ct_resid = 0;
        cto->rsp.m0.ct_scsi_status = 0;
        MEMZERO(&cto->rsp, sizeof (cto->rsp));

        pcs = (struct isp_pcisoftc *)isp;
        dp = &pcs->dmaps[isp_handle_index(handle)];
        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
        } else {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
        }

        nxti = *mp->nxtip;

        for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
                u_int32_t oxfrlen;
                int seglim;

                seglim = nseg;
                if (seglim) {
                        int seg;
                        if (seglim > ISP_RQDSEG_T2)
                                seglim = ISP_RQDSEG_T2;
                        for (seg = 0; seg < seglim; seg++, nseg--) {
                                cto->rsp.m0.ct_dataseg[seg].ds_base =
                                    dm_segs->ds_addr;
                                cto->rsp.m0.ct_dataseg[seg].ds_count =
                                    dm_segs->ds_len;
                                cto->rsp.m0.ct_xfrlen += dm_segs->ds_len;
                                dm_segs++;
                        }
                        cto->ct_seg_count = seg;
                        oxfrlen = cto->rsp.m0.ct_xfrlen;
                } else {
                        /*
                         * This case should only happen when we're sending a
                         * synthesized MODE1 final status with sense data.
                         */
                        if (send_sense == 0) {
                                isp_prt(isp, ISP_LOGWARN,
                                    "dma2_tgt_fc ran out of segments, "
                                    "no SENSE DATA");
                                mp->error = EINVAL;
                                return;
                        }
                        oxfrlen = 0;
                }

                /*
                 * At this point, the fields ct_lun, ct_iid, ct_rxid,
                 * ct_timeout have been carried over unchanged from what
                 * our caller had set.
                 *
                 * The field ct_reloff is either what the caller set, or
                 * what we've added to below.
                 *
                 * The dataseg fields and the seg_count fields we just got
                 * through setting. The data direction we've preserved all
                 * along and only clear it if we're sending a MODE1 status
                 * as the last CTIO.
                 *
                 */

                if (nth_ctio == nctios - 1) {
                        /*
                         * We're the last in a sequence of CTIO2s, so mark this
                         * CTIO2 and save the handle to the CCB such that when
                         * this CTIO2 completes we can free dma resources and
                         * do whatever else we need to do to finish the rest
                         * of the command.
                         */

                        cto->ct_syshandle = handle;
                        cto->ct_header.rqs_seqno = 1;

                        if (send_status) {
                                /*
                                 * Get 'real' residual and set flags based
                                 * on it.
                                 */
                                cto->ct_resid = resid;
                                if (send_sense) {
                                        MEMCPY(cto->rsp.m1.ct_resp, sense,
                                            QLTM_SENSELEN);
                                        cto->rsp.m1.ct_senselen =
                                            QLTM_SENSELEN;
                                        scsi_status |= CT2_SNSLEN_VALID;
                                        cto->rsp.m1.ct_scsi_status =
                                            scsi_status;
                                        cto->ct_flags &= CT2_FLAG_MMASK;
                                        cto->ct_flags |= CT2_FLAG_MODE1 |
                                            CT2_NO_DATA | CT2_SENDSTATUS |
                                            CT2_CCINCR;
                                        if (cto->ct_resid > 0)
                                                cto->rsp.m1.ct_scsi_status |=
                                                    CT2_DATA_UNDER;
                                        else if (cto->ct_resid < 0)
                                                cto->rsp.m1.ct_scsi_status |=
                                                    CT2_DATA_OVER;
                                } else {
                                        cto->rsp.m0.ct_scsi_status =
                                            scsi_status;
                                        cto->ct_flags |=
                                            CT2_SENDSTATUS | CT2_CCINCR;
                                        if (cto->ct_resid > 0)
                                                cto->rsp.m0.ct_scsi_status |=
                                                    CT2_DATA_UNDER;
                                        else if (cto->ct_resid < 0)
                                                cto->rsp.m0.ct_scsi_status |=
                                                    CT2_DATA_OVER;
                                }
                        }
                        isp_prt(isp, ISP_LOGTDEBUG1,
                            "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x"
                            " ssts 0x%x res %d", cto->ct_rxid,
                            csio->ccb_h.target_lun, (int) cto->ct_iid,
                            cto->ct_flags, cto->ct_status,
                            cto->rsp.m1.ct_scsi_status, cto->ct_resid);
                        isp_put_ctio2(isp, cto, qe);
                        ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
                        if (nctios > 1) {
                                MEMORYBARRIER(isp, SYNC_REQUEST,
                                    curi, QENTRY_LEN);
                        }
                } else {
                        ct2_entry_t *oqe = qe;

                        /*
                         * Make sure handle fields are clean
                         */
                        cto->ct_syshandle = 0;
                        cto->ct_header.rqs_seqno = 0;
                        isp_prt(isp, ISP_LOGTDEBUG1,
                            "CTIO2[%x] lun %d->iid%d flgs 0x%x",
                            cto->ct_rxid, csio->ccb_h.target_lun,
                            (int) cto->ct_iid, cto->ct_flags);
                        /*
                         * Get a new CTIO2 entry from the request queue.
                         */
                        qe = (ct2_entry_t *)
                            ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
                        nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
                        if (nxti == mp->optr) {
                                isp_prt(isp, ISP_LOGWARN,
                                    "Queue Overflow in dma2_tgt_fc");
                                mp->error = MUSHERR_NOQENTRIES;
                                return;
                        }

                        /*
                         * Now that we're done with the old CTIO2,
                         * flush it out to the request queue.
                         */
                        ISP_TDQE(isp, "tdma_mkfc", curi, cto);
                        isp_put_ctio2(isp, cto, oqe);
                        if (nth_ctio != 0) {
                                MEMORYBARRIER(isp, SYNC_REQUEST, curi,
                                    QENTRY_LEN);
                        }
                        curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

                        /*
                         * Reset some fields in the CTIO2 so we can reuse
                         * for the next one we'll flush to the request
                         * queue.
                         */
                        cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
                        cto->ct_header.rqs_entry_count = 1;
                        cto->ct_header.rqs_flags = 0;
                        cto->ct_status = 0;
                        cto->ct_resid = 0;
                        cto->ct_seg_count = 0;
                        /*
                         * Adjust the new relative offset by the amount which
                         * is recorded in the data segment of the old CTIO2 we
                         * just finished filling out.
                         */
                        cto->ct_reloff += oxfrlen;
                        MEMZERO(&cto->rsp, sizeof (cto->rsp));
                }
        }
        *mp->nxtip = nxti;
}
#endif
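
/*
 * dma2 is the initiator-mode bus_dmamap_load() callback: it fills in
 * the data segments of the request queue entry it is handed and, if
 * the transfer doesn't fit, chains on RQSTYPE_DATASEG continuation
 * entries until the whole mapping is covered.
 */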
static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
        mush_t *mp;
        struct ispsoftc *isp;
        struct ccb_scsiio *csio;
        struct isp_pcisoftc *pcs;
        bus_dmamap_t *dp;
        bus_dma_segment_t *eseg;
        ispreq_t *rq;
        int seglim, datalen;
        u_int16_t nxti;

        mp = (mush_t *) arg;
        if (error) {
                mp->error = error;
                return;
        }

        if (nseg < 1) {
                isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
                mp->error = EFAULT;
                return;
        }
        csio = mp->cmd_token;
        isp = mp->isp;
        rq = mp->rq;
        pcs = (struct isp_pcisoftc *)mp->isp;
        dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
        nxti = *mp->nxtip;

        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
        } else {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
        }

        datalen = XS_XFRLEN(csio);

        /*
         * We're passed an initial partially filled in entry that
         * has most fields filled in except for data transfer
         * related values.
         *
         * Our job is to fill in the initial request queue entry and
         * then to start allocating and filling in continuation entries
         * until we've covered the entire transfer.
         */

        if (IS_FC(isp)) {
                seglim = ISP_RQDSEG_T2;
                ((ispreqt2_t *)rq)->req_totalcnt = datalen;
                if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                        ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
                } else {
                        ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
                }
        } else {
                if (csio->cdb_len > 12) {
                        seglim = 0;
                } else {
                        seglim = ISP_RQDSEG;
                }
                if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                        rq->req_flags |= REQFLAG_DATA_IN;
                } else {
                        rq->req_flags |= REQFLAG_DATA_OUT;
                }
        }

        eseg = dm_segs + nseg;

        while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
                if (IS_FC(isp)) {
                        ispreqt2_t *rq2 = (ispreqt2_t *)rq;
                        rq2->req_dataseg[rq2->req_seg_count].ds_base =
                            dm_segs->ds_addr;
                        rq2->req_dataseg[rq2->req_seg_count].ds_count =
                            dm_segs->ds_len;
                } else {
                        rq->req_dataseg[rq->req_seg_count].ds_base =
                            dm_segs->ds_addr;
                        rq->req_dataseg[rq->req_seg_count].ds_count =
                            dm_segs->ds_len;
                }
                datalen -= dm_segs->ds_len;
                rq->req_seg_count++;
                dm_segs++;
        }

        while (datalen > 0 && dm_segs != eseg) {
                u_int16_t onxti;
                ispcontreq_t local, *crq = &local, *cqe;

                cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
                onxti = nxti;
                nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
                if (nxti == mp->optr) {
                        isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
                        mp->error = MUSHERR_NOQENTRIES;
                        return;
                }
                rq->req_header.rqs_entry_count++;
                MEMZERO((void *)crq, sizeof (*crq));
                crq->req_header.rqs_entry_count = 1;
                crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

                seglim = 0;
                while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
                        crq->req_dataseg[seglim].ds_base =
                            dm_segs->ds_addr;
                        crq->req_dataseg[seglim].ds_count =
                            dm_segs->ds_len;
                        rq->req_seg_count++;
                        seglim++;
                        datalen -= dm_segs->ds_len;
                        dm_segs++;
                }
                isp_put_cont_req(isp, crq, cqe);
                MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
        }
        *mp->nxtip = nxti;
}

static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        ispreq_t *qep;
        bus_dmamap_t *dp = NULL;
        mush_t mush, *mp;
        void (*eptr)(void *, bus_dma_segment_t *, int, int);

        qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef ISP_TARGET_MODE
        if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
                if (IS_FC(isp)) {
                        eptr = tdma_mkfc;
                } else {
                        eptr = tdma_mk;
                }
                if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
                    (csio->dxfer_len == 0)) {
                        mp = &mush;
                        mp->isp = isp;
                        mp->cmd_token = csio;
                        mp->rq = rq;    /* really a ct_entry_t or ct2_entry_t */
                        mp->nxtip = nxtip;
                        mp->optr = optr;
                        mp->error = 0;
                        (*eptr)(mp, NULL, 0, 0);
                        goto mbxsync;
                }
        } else
#endif
        eptr = dma2;


        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
            (csio->dxfer_len == 0)) {
                rq->req_seg_count = 1;
                goto mbxsync;
        }

        /*
         * Do a virtual grapevine step to collect info for
         * the callback dma allocation that we have to use...
         */
        mp = &mush;
        mp->isp = isp;
        mp->cmd_token = csio;
        mp->rq = rq;
        mp->nxtip = nxtip;
        mp->optr = optr;
        mp->error = 0;

        if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
                if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
                        int error, s;
                        dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
                        s = splsoftvm();
                        error = bus_dmamap_load(pcs->dmat, *dp,
                            csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
                        if (error == EINPROGRESS) {
                                bus_dmamap_unload(pcs->dmat, *dp);
                                mp->error = EINVAL;
                                isp_prt(isp, ISP_LOGERR,
                                    "deferred dma allocation not supported");
                        } else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
                                isp_prt(isp, ISP_LOGERR,
                                    "error %d in dma mapping code", error);
#endif
                                mp->error = error;
                        }
                        splx(s);
                } else {
                        /* Pointer to physical buffer */
                        struct bus_dma_segment seg;
                        seg.ds_addr = (bus_addr_t)csio->data_ptr;
                        seg.ds_len = csio->dxfer_len;
                        (*eptr)(mp, &seg, 1, 0);
                }
        } else {
                struct bus_dma_segment *segs;

                if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
                        isp_prt(isp, ISP_LOGERR,
                            "Physical segment pointers unsupported");
                        mp->error = EINVAL;
                } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
                        isp_prt(isp, ISP_LOGERR,
                            "Virtual segment addresses unsupported");
                        mp->error = EINVAL;
                } else {
                        /* Just use the segments provided */
                        segs = (struct bus_dma_segment *) csio->data_ptr;
                        (*eptr)(mp, segs, csio->sglist_cnt, 0);
                }
        }
        if (mp->error) {
                int retval = CMD_COMPLETE;
                if (mp->error == MUSHERR_NOQENTRIES) {
                        retval = CMD_EAGAIN;
                } else if (mp->error == EFBIG) {
                        XS_SETERR(csio, CAM_REQ_TOO_BIG);
                } else if (mp->error == EINVAL) {
                        XS_SETERR(csio, CAM_REQ_INVALID);
                } else {
                        XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
                }
                return (retval);
        }
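        /*
         * The isp_put_* calls below copy the host-format request into
         * the actual queue entry, byte swizzling as needed for the
         * chip along the way.
         */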
mbxsync:
        switch (rq->req_header.rqs_entry_type) {
        case RQSTYPE_REQUEST:
                isp_put_request(isp, rq, qep);
                break;
        case RQSTYPE_CMDONLY:
                isp_put_extended_request(isp, (ispextreq_t *)rq,
                    (ispextreq_t *)qep);
                break;
        case RQSTYPE_T2RQS:
                isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
                break;
        }
        return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
        if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
        } else {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
        }
        bus_dmamap_unload(pcs->dmat, *dp);
}


static void
isp_pci_reset1(struct ispsoftc *isp)
{
        /* Make sure the BIOS is disabled */
        isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
        /* and enable interrupts */
        ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        if (msg)
                printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
        else
                printf("%s:\n", device_get_nameunit(isp->isp_dev));
        if (IS_SCSI(isp))
                printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
        else
                printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
        printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
            ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
        printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


        if (IS_SCSI(isp)) {
                ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
                printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
                    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
                    ISP_READ(isp, CDMA_FIFO_STS));
                printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
                    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
                    ISP_READ(isp, DDMA_FIFO_STS));
                printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
                    ISP_READ(isp, SXP_INTERRUPT),
                    ISP_READ(isp, SXP_GROSS_ERR),
                    ISP_READ(isp, SXP_PINS_CTRL));
                ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
        }
        printf("    mbox regs: %x %x %x %x %x\n",
            ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
            ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
            ISP_READ(isp, OUTMAILBOX4));
        printf("    PCI Status Command/Status=%x\n",
            pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}