1 /* $FreeBSD$ */ 2 /* 3 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 4 * FreeBSD Version. 5 * 6 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice immediately at the beginning of the file, without modification, 13 * this list of conditions, and the following disclaimer. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/kernel.h> 33 #include <sys/module.h> 34 #include <sys/bus.h> 35 36 #include <pci/pcireg.h> 37 #include <pci/pcivar.h> 38 39 #include <machine/bus_memio.h> 40 #include <machine/bus_pio.h> 41 #include <machine/bus.h> 42 #include <machine/resource.h> 43 #include <sys/rman.h> 44 #include <sys/malloc.h> 45 46 #include <dev/isp/isp_freebsd.h> 47 48 static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int); 49 static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t); 50 static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int); 51 static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t); 52 static int 53 isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 54 static int 55 isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 56 static int isp_pci_mbxdma(struct ispsoftc *); 57 static int 58 isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t); 59 static void 60 isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t); 61 62 static void isp_pci_reset1(struct ispsoftc *); 63 static void isp_pci_dumpregs(struct ispsoftc *, const char *); 64 65 #ifndef ISP_CODE_ORG 66 #define ISP_CODE_ORG 0x1000 67 #endif 68 69 static struct ispmdvec mdvec = { 70 isp_pci_rd_isr, 71 isp_pci_rd_reg, 72 isp_pci_wr_reg, 73 isp_pci_mbxdma, 74 isp_pci_dmasetup, 75 isp_pci_dmateardown, 76 NULL, 77 isp_pci_reset1, 78 isp_pci_dumpregs, 79 NULL, 80 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 81 }; 82 83 static struct ispmdvec mdvec_1080 = { 84 isp_pci_rd_isr, 85 isp_pci_rd_reg_1080, 86 isp_pci_wr_reg_1080, 87 isp_pci_mbxdma, 88 isp_pci_dmasetup, 89 isp_pci_dmateardown, 90 NULL, 91 isp_pci_reset1, 92 isp_pci_dumpregs, 93 NULL, 94 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 95 }; 96 97 static struct ispmdvec mdvec_12160 = { 98 isp_pci_rd_isr, 99 isp_pci_rd_reg_1080, 100 isp_pci_wr_reg_1080, 101 isp_pci_mbxdma, 102 isp_pci_dmasetup, 103 isp_pci_dmateardown, 104 NULL, 105 isp_pci_reset1, 106 isp_pci_dumpregs, 107 NULL, 108 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 109 }; 110 111 static struct ispmdvec mdvec_2100 = { 112 isp_pci_rd_isr, 113 isp_pci_rd_reg, 114 isp_pci_wr_reg, 115 isp_pci_mbxdma, 116 isp_pci_dmasetup, 117 isp_pci_dmateardown, 118 NULL, 119 isp_pci_reset1, 120 isp_pci_dumpregs 121 }; 122 123 static struct ispmdvec mdvec_2200 = { 124 isp_pci_rd_isr, 125 isp_pci_rd_reg, 126 isp_pci_wr_reg, 127 isp_pci_mbxdma, 128 isp_pci_dmasetup, 129 isp_pci_dmateardown, 130 NULL, 131 isp_pci_reset1, 132 isp_pci_dumpregs 133 }; 134 135 static struct ispmdvec mdvec_2300 = { 136 isp_pci_rd_isr_2300, 137 isp_pci_rd_reg, 138 isp_pci_wr_reg, 139 isp_pci_mbxdma, 140 isp_pci_dmasetup, 141 isp_pci_dmateardown, 142 NULL, 143 isp_pci_reset1, 144 isp_pci_dumpregs 145 }; 146 147 #ifndef PCIM_CMD_INVEN 148 #define PCIM_CMD_INVEN 0x10 149 #endif 150 #ifndef PCIM_CMD_BUSMASTEREN 151 #define PCIM_CMD_BUSMASTEREN 0x0004 152 #endif 153 #ifndef PCIM_CMD_PERRESPEN 154 #define PCIM_CMD_PERRESPEN 0x0040 155 #endif 156 #ifndef PCIM_CMD_SEREN 157 #define PCIM_CMD_SEREN 0x0100 158 #endif 159 160 #ifndef PCIR_COMMAND 161 #define PCIR_COMMAND 0x04 162 #endif 163 164 #ifndef PCIR_CACHELNSZ 165 #define PCIR_CACHELNSZ 0x0c 166 #endif 167 168 #ifndef PCIR_LATTIMER 169 #define PCIR_LATTIMER 0x0d 170 #endif 171 172 #ifndef PCIR_ROMADDR 173 #define PCIR_ROMADDR 0x30 174 #endif 175 176 #ifndef PCI_VENDOR_QLOGIC 177 #define PCI_VENDOR_QLOGIC 0x1077 178 #endif 179 180 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 181 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 182 #endif 183 184 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 185 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 186 #endif 187 188 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 189 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 190 #endif 191 192 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 193 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 194 #endif 195 196 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 197 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 198 #endif 199 200 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 201 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 202 #endif 203 204 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 205 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 206 #endif 207 208 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 209 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 210 #endif 211 212 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 213 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 214 #endif 215 216 #define PCI_QLOGIC_ISP1020 \ 217 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 218 219 #define PCI_QLOGIC_ISP1080 \ 220 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 221 222 #define PCI_QLOGIC_ISP12160 \ 223 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 224 225 #define PCI_QLOGIC_ISP1240 \ 226 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 227 228 #define PCI_QLOGIC_ISP1280 \ 229 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 230 231 #define PCI_QLOGIC_ISP2100 \ 232 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 233 234 #define PCI_QLOGIC_ISP2200 \ 235 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 236 237 #define PCI_QLOGIC_ISP2300 \ 238 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 239 240 #define PCI_QLOGIC_ISP2312 \ 241 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 242 243 /* 244 * Odd case for some AMI raid cards... We need to *not* attach to this. 245 */ 246 #define AMI_RAID_SUBVENDOR_ID 0x101e 247 248 #define IO_MAP_REG 0x10 249 #define MEM_MAP_REG 0x14 250 251 #define PCI_DFLT_LTNCY 0x40 252 #define PCI_DFLT_LNSZ 0x10 253 254 static int isp_pci_probe (device_t); 255 static int isp_pci_attach (device_t); 256 257 struct isp_pcisoftc { 258 struct ispsoftc pci_isp; 259 device_t pci_dev; 260 struct resource * pci_reg; 261 bus_space_tag_t pci_st; 262 bus_space_handle_t pci_sh; 263 void * ih; 264 int16_t pci_poff[_NREG_BLKS]; 265 bus_dma_tag_t parent_dmat; 266 bus_dma_tag_t cntrol_dmat; 267 bus_dmamap_t cntrol_dmap; 268 bus_dmamap_t *dmaps; 269 }; 270 ispfwfunc *isp_get_firmware_p = NULL; 271 272 static device_method_t isp_pci_methods[] = { 273 /* Device interface */ 274 DEVMETHOD(device_probe, isp_pci_probe), 275 DEVMETHOD(device_attach, isp_pci_attach), 276 { 0, 0 } 277 }; 278 static void isp_pci_intr(void *); 279 280 static driver_t isp_pci_driver = { 281 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 282 }; 283 static devclass_t isp_devclass; 284 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 285 MODULE_VERSION(isp, 1); 286 287 static int 288 isp_pci_probe(device_t dev) 289 { 290 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 291 case PCI_QLOGIC_ISP1020: 292 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 293 break; 294 case PCI_QLOGIC_ISP1080: 295 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 296 break; 297 case PCI_QLOGIC_ISP1240: 298 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 299 break; 300 case PCI_QLOGIC_ISP1280: 301 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 302 break; 303 case PCI_QLOGIC_ISP12160: 304 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 305 return (ENXIO); 306 } 307 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 308 break; 309 case PCI_QLOGIC_ISP2100: 310 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 311 break; 312 case PCI_QLOGIC_ISP2200: 313 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 314 break; 315 case PCI_QLOGIC_ISP2300: 316 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 317 break; 318 case PCI_QLOGIC_ISP2312: 319 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 320 break; 321 default: 322 return (ENXIO); 323 } 324 if (device_get_unit(dev) == 0 && bootverbose) { 325 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 326 "Core Version %d.%d\n", 327 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 328 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 329 } 330 /* 331 * XXXX: Here is where we might load the f/w module 332 * XXXX: (or increase a reference count to it). 333 */ 334 return (0); 335 } 336 337 static int 338 isp_pci_attach(device_t dev) 339 { 340 struct resource *regs, *irq; 341 int tval, rtp, rgd, iqd, m1, m2, isp_debug, role; 342 u_int32_t data, cmd, linesz, psize, basetype; 343 struct isp_pcisoftc *pcs; 344 struct ispsoftc *isp = NULL; 345 struct ispmdvec *mdvp; 346 bus_size_t lim; 347 const char *sptr; 348 int locksetup = 0; 349 350 /* 351 * Figure out if we're supposed to skip this one. 352 * If we are, we actually go to ISP_ROLE_NONE. 353 */ 354 355 tval = 0; 356 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 357 "disable", &tval) == 0 && tval) { 358 device_printf(dev, "device is disabled\n"); 359 /* but return 0 so the !$)$)*!$*) unit isn't reused */ 360 return (0); 361 } 362 363 role = 0; 364 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 365 "role", &role) == 0 && 366 ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) { 367 device_printf(dev, "setting role to 0x%x\n", role); 368 } else { 369 #ifdef ISP_TARGET_MODE 370 role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET; 371 #else 372 role = ISP_DEFAULT_ROLES; 373 #endif 374 } 375 376 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO); 377 if (pcs == NULL) { 378 device_printf(dev, "cannot allocate softc\n"); 379 return (ENOMEM); 380 } 381 382 /* 383 * Figure out which we should try first - memory mapping or i/o mapping? 384 */ 385 #ifdef __alpha__ 386 m1 = PCIM_CMD_MEMEN; 387 m2 = PCIM_CMD_PORTEN; 388 #else 389 m1 = PCIM_CMD_PORTEN; 390 m2 = PCIM_CMD_MEMEN; 391 #endif 392 393 tval = 0; 394 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 395 "prefer_iomap", &tval) == 0 && tval != 0) { 396 m1 = PCIM_CMD_PORTEN; 397 m2 = PCIM_CMD_MEMEN; 398 } 399 tval = 0; 400 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 401 "prefer_memmap", &tval) == 0 && tval != 0) { 402 m1 = PCIM_CMD_MEMEN; 403 m2 = PCIM_CMD_PORTEN; 404 } 405 406 linesz = PCI_DFLT_LNSZ; 407 irq = regs = NULL; 408 rgd = rtp = iqd = 0; 409 410 cmd = pci_read_config(dev, PCIR_COMMAND, 1); 411 if (cmd & m1) { 412 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 413 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 414 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE); 415 } 416 if (regs == NULL && (cmd & m2)) { 417 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 418 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 419 regs = bus_alloc_resource(dev, rtp, &rgd, 0, ~0, 1, RF_ACTIVE); 420 } 421 if (regs == NULL) { 422 device_printf(dev, "unable to map any ports\n"); 423 goto bad; 424 } 425 if (bootverbose) 426 device_printf(dev, "using %s space register mapping\n", 427 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 428 pcs->pci_dev = dev; 429 pcs->pci_reg = regs; 430 pcs->pci_st = rman_get_bustag(regs); 431 pcs->pci_sh = rman_get_bushandle(regs); 432 433 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 434 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 435 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 436 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 437 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 438 mdvp = &mdvec; 439 basetype = ISP_HA_SCSI_UNKNOWN; 440 psize = sizeof (sdparam); 441 lim = BUS_SPACE_MAXSIZE_32BIT; 442 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 443 mdvp = &mdvec; 444 basetype = ISP_HA_SCSI_UNKNOWN; 445 psize = sizeof (sdparam); 446 lim = BUS_SPACE_MAXSIZE_24BIT; 447 } 448 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 449 mdvp = &mdvec_1080; 450 basetype = ISP_HA_SCSI_1080; 451 psize = sizeof (sdparam); 452 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 453 ISP1080_DMA_REGS_OFF; 454 } 455 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 456 mdvp = &mdvec_1080; 457 basetype = ISP_HA_SCSI_1240; 458 psize = 2 * sizeof (sdparam); 459 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 460 ISP1080_DMA_REGS_OFF; 461 } 462 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 463 mdvp = &mdvec_1080; 464 basetype = ISP_HA_SCSI_1280; 465 psize = 2 * sizeof (sdparam); 466 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 467 ISP1080_DMA_REGS_OFF; 468 } 469 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 470 mdvp = &mdvec_12160; 471 basetype = ISP_HA_SCSI_12160; 472 psize = 2 * sizeof (sdparam); 473 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 474 ISP1080_DMA_REGS_OFF; 475 } 476 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 477 mdvp = &mdvec_2100; 478 basetype = ISP_HA_FC_2100; 479 psize = sizeof (fcparam); 480 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 481 PCI_MBOX_REGS2100_OFF; 482 if (pci_get_revid(dev) < 3) { 483 /* 484 * XXX: Need to get the actual revision 485 * XXX: number of the 2100 FB. At any rate, 486 * XXX: lower cache line size for early revision 487 * XXX; boards. 488 */ 489 linesz = 1; 490 } 491 } 492 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 493 mdvp = &mdvec_2200; 494 basetype = ISP_HA_FC_2200; 495 psize = sizeof (fcparam); 496 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 497 PCI_MBOX_REGS2100_OFF; 498 } 499 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300 || 500 pci_get_devid(dev) == PCI_QLOGIC_ISP2312) { 501 mdvp = &mdvec_2300; 502 basetype = ISP_HA_FC_2300; 503 psize = sizeof (fcparam); 504 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 505 PCI_MBOX_REGS2300_OFF; 506 } 507 isp = &pcs->pci_isp; 508 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 509 if (isp->isp_param == NULL) { 510 device_printf(dev, "cannot allocate parameter data\n"); 511 goto bad; 512 } 513 isp->isp_mdvec = mdvp; 514 isp->isp_type = basetype; 515 isp->isp_revision = pci_get_revid(dev); 516 isp->isp_role = role; 517 isp->isp_dev = dev; 518 519 /* 520 * Try and find firmware for this device. 521 */ 522 523 if (isp_get_firmware_p) { 524 int device = (int) pci_get_device(dev); 525 #ifdef ISP_TARGET_MODE 526 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 527 #else 528 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 529 #endif 530 } 531 532 /* 533 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 534 * are set. 535 */ 536 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 537 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 538 pci_write_config(dev, PCIR_COMMAND, cmd, 1); 539 540 /* 541 * Make sure the Cache Line Size register is set sensibly. 542 */ 543 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 544 if (data != linesz) { 545 data = PCI_DFLT_LNSZ; 546 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 547 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 548 } 549 550 /* 551 * Make sure the Latency Timer is sane. 552 */ 553 data = pci_read_config(dev, PCIR_LATTIMER, 1); 554 if (data < PCI_DFLT_LTNCY) { 555 data = PCI_DFLT_LTNCY; 556 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 557 pci_write_config(dev, PCIR_LATTIMER, data, 1); 558 } 559 560 /* 561 * Make sure we've disabled the ROM. 562 */ 563 data = pci_read_config(dev, PCIR_ROMADDR, 4); 564 data &= ~1; 565 pci_write_config(dev, PCIR_ROMADDR, data, 4); 566 567 568 if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, 569 BUS_SPACE_MAXADDR, NULL, NULL, lim + 1, 570 255, lim, 0, &pcs->parent_dmat) != 0) { 571 device_printf(dev, "could not create master dma tag\n"); 572 free(isp->isp_param, M_DEVBUF); 573 free(pcs, M_DEVBUF); 574 return (ENXIO); 575 } 576 577 iqd = 0; 578 irq = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0, 579 1, RF_ACTIVE | RF_SHAREABLE); 580 if (irq == NULL) { 581 device_printf(dev, "could not allocate interrupt\n"); 582 goto bad; 583 } 584 585 tval = 0; 586 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 587 "fwload_disable", &tval) == 0 && tval != 0) { 588 isp->isp_confopts |= ISP_CFG_NORELOAD; 589 } 590 tval = 0; 591 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 592 "ignore_nvram", &tval) == 0 && tval != 0) { 593 isp->isp_confopts |= ISP_CFG_NONVRAM; 594 } 595 tval = 0; 596 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 597 "fullduplex", &tval) == 0 && tval != 0) { 598 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 599 } 600 601 sptr = 0; 602 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 603 "topology", (const char **) &sptr) == 0 && sptr != 0) { 604 if (strcmp(sptr, "lport") == 0) { 605 isp->isp_confopts |= ISP_CFG_LPORT; 606 } else if (strcmp(sptr, "nport") == 0) { 607 isp->isp_confopts |= ISP_CFG_NPORT; 608 } else if (strcmp(sptr, "lport-only") == 0) { 609 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 610 } else if (strcmp(sptr, "nport-only") == 0) { 611 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 612 } 613 } 614 615 /* 616 * Because the resource_*_value functions can neither return 617 * 64 bit integer values, nor can they be directly coerced 618 * to interpret the right hand side of the assignment as 619 * you want them to interpret it, we have to force WWN 620 * hint replacement to specify WWN strings with a leading 621 * 'w' (e..g w50000000aaaa0001). Sigh. 622 */ 623 sptr = 0; 624 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 625 "portwwn", (const char **) &sptr); 626 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 627 char *eptr = 0; 628 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 629 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 630 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 631 isp->isp_osinfo.default_port_wwn = 0; 632 } else { 633 isp->isp_confopts |= ISP_CFG_OWNWWN; 634 } 635 } 636 if (isp->isp_osinfo.default_port_wwn == 0) { 637 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 638 } 639 640 sptr = 0; 641 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 642 "nodewwn", (const char **) &sptr); 643 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 644 char *eptr = 0; 645 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 646 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 647 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 648 isp->isp_osinfo.default_node_wwn = 0; 649 } else { 650 isp->isp_confopts |= ISP_CFG_OWNWWN; 651 } 652 } 653 if (isp->isp_osinfo.default_node_wwn == 0) { 654 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 655 } 656 657 isp_debug = 0; 658 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 659 "debug", &isp_debug); 660 661 /* Make sure the lock is set up. */ 662 mtx_init(&isp->isp_osinfo.lock, "isp", MTX_DEF); 663 locksetup++; 664 665 #ifdef ISP_SMPLOCK 666 #define INTR_FLAGS INTR_TYPE_CAM | INTR_MPSAFE | INTR_ENTROPY 667 #else 668 #define INTR_FLAGS INTR_TYPE_CAM | INTR_ENTROPY 669 #endif 670 if (bus_setup_intr(dev, irq, INTR_FLAGS, isp_pci_intr, isp, &pcs->ih)) { 671 device_printf(dev, "could not setup interrupt\n"); 672 goto bad; 673 } 674 675 /* 676 * Set up logging levels. 677 */ 678 if (isp_debug) { 679 isp->isp_dblev = isp_debug; 680 } else { 681 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 682 } 683 if (bootverbose) 684 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 685 686 /* 687 * Make sure we're in reset state. 688 */ 689 ISP_LOCK(isp); 690 isp_reset(isp); 691 if (isp->isp_state != ISP_RESETSTATE) { 692 ISP_UNLOCK(isp); 693 goto bad; 694 } 695 isp_init(isp); 696 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 697 isp_uninit(isp); 698 ISP_UNLOCK(isp); 699 goto bad; 700 } 701 isp_attach(isp); 702 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 703 isp_uninit(isp); 704 ISP_UNLOCK(isp); 705 goto bad; 706 } 707 /* 708 * XXXX: Here is where we might unload the f/w module 709 * XXXX: (or decrease the reference count to it). 710 */ 711 ISP_UNLOCK(isp); 712 return (0); 713 714 bad: 715 716 if (pcs && pcs->ih) { 717 (void) bus_teardown_intr(dev, irq, pcs->ih); 718 } 719 720 if (locksetup && isp) { 721 mtx_destroy(&isp->isp_osinfo.lock); 722 } 723 724 if (irq) { 725 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 726 } 727 728 729 if (regs) { 730 (void) bus_release_resource(dev, rtp, rgd, regs); 731 } 732 733 if (pcs) { 734 if (pcs->pci_isp.isp_param) 735 free(pcs->pci_isp.isp_param, M_DEVBUF); 736 free(pcs, M_DEVBUF); 737 } 738 739 /* 740 * XXXX: Here is where we might unload the f/w module 741 * XXXX: (or decrease the reference count to it). 742 */ 743 return (ENXIO); 744 } 745 746 static void 747 isp_pci_intr(void *arg) 748 { 749 struct ispsoftc *isp = arg; 750 u_int16_t isr, sema, mbox; 751 752 ISP_LOCK(isp); 753 isp->isp_intcnt++; 754 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 755 isp->isp_intbogus++; 756 } else { 757 int iok = isp->isp_osinfo.intsok; 758 isp->isp_osinfo.intsok = 0; 759 isp_intr(isp, isr, sema, mbox); 760 isp->isp_osinfo.intsok = iok; 761 } 762 ISP_UNLOCK(isp); 763 } 764 765 766 #define IspVirt2Off(a, x) \ 767 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 768 _BLK_REG_SHFT] + ((x) & 0xff)) 769 770 #define BXR2(pcs, off) \ 771 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 772 #define BXW2(pcs, off, v) \ 773 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 774 775 776 static INLINE int 777 isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp) 778 { 779 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 780 u_int16_t val0, val1; 781 int i = 0; 782 783 do { 784 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 785 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 786 } while (val0 != val1 && ++i < 1000); 787 if (val0 != val1) { 788 return (1); 789 } 790 *rp = val0; 791 return (0); 792 } 793 794 static int 795 isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp, 796 u_int16_t *semap, u_int16_t *mbp) 797 { 798 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 799 u_int16_t isr, sema; 800 801 if (IS_2100(isp)) { 802 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 803 return (0); 804 } 805 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 806 return (0); 807 } 808 } else { 809 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 810 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 811 } 812 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 813 isr &= INT_PENDING_MASK(isp); 814 sema &= BIU_SEMA_LOCK; 815 if (isr == 0 && sema == 0) { 816 return (0); 817 } 818 *isrp = isr; 819 if ((*semap = sema) != 0) { 820 if (IS_2100(isp)) { 821 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 822 return (0); 823 } 824 } else { 825 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 826 } 827 } 828 return (1); 829 } 830 831 static int 832 isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp, 833 u_int16_t *semap, u_int16_t *mbox0p) 834 { 835 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 836 u_int32_t r2hisr; 837 838 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 839 *isrp = 0; 840 return (0); 841 } 842 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh, 843 IspVirt2Off(pcs, BIU_R2HSTSLO)); 844 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 845 if ((r2hisr & BIU_R2HST_INTR) == 0) { 846 *isrp = 0; 847 return (0); 848 } 849 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 850 case ISPR2HST_ROM_MBX_OK: 851 case ISPR2HST_ROM_MBX_FAIL: 852 case ISPR2HST_MBX_OK: 853 case ISPR2HST_MBX_FAIL: 854 case ISPR2HST_ASYNC_EVENT: 855 case ISPR2HST_FPOST: 856 case ISPR2HST_FPOST_CTIO: 857 *isrp = r2hisr & 0xffff; 858 *mbox0p = (r2hisr >> 16); 859 *semap = 1; 860 return (1); 861 case ISPR2HST_RSPQ_UPDATE: 862 *isrp = r2hisr & 0xffff; 863 *mbox0p = 0; 864 *semap = 0; 865 return (1); 866 default: 867 return (0); 868 } 869 } 870 871 static u_int16_t 872 isp_pci_rd_reg(struct ispsoftc *isp, int regoff) 873 { 874 u_int16_t rv; 875 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 876 int oldconf = 0; 877 878 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 879 /* 880 * We will assume that someone has paused the RISC processor. 881 */ 882 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 883 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 884 oldconf | BIU_PCI_CONF1_SXP); 885 } 886 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 887 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 888 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 889 } 890 return (rv); 891 } 892 893 static void 894 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val) 895 { 896 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 897 int oldconf = 0; 898 899 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 900 /* 901 * We will assume that someone has paused the RISC processor. 902 */ 903 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 904 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 905 oldconf | BIU_PCI_CONF1_SXP); 906 } 907 BXW2(pcs, IspVirt2Off(isp, regoff), val); 908 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 909 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 910 } 911 } 912 913 static u_int16_t 914 isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff) 915 { 916 u_int16_t rv, oc = 0; 917 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 918 919 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 920 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 921 u_int16_t tc; 922 /* 923 * We will assume that someone has paused the RISC processor. 924 */ 925 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 926 tc = oc & ~BIU_PCI1080_CONF1_DMA; 927 if (regoff & SXP_BANK1_SELECT) 928 tc |= BIU_PCI1080_CONF1_SXP1; 929 else 930 tc |= BIU_PCI1080_CONF1_SXP0; 931 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 932 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 933 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 934 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 935 oc | BIU_PCI1080_CONF1_DMA); 936 } 937 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 938 if (oc) { 939 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 940 } 941 return (rv); 942 } 943 944 static void 945 isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val) 946 { 947 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 948 int oc = 0; 949 950 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 951 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 952 u_int16_t tc; 953 /* 954 * We will assume that someone has paused the RISC processor. 955 */ 956 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 957 tc = oc & ~BIU_PCI1080_CONF1_DMA; 958 if (regoff & SXP_BANK1_SELECT) 959 tc |= BIU_PCI1080_CONF1_SXP1; 960 else 961 tc |= BIU_PCI1080_CONF1_SXP0; 962 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 963 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 964 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 965 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 966 oc | BIU_PCI1080_CONF1_DMA); 967 } 968 BXW2(pcs, IspVirt2Off(isp, regoff), val); 969 if (oc) { 970 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 971 } 972 } 973 974 static void isp_map_rquest(void *, bus_dma_segment_t *, int, int); 975 static void isp_map_result(void *, bus_dma_segment_t *, int, int); 976 static void isp_map_fcscrt(void *, bus_dma_segment_t *, int, int); 977 978 struct imush { 979 struct ispsoftc *isp; 980 int error; 981 }; 982 983 static void 984 isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) 985 { 986 struct imush *imushp = (struct imush *) arg; 987 if (error) { 988 imushp->error = error; 989 } else { 990 imushp->isp->isp_rquest_dma = segs->ds_addr; 991 } 992 } 993 994 static void 995 isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error) 996 { 997 struct imush *imushp = (struct imush *) arg; 998 if (error) { 999 imushp->error = error; 1000 } else { 1001 imushp->isp->isp_result_dma = segs->ds_addr; 1002 } 1003 } 1004 1005 static void 1006 isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1007 { 1008 struct imush *imushp = (struct imush *) arg; 1009 if (error) { 1010 imushp->error = error; 1011 } else { 1012 fcparam *fcp = imushp->isp->isp_param; 1013 fcp->isp_scdma = segs->ds_addr; 1014 } 1015 } 1016 1017 static int 1018 isp_pci_mbxdma(struct ispsoftc *isp) 1019 { 1020 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 1021 caddr_t base; 1022 u_int32_t len; 1023 int i, error; 1024 bus_size_t lim; 1025 struct imush im; 1026 1027 1028 /* 1029 * Already been here? If so, leave... 1030 */ 1031 if (isp->isp_rquest) { 1032 return (0); 1033 } 1034 1035 len = sizeof (XS_T **) * isp->isp_maxcmds; 1036 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1037 if (isp->isp_xflist == NULL) { 1038 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1039 return (1); 1040 } 1041 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1042 pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1043 if (pci->dmaps == NULL) { 1044 isp_prt(isp, ISP_LOGERR, "can't alloc dma maps"); 1045 free(isp->isp_xflist, M_DEVBUF); 1046 return (1); 1047 } 1048 1049 if (IS_FC(isp) || IS_ULTRA2(isp)) 1050 lim = BUS_SPACE_MAXADDR + 1; 1051 else 1052 lim = BUS_SPACE_MAXADDR_24BIT + 1; 1053 1054 /* 1055 * Allocate and map the request, result queues, plus FC scratch area. 1056 */ 1057 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1058 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1059 if (IS_FC(isp)) { 1060 len += ISP2100_SCRLEN; 1061 } 1062 if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim, 1063 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, 1064 BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) { 1065 isp_prt(isp, ISP_LOGERR, 1066 "cannot create a dma tag for control spaces"); 1067 free(isp->isp_xflist, M_DEVBUF); 1068 free(pci->dmaps, M_DEVBUF); 1069 return (1); 1070 } 1071 if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base, 1072 BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) { 1073 isp_prt(isp, ISP_LOGERR, 1074 "cannot allocate %d bytes of CCB memory", len); 1075 free(isp->isp_xflist, M_DEVBUF); 1076 free(pci->dmaps, M_DEVBUF); 1077 return (1); 1078 } 1079 1080 isp->isp_rquest = base; 1081 im.isp = isp; 1082 im.error = 0; 1083 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest, 1084 ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)), isp_map_rquest, &im, 0); 1085 if (im.error) { 1086 isp_prt(isp, ISP_LOGERR, 1087 "error %d loading dma map for DMA request queue", im.error); 1088 free(isp->isp_xflist, M_DEVBUF); 1089 free(pci->dmaps, M_DEVBUF); 1090 isp->isp_rquest = NULL; 1091 return (1); 1092 } 1093 isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1094 im.error = 0; 1095 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result, 1096 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)), isp_map_result, &im, 0); 1097 if (im.error) { 1098 isp_prt(isp, ISP_LOGERR, 1099 "error %d loading dma map for DMA result queue", im.error); 1100 free(isp->isp_xflist, M_DEVBUF); 1101 free(pci->dmaps, M_DEVBUF); 1102 isp->isp_rquest = NULL; 1103 return (1); 1104 } 1105 1106 for (i = 0; i < isp->isp_maxcmds; i++) { 1107 error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]); 1108 if (error) { 1109 isp_prt(isp, ISP_LOGERR, 1110 "error %d creating per-cmd DMA maps", error); 1111 free(isp->isp_xflist, M_DEVBUF); 1112 free(pci->dmaps, M_DEVBUF); 1113 isp->isp_rquest = NULL; 1114 return (1); 1115 } 1116 } 1117 1118 if (IS_FC(isp)) { 1119 fcparam *fcp = (fcparam *) isp->isp_param; 1120 fcp->isp_scratch = base + 1121 ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)) + 1122 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1123 im.error = 0; 1124 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, 1125 fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0); 1126 if (im.error) { 1127 isp_prt(isp, ISP_LOGERR, 1128 "error %d loading FC scratch area", im.error); 1129 free(isp->isp_xflist, M_DEVBUF); 1130 free(pci->dmaps, M_DEVBUF); 1131 isp->isp_rquest = NULL; 1132 return (1); 1133 } 1134 } 1135 return (0); 1136 } 1137 1138 typedef struct { 1139 struct ispsoftc *isp; 1140 void *cmd_token; 1141 void *rq; 1142 u_int16_t *iptrp; 1143 u_int16_t optr; 1144 u_int error; 1145 } mush_t; 1146 1147 #define MUSHERR_NOQENTRIES -2 1148 1149 #ifdef ISP_TARGET_MODE 1150 /* 1151 * We need to handle DMA for target mode differently from initiator mode. 1152 * 1153 * DMA mapping and construction and submission of CTIO Request Entries 1154 * and rendevous for completion are very tightly coupled because we start 1155 * out by knowing (per platform) how much data we have to move, but we 1156 * don't know, up front, how many DMA mapping segments will have to be used 1157 * cover that data, so we don't know how many CTIO Request Entries we 1158 * will end up using. Further, for performance reasons we may want to 1159 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1160 * 1161 * The standard vector still goes through isp_pci_dmasetup, but the callback 1162 * for the DMA mapping routines comes here instead with the whole transfer 1163 * mapped and a pointer to a partially filled in already allocated request 1164 * queue entry. We finish the job. 1165 */ 1166 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1167 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1168 1169 #define STATUS_WITH_DATA 1 1170 1171 static void 1172 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1173 { 1174 mush_t *mp; 1175 struct ccb_scsiio *csio; 1176 struct isp_pcisoftc *pci; 1177 bus_dmamap_t *dp; 1178 u_int8_t scsi_status; 1179 ct_entry_t *cto; 1180 u_int16_t handle; 1181 u_int32_t sflags; 1182 int nctios, send_status; 1183 int32_t resid; 1184 int i, j; 1185 1186 mp = (mush_t *) arg; 1187 if (error) { 1188 mp->error = error; 1189 return; 1190 } 1191 csio = mp->cmd_token; 1192 cto = mp->rq; 1193 1194 cto->ct_xfrlen = 0; 1195 cto->ct_seg_count = 0; 1196 cto->ct_header.rqs_entry_count = 1; 1197 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1198 1199 if (nseg == 0) { 1200 cto->ct_header.rqs_seqno = 1; 1201 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1202 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1203 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1204 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1205 cto->ct_scsi_status, cto->ct_resid); 1206 ISP_TDQE(mp->isp, "tdma_mk[no data]", *mp->iptrp, cto); 1207 ISP_SWIZ_CTIO(mp->isp, cto, cto); 1208 return; 1209 } 1210 1211 nctios = nseg / ISP_RQDSEG; 1212 if (nseg % ISP_RQDSEG) { 1213 nctios++; 1214 } 1215 1216 /* 1217 * Check to see that we don't overflow. 1218 */ 1219 for (i = 0, j = *mp->iptrp; i < nctios; i++) { 1220 j = ISP_NXT_QENTRY(j, RQUEST_QUEUE_LEN(isp)); 1221 if (j == mp->optr) { 1222 isp_prt(mp->isp, ISP_LOGWARN, 1223 "Request Queue Overflow [tdma_mk]"); 1224 mp->error = MUSHERR_NOQENTRIES; 1225 return; 1226 } 1227 } 1228 1229 /* 1230 * Save syshandle, and potentially any SCSI status, which we'll 1231 * reinsert on the last CTIO we're going to send. 1232 */ 1233 handle = cto->ct_syshandle; 1234 cto->ct_syshandle = 0; 1235 cto->ct_header.rqs_seqno = 0; 1236 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1237 1238 if (send_status) { 1239 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1240 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1241 /* 1242 * Preserve residual. 1243 */ 1244 resid = cto->ct_resid; 1245 1246 /* 1247 * Save actual SCSI status. 1248 */ 1249 scsi_status = cto->ct_scsi_status; 1250 1251 #ifndef STATUS_WITH_DATA 1252 sflags |= CT_NO_DATA; 1253 /* 1254 * We can't do a status at the same time as a data CTIO, so 1255 * we need to synthesize an extra CTIO at this level. 1256 */ 1257 nctios++; 1258 #endif 1259 } else { 1260 sflags = scsi_status = resid = 0; 1261 } 1262 1263 cto->ct_resid = 0; 1264 cto->ct_scsi_status = 0; 1265 1266 pci = (struct isp_pcisoftc *)mp->isp; 1267 dp = &pci->dmaps[isp_handle_index(handle)]; 1268 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1269 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); 1270 } else { 1271 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); 1272 } 1273 1274 1275 while (nctios--) { 1276 int seglim; 1277 1278 seglim = nseg; 1279 if (seglim) { 1280 int seg; 1281 1282 if (seglim > ISP_RQDSEG) 1283 seglim = ISP_RQDSEG; 1284 1285 for (seg = 0; seg < seglim; seg++, nseg--) { 1286 /* 1287 * Unlike normal initiator commands, we don't 1288 * do any swizzling here. 1289 */ 1290 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1291 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1292 cto->ct_xfrlen += dm_segs->ds_len; 1293 dm_segs++; 1294 } 1295 cto->ct_seg_count = seg; 1296 } else { 1297 /* 1298 * This case should only happen when we're sending an 1299 * extra CTIO with final status. 1300 */ 1301 if (send_status == 0) { 1302 isp_prt(mp->isp, ISP_LOGWARN, 1303 "tdma_mk ran out of segments"); 1304 mp->error = EINVAL; 1305 return; 1306 } 1307 } 1308 1309 /* 1310 * At this point, the fields ct_lun, ct_iid, ct_tagval, 1311 * ct_tagtype, and ct_timeout have been carried over 1312 * unchanged from what our caller had set. 1313 * 1314 * The dataseg fields and the seg_count fields we just got 1315 * through setting. The data direction we've preserved all 1316 * along and only clear it if we're now sending status. 1317 */ 1318 1319 if (nctios == 0) { 1320 /* 1321 * We're the last in a sequence of CTIOs, so mark 1322 * this CTIO and save the handle to the CCB such that 1323 * when this CTIO completes we can free dma resources 1324 * and do whatever else we need to do to finish the 1325 * rest of the command. We *don't* give this to the 1326 * firmware to work on- the caller will do that. 1327 */ 1328 cto->ct_syshandle = handle; 1329 cto->ct_header.rqs_seqno = 1; 1330 1331 if (send_status) { 1332 cto->ct_scsi_status = scsi_status; 1333 cto->ct_flags |= sflags; 1334 cto->ct_resid = resid; 1335 } 1336 if (send_status) { 1337 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1338 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 1339 "scsi status %x resid %d", 1340 cto->ct_fwhandle, csio->ccb_h.target_lun, 1341 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 1342 cto->ct_scsi_status, cto->ct_resid); 1343 } else { 1344 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1345 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 1346 cto->ct_fwhandle, csio->ccb_h.target_lun, 1347 cto->ct_iid, cto->ct_tag_val, 1348 cto->ct_flags); 1349 } 1350 ISP_TDQE(mp->isp, "last tdma_mk", *mp->iptrp, cto); 1351 ISP_SWIZ_CTIO(mp->isp, cto, cto); 1352 } else { 1353 ct_entry_t *octo = cto; 1354 1355 /* 1356 * Make sure syshandle fields are clean 1357 */ 1358 cto->ct_syshandle = 0; 1359 cto->ct_header.rqs_seqno = 0; 1360 1361 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1362 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 1363 cto->ct_fwhandle, csio->ccb_h.target_lun, 1364 cto->ct_iid, cto->ct_flags); 1365 ISP_TDQE(mp->isp, "tdma_mk", *mp->iptrp, cto); 1366 1367 /* 1368 * Get a new CTIO 1369 */ 1370 cto = (ct_entry_t *) 1371 ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp); 1372 j = *mp->iptrp; 1373 *mp->iptrp = 1374 ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(isp)); 1375 if (*mp->iptrp == mp->optr) { 1376 isp_prt(mp->isp, ISP_LOGTDEBUG0, 1377 "Queue Overflow in tdma_mk"); 1378 mp->error = MUSHERR_NOQENTRIES; 1379 return; 1380 } 1381 /* 1382 * Fill in the new CTIO with info from the old one. 1383 */ 1384 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1385 cto->ct_header.rqs_entry_count = 1; 1386 cto->ct_fwhandle = octo->ct_fwhandle; 1387 cto->ct_header.rqs_flags = 0; 1388 cto->ct_lun = octo->ct_lun; 1389 cto->ct_iid = octo->ct_iid; 1390 cto->ct_reserved2 = octo->ct_reserved2; 1391 cto->ct_tgt = octo->ct_tgt; 1392 cto->ct_flags = octo->ct_flags; 1393 cto->ct_status = 0; 1394 cto->ct_scsi_status = 0; 1395 cto->ct_tag_val = octo->ct_tag_val; 1396 cto->ct_tag_type = octo->ct_tag_type; 1397 cto->ct_xfrlen = 0; 1398 cto->ct_resid = 0; 1399 cto->ct_timeout = octo->ct_timeout; 1400 cto->ct_seg_count = 0; 1401 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1402 /* 1403 * Now swizzle the old one for the consumption 1404 * of the chip and give it to the firmware to 1405 * work on while we do the next. 1406 */ 1407 ISP_SWIZ_CTIO(mp->isp, octo, octo); 1408 ISP_ADD_REQUEST(mp->isp, j); 1409 } 1410 } 1411 } 1412 1413 static void 1414 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1415 { 1416 mush_t *mp; 1417 struct ccb_scsiio *csio; 1418 struct isp_pcisoftc *pci; 1419 bus_dmamap_t *dp; 1420 ct2_entry_t *cto; 1421 u_int16_t scsi_status, send_status, send_sense, handle; 1422 int32_t resid; 1423 u_int8_t sense[QLTM_SENSELEN]; 1424 int nctios, j; 1425 1426 mp = (mush_t *) arg; 1427 if (error) { 1428 mp->error = error; 1429 return; 1430 } 1431 1432 csio = mp->cmd_token; 1433 cto = mp->rq; 1434 1435 if (nseg == 0) { 1436 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 1437 isp_prt(mp->isp, ISP_LOGWARN, 1438 "dma2_tgt_fc, a status CTIO2 without MODE1 " 1439 "set (0x%x)", cto->ct_flags); 1440 mp->error = EINVAL; 1441 return; 1442 } 1443 cto->ct_header.rqs_entry_count = 1; 1444 cto->ct_header.rqs_seqno = 1; 1445 /* ct_syshandle contains the handle set by caller */ 1446 /* 1447 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 1448 * flags to NO DATA and clear relative offset flags. 1449 * We preserve the ct_resid and the response area. 1450 */ 1451 cto->ct_flags |= CT2_NO_DATA; 1452 if (cto->ct_resid > 0) 1453 cto->rsp.m1.ct_scsi_status |= CT2_DATA_UNDER; 1454 else if (cto->ct_resid < 0) 1455 cto->rsp.m1.ct_scsi_status |= CT2_DATA_OVER; 1456 cto->ct_seg_count = 0; 1457 cto->ct_reloff = 0; 1458 ISP_TDQE(mp->isp, "dma2_tgt_fc[no data]", *mp->iptrp, cto); 1459 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1460 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 1461 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 1462 cto->ct_iid, cto->ct_flags, cto->ct_status, 1463 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1464 ISP_SWIZ_CTIO2(isp, cto, cto); 1465 return; 1466 } 1467 1468 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 1469 isp_prt(mp->isp, ISP_LOGWARN, 1470 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 1471 "(0x%x)", cto->ct_flags); 1472 mp->error = EINVAL; 1473 return; 1474 } 1475 1476 1477 nctios = nseg / ISP_RQDSEG_T2; 1478 if (nseg % ISP_RQDSEG_T2) { 1479 nctios++; 1480 } 1481 1482 /* 1483 * Save the handle, status, reloff, and residual. We'll reinsert the 1484 * handle into the last CTIO2 we're going to send, and reinsert status 1485 * and residual (and possibly sense data) if that's to be sent as well. 1486 * 1487 * We preserve ct_reloff and adjust it for each data CTIO2 we send past 1488 * the first one. This is needed so that the FCP DATA IUs being sent 1489 * out have the correct offset (they can arrive at the other end out 1490 * of order). 1491 */ 1492 1493 handle = cto->ct_syshandle; 1494 cto->ct_syshandle = 0; 1495 send_status = (cto->ct_flags & CT2_SENDSTATUS) != 0; 1496 1497 if (send_status) { 1498 cto->ct_flags &= ~(CT2_SENDSTATUS|CT2_CCINCR); 1499 1500 /* 1501 * Preserve residual. 1502 */ 1503 resid = cto->ct_resid; 1504 1505 /* 1506 * Save actual SCSI status. We'll reinsert the 1507 * CT2_SNSLEN_VALID later if appropriate. 1508 */ 1509 scsi_status = cto->rsp.m0.ct_scsi_status & 0xff; 1510 send_sense = cto->rsp.m0.ct_scsi_status & CT2_SNSLEN_VALID; 1511 1512 /* 1513 * If we're sending status and have a CHECK CONDTION and 1514 * have sense data, we send one more CTIO2 with just the 1515 * status and sense data. The upper layers have stashed 1516 * the sense data in the dataseg structure for us. 1517 */ 1518 1519 if ((scsi_status & 0xf) == SCSI_STATUS_CHECK_COND && 1520 send_sense) { 1521 bcopy(cto->rsp.m0.ct_dataseg, sense, QLTM_SENSELEN); 1522 nctios++; 1523 } 1524 } else { 1525 scsi_status = send_sense = resid = 0; 1526 } 1527 1528 cto->ct_resid = 0; 1529 cto->rsp.m0.ct_scsi_status = 0; 1530 MEMZERO(&cto->rsp, sizeof (cto->rsp)); 1531 1532 pci = (struct isp_pcisoftc *)mp->isp; 1533 dp = &pci->dmaps[isp_handle_index(handle)]; 1534 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1535 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); 1536 } else { 1537 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); 1538 } 1539 1540 while (nctios--) { 1541 int seg, seglim; 1542 1543 seglim = nseg; 1544 if (seglim) { 1545 if (seglim > ISP_RQDSEG_T2) 1546 seglim = ISP_RQDSEG_T2; 1547 1548 for (seg = 0; seg < seglim; seg++) { 1549 cto->rsp.m0.ct_dataseg[seg].ds_base = 1550 dm_segs->ds_addr; 1551 cto->rsp.m0.ct_dataseg[seg].ds_count = 1552 dm_segs->ds_len; 1553 cto->rsp.m0.ct_xfrlen += dm_segs->ds_len; 1554 dm_segs++; 1555 } 1556 cto->ct_seg_count = seg; 1557 } else { 1558 /* 1559 * This case should only happen when we're sending a 1560 * synthesized MODE1 final status with sense data. 1561 */ 1562 if (send_sense == 0) { 1563 isp_prt(mp->isp, ISP_LOGWARN, 1564 "dma2_tgt_fc ran out of segments, " 1565 "no SENSE DATA"); 1566 mp->error = EINVAL; 1567 return; 1568 } 1569 } 1570 1571 /* 1572 * At this point, the fields ct_lun, ct_iid, ct_rxid, 1573 * ct_timeout have been carried over unchanged from what 1574 * our caller had set. 1575 * 1576 * The field ct_reloff is either what the caller set, or 1577 * what we've added to below. 1578 * 1579 * The dataseg fields and the seg_count fields we just got 1580 * through setting. The data direction we've preserved all 1581 * along and only clear it if we're sending a MODE1 status 1582 * as the last CTIO. 1583 * 1584 */ 1585 1586 if (nctios == 0) { 1587 1588 /* 1589 * We're the last in a sequence of CTIO2s, so mark this 1590 * CTIO2 and save the handle to the CCB such that when 1591 * this CTIO2 completes we can free dma resources and 1592 * do whatever else we need to do to finish the rest 1593 * of the command. 1594 */ 1595 1596 cto->ct_syshandle = handle; 1597 cto->ct_header.rqs_seqno = 1; 1598 1599 if (send_status) { 1600 /* 1601 * Get 'real' residual and set flags based 1602 * on it. 1603 */ 1604 cto->ct_resid = resid; 1605 if (send_sense) { 1606 MEMCPY(cto->rsp.m1.ct_resp, sense, 1607 QLTM_SENSELEN); 1608 cto->rsp.m1.ct_senselen = 1609 QLTM_SENSELEN; 1610 scsi_status |= CT2_SNSLEN_VALID; 1611 cto->rsp.m1.ct_scsi_status = 1612 scsi_status; 1613 cto->ct_flags &= CT2_FLAG_MMASK; 1614 cto->ct_flags |= CT2_FLAG_MODE1 | 1615 CT2_NO_DATA | CT2_SENDSTATUS | 1616 CT2_CCINCR; 1617 if (cto->ct_resid > 0) 1618 cto->rsp.m1.ct_scsi_status |= 1619 CT2_DATA_UNDER; 1620 else if (cto->ct_resid < 0) 1621 cto->rsp.m1.ct_scsi_status |= 1622 CT2_DATA_OVER; 1623 } else { 1624 cto->rsp.m0.ct_scsi_status = 1625 scsi_status; 1626 cto->ct_flags |= 1627 CT2_SENDSTATUS | CT2_CCINCR; 1628 if (cto->ct_resid > 0) 1629 cto->rsp.m0.ct_scsi_status |= 1630 CT2_DATA_UNDER; 1631 else if (cto->ct_resid < 0) 1632 cto->rsp.m0.ct_scsi_status |= 1633 CT2_DATA_OVER; 1634 } 1635 } 1636 ISP_TDQE(mp->isp, "last dma2_tgt_fc", *mp->iptrp, cto); 1637 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1638 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x" 1639 " ssts 0x%x res %d", cto->ct_rxid, 1640 csio->ccb_h.target_lun, (int) cto->ct_iid, 1641 cto->ct_flags, cto->ct_status, 1642 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1643 ISP_SWIZ_CTIO2(isp, cto, cto); 1644 } else { 1645 ct2_entry_t *octo = cto; 1646 1647 /* 1648 * Make sure handle fields are clean 1649 */ 1650 cto->ct_syshandle = 0; 1651 cto->ct_header.rqs_seqno = 0; 1652 1653 ISP_TDQE(mp->isp, "dma2_tgt_fc", *mp->iptrp, cto); 1654 isp_prt(mp->isp, ISP_LOGTDEBUG1, 1655 "CTIO2[%x] lun %d->iid%d flgs 0x%x", 1656 cto->ct_rxid, csio->ccb_h.target_lun, 1657 (int) cto->ct_iid, cto->ct_flags); 1658 /* 1659 * Get a new CTIO2 1660 */ 1661 cto = (ct2_entry_t *) 1662 ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp); 1663 j = *mp->iptrp; 1664 *mp->iptrp = 1665 ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(isp)); 1666 if (*mp->iptrp == mp->optr) { 1667 isp_prt(mp->isp, ISP_LOGWARN, 1668 "Queue Overflow in dma2_tgt_fc"); 1669 mp->error = MUSHERR_NOQENTRIES; 1670 return; 1671 } 1672 1673 /* 1674 * Fill in the new CTIO2 with info from the old one. 1675 */ 1676 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2; 1677 cto->ct_header.rqs_entry_count = 1; 1678 cto->ct_header.rqs_flags = 0; 1679 /* ct_header.rqs_seqno && ct_syshandle done later */ 1680 cto->ct_fwhandle = octo->ct_fwhandle; 1681 cto->ct_lun = octo->ct_lun; 1682 cto->ct_iid = octo->ct_iid; 1683 cto->ct_rxid = octo->ct_rxid; 1684 cto->ct_flags = octo->ct_flags; 1685 cto->ct_status = 0; 1686 cto->ct_resid = 0; 1687 cto->ct_timeout = octo->ct_timeout; 1688 cto->ct_seg_count = 0; 1689 /* 1690 * Adjust the new relative offset by the amount which 1691 * is recorded in the data segment of the old CTIO2 we 1692 * just finished filling out. 1693 */ 1694 cto->ct_reloff += octo->rsp.m0.ct_xfrlen; 1695 MEMZERO(&cto->rsp, sizeof (cto->rsp)); 1696 ISP_SWIZ_CTIO2(isp, octo, octo); 1697 ISP_ADD_REQUEST(mp->isp, j); 1698 } 1699 } 1700 } 1701 #endif 1702 1703 static void dma2(void *, bus_dma_segment_t *, int, int); 1704 1705 static void 1706 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1707 { 1708 mush_t *mp; 1709 struct ccb_scsiio *csio; 1710 struct isp_pcisoftc *pci; 1711 bus_dmamap_t *dp; 1712 bus_dma_segment_t *eseg; 1713 ispreq_t *rq; 1714 ispcontreq_t *crq; 1715 int seglim, datalen; 1716 1717 mp = (mush_t *) arg; 1718 if (error) { 1719 mp->error = error; 1720 return; 1721 } 1722 1723 if (nseg < 1) { 1724 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 1725 mp->error = EFAULT; 1726 return; 1727 } 1728 csio = mp->cmd_token; 1729 rq = mp->rq; 1730 pci = (struct isp_pcisoftc *)mp->isp; 1731 dp = &pci->dmaps[isp_handle_index(rq->req_handle)]; 1732 1733 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1734 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); 1735 } else { 1736 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); 1737 } 1738 1739 datalen = XS_XFRLEN(csio); 1740 1741 /* 1742 * We're passed an initial partially filled in entry that 1743 * has most fields filled in except for data transfer 1744 * related values. 1745 * 1746 * Our job is to fill in the initial request queue entry and 1747 * then to start allocating and filling in continuation entries 1748 * until we've covered the entire transfer. 1749 */ 1750 1751 if (IS_FC(mp->isp)) { 1752 seglim = ISP_RQDSEG_T2; 1753 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 1754 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1755 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1756 } else { 1757 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1758 } 1759 } else { 1760 if (csio->cdb_len > 12) { 1761 seglim = 0; 1762 } else { 1763 seglim = ISP_RQDSEG; 1764 } 1765 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1766 rq->req_flags |= REQFLAG_DATA_IN; 1767 } else { 1768 rq->req_flags |= REQFLAG_DATA_OUT; 1769 } 1770 } 1771 1772 eseg = dm_segs + nseg; 1773 1774 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1775 if (IS_FC(mp->isp)) { 1776 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 1777 rq2->req_dataseg[rq2->req_seg_count].ds_base = 1778 dm_segs->ds_addr; 1779 rq2->req_dataseg[rq2->req_seg_count].ds_count = 1780 dm_segs->ds_len; 1781 } else { 1782 rq->req_dataseg[rq->req_seg_count].ds_base = 1783 dm_segs->ds_addr; 1784 rq->req_dataseg[rq->req_seg_count].ds_count = 1785 dm_segs->ds_len; 1786 } 1787 datalen -= dm_segs->ds_len; 1788 #if 0 1789 if (IS_FC(mp->isp)) { 1790 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 1791 device_printf(mp->isp->isp_dev, 1792 "seg0[%d] cnt 0x%x paddr 0x%08x\n", 1793 rq->req_seg_count, 1794 rq2->req_dataseg[rq2->req_seg_count].ds_count, 1795 rq2->req_dataseg[rq2->req_seg_count].ds_base); 1796 } else { 1797 device_printf(mp->isp->isp_dev, 1798 "seg0[%d] cnt 0x%x paddr 0x%08x\n", 1799 rq->req_seg_count, 1800 rq->req_dataseg[rq->req_seg_count].ds_count, 1801 rq->req_dataseg[rq->req_seg_count].ds_base); 1802 } 1803 #endif 1804 rq->req_seg_count++; 1805 dm_segs++; 1806 } 1807 1808 while (datalen > 0 && dm_segs != eseg) { 1809 crq = (ispcontreq_t *) 1810 ISP_QUEUE_ENTRY(mp->isp->isp_rquest, *mp->iptrp); 1811 *mp->iptrp = ISP_NXT_QENTRY(*mp->iptrp, RQUEST_QUEUE_LEN(isp)); 1812 if (*mp->iptrp == mp->optr) { 1813 isp_prt(mp->isp, 1814 ISP_LOGDEBUG0, "Request Queue Overflow++"); 1815 mp->error = MUSHERR_NOQENTRIES; 1816 return; 1817 } 1818 rq->req_header.rqs_entry_count++; 1819 bzero((void *)crq, sizeof (*crq)); 1820 crq->req_header.rqs_entry_count = 1; 1821 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1822 1823 seglim = 0; 1824 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 1825 crq->req_dataseg[seglim].ds_base = 1826 dm_segs->ds_addr; 1827 crq->req_dataseg[seglim].ds_count = 1828 dm_segs->ds_len; 1829 #if 0 1830 device_printf(mp->isp->isp_dev, 1831 "seg%d[%d] cnt 0x%x paddr 0x%08x\n", 1832 rq->req_header.rqs_entry_count-1, 1833 seglim, crq->req_dataseg[seglim].ds_count, 1834 crq->req_dataseg[seglim].ds_base); 1835 #endif 1836 rq->req_seg_count++; 1837 dm_segs++; 1838 seglim++; 1839 datalen -= dm_segs->ds_len; 1840 } 1841 } 1842 } 1843 1844 static int 1845 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq, 1846 u_int16_t *iptrp, u_int16_t optr) 1847 { 1848 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 1849 bus_dmamap_t *dp = NULL; 1850 mush_t mush, *mp; 1851 void (*eptr)(void *, bus_dma_segment_t *, int, int); 1852 1853 #ifdef ISP_TARGET_MODE 1854 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1855 if (IS_FC(isp)) { 1856 eptr = tdma_mkfc; 1857 } else { 1858 eptr = tdma_mk; 1859 } 1860 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1861 (csio->dxfer_len == 0)) { 1862 mp = &mush; 1863 mp->isp = isp; 1864 mp->cmd_token = csio; 1865 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 1866 mp->iptrp = iptrp; 1867 mp->optr = optr; 1868 mp->error = 0; 1869 (*eptr)(mp, NULL, 0, 0); 1870 goto exit; 1871 } 1872 } else 1873 #endif 1874 eptr = dma2; 1875 1876 /* 1877 * NB: if we need to do request queue entry swizzling, 1878 * NB: this is where it would need to be done for cmds 1879 * NB: that move no data. For commands that move data, 1880 * NB: swizzling would take place in those functions. 1881 */ 1882 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1883 (csio->dxfer_len == 0)) { 1884 rq->req_seg_count = 1; 1885 return (CMD_QUEUED); 1886 } 1887 1888 /* 1889 * Do a virtual grapevine step to collect info for 1890 * the callback dma allocation that we have to use... 1891 */ 1892 mp = &mush; 1893 mp->isp = isp; 1894 mp->cmd_token = csio; 1895 mp->rq = rq; 1896 mp->iptrp = iptrp; 1897 mp->optr = optr; 1898 mp->error = 0; 1899 1900 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1901 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 1902 int error, s; 1903 dp = &pci->dmaps[isp_handle_index(rq->req_handle)]; 1904 s = splsoftvm(); 1905 error = bus_dmamap_load(pci->parent_dmat, *dp, 1906 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 1907 if (error == EINPROGRESS) { 1908 bus_dmamap_unload(pci->parent_dmat, *dp); 1909 mp->error = EINVAL; 1910 isp_prt(isp, ISP_LOGERR, 1911 "deferred dma allocation not supported"); 1912 } else if (error && mp->error == 0) { 1913 #ifdef DIAGNOSTIC 1914 isp_prt(isp, ISP_LOGERR, 1915 "error %d in dma mapping code", error); 1916 #endif 1917 mp->error = error; 1918 } 1919 splx(s); 1920 } else { 1921 /* Pointer to physical buffer */ 1922 struct bus_dma_segment seg; 1923 seg.ds_addr = (bus_addr_t)csio->data_ptr; 1924 seg.ds_len = csio->dxfer_len; 1925 (*eptr)(mp, &seg, 1, 0); 1926 } 1927 } else { 1928 struct bus_dma_segment *segs; 1929 1930 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1931 isp_prt(isp, ISP_LOGERR, 1932 "Physical segment pointers unsupported"); 1933 mp->error = EINVAL; 1934 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1935 isp_prt(isp, ISP_LOGERR, 1936 "Virtual segment addresses unsupported"); 1937 mp->error = EINVAL; 1938 } else { 1939 /* Just use the segments provided */ 1940 segs = (struct bus_dma_segment *) csio->data_ptr; 1941 (*eptr)(mp, segs, csio->sglist_cnt, 0); 1942 } 1943 } 1944 #ifdef ISP_TARGET_MODE 1945 exit: 1946 #endif 1947 if (mp->error) { 1948 int retval = CMD_COMPLETE; 1949 if (mp->error == MUSHERR_NOQENTRIES) { 1950 retval = CMD_EAGAIN; 1951 } else if (mp->error == EFBIG) { 1952 XS_SETERR(csio, CAM_REQ_TOO_BIG); 1953 } else if (mp->error == EINVAL) { 1954 XS_SETERR(csio, CAM_REQ_INVALID); 1955 } else { 1956 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 1957 } 1958 return (retval); 1959 } else { 1960 /* 1961 * Check to see if we weren't cancelled while sleeping on 1962 * getting DMA resources... 1963 */ 1964 if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1965 if (dp) { 1966 bus_dmamap_unload(pci->parent_dmat, *dp); 1967 } 1968 return (CMD_COMPLETE); 1969 } 1970 return (CMD_QUEUED); 1971 } 1972 } 1973 1974 static void 1975 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle) 1976 { 1977 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 1978 bus_dmamap_t *dp = &pci->dmaps[isp_handle_index(handle)]; 1979 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1980 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD); 1981 } else { 1982 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE); 1983 } 1984 bus_dmamap_unload(pci->parent_dmat, *dp); 1985 } 1986 1987 1988 static void 1989 isp_pci_reset1(struct ispsoftc *isp) 1990 { 1991 /* Make sure the BIOS is disabled */ 1992 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 1993 /* and enable interrupts */ 1994 ENABLE_INTS(isp); 1995 } 1996 1997 static void 1998 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg) 1999 { 2000 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 2001 if (msg) 2002 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2003 else 2004 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2005 if (IS_SCSI(isp)) 2006 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2007 else 2008 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2009 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2010 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2011 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2012 2013 2014 if (IS_SCSI(isp)) { 2015 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2016 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2017 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2018 ISP_READ(isp, CDMA_FIFO_STS)); 2019 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2020 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2021 ISP_READ(isp, DDMA_FIFO_STS)); 2022 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2023 ISP_READ(isp, SXP_INTERRUPT), 2024 ISP_READ(isp, SXP_GROSS_ERR), 2025 ISP_READ(isp, SXP_PINS_CTRL)); 2026 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2027 } 2028 printf(" mbox regs: %x %x %x %x %x\n", 2029 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2030 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2031 ISP_READ(isp, OUTMAILBOX4)); 2032 printf(" PCI Status Command/Status=%x\n", 2033 pci_read_config(pci->pci_dev, PCIR_COMMAND, 1)); 2034 } 2035