/*-
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#ifdef ISP_TARGET_MODE
#ifdef PAE
#error "PAE and ISP_TARGET_MODE not supported yet"
#endif
#endif

#include <dev/isp/isp_freebsd.h>

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);
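
/*
 * Per-chip-family operation vectors. Each mdvec below plugs the
 * appropriate register accessors and interrupt status reader for one
 * family into the common core, plus, for the SCSI families, BIU
 * configuration bits to go with that family.
 */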

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
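
/*
 * pci_get_devid() returns (device id << 16) | vendor id, so, for
 * example, an ISP1020 shows up as 0x10201077 and compares equal to
 * PCI_QLOGIC_ISP1020; isp_pci_probe() below builds the same value by
 * hand from pci_get_device() and pci_get_vendor().
 */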

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};
extern ispfwfunc *isp_get_firmware_p;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (0);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int tval, rtp, rgd, iqd, m1, m2, isp_debug, role;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	const char *sptr;
	int locksetup = 0;

	/*
	 * Figure out if we're supposed to skip this one.
374 */ 375 376 tval = 0; 377 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 378 "disable", &tval) == 0 && tval) { 379 device_printf(dev, "device is disabled\n"); 380 /* but return 0 so the !$)$)*!$*) unit isn't reused */ 381 return (0); 382 } 383 384 role = -1; 385 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 386 "role", &role) == 0 && role != -1) { 387 role &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 388 device_printf(dev, "setting role to 0x%x\n", role); 389 } else { 390 #ifdef ISP_TARGET_MODE 391 role = ISP_ROLE_TARGET; 392 #else 393 role = ISP_DEFAULT_ROLES; 394 #endif 395 } 396 397 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO); 398 if (pcs == NULL) { 399 device_printf(dev, "cannot allocate softc\n"); 400 return (ENOMEM); 401 } 402 403 /* 404 * Which we should try first - memory mapping or i/o mapping? 405 * 406 * We used to try memory first followed by i/o on alpha, otherwise 407 * the reverse, but we should just try memory first all the time now. 408 */ 409 m1 = PCIM_CMD_MEMEN; 410 m2 = PCIM_CMD_PORTEN; 411 412 tval = 0; 413 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 414 "prefer_iomap", &tval) == 0 && tval != 0) { 415 m1 = PCIM_CMD_PORTEN; 416 m2 = PCIM_CMD_MEMEN; 417 } 418 tval = 0; 419 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 420 "prefer_memmap", &tval) == 0 && tval != 0) { 421 m1 = PCIM_CMD_MEMEN; 422 m2 = PCIM_CMD_PORTEN; 423 } 424 425 linesz = PCI_DFLT_LNSZ; 426 irq = regs = NULL; 427 rgd = rtp = iqd = 0; 428 429 cmd = pci_read_config(dev, PCIR_COMMAND, 1); 430 if (cmd & m1) { 431 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 432 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 433 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 434 } 435 if (regs == NULL && (cmd & m2)) { 436 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 437 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 438 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 439 } 440 if (regs == NULL) { 441 device_printf(dev, "unable to map any ports\n"); 442 goto bad; 443 } 444 if (bootverbose) 445 device_printf(dev, "using %s space register mapping\n", 446 (rgd == IO_MAP_REG)? 
"I/O" : "Memory"); 447 pcs->pci_dev = dev; 448 pcs->pci_reg = regs; 449 pcs->pci_st = rman_get_bustag(regs); 450 pcs->pci_sh = rman_get_bushandle(regs); 451 452 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 453 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 454 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 455 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 456 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 457 mdvp = &mdvec; 458 basetype = ISP_HA_SCSI_UNKNOWN; 459 psize = sizeof (sdparam); 460 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 461 mdvp = &mdvec; 462 basetype = ISP_HA_SCSI_UNKNOWN; 463 psize = sizeof (sdparam); 464 } 465 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 466 mdvp = &mdvec_1080; 467 basetype = ISP_HA_SCSI_1080; 468 psize = sizeof (sdparam); 469 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 470 ISP1080_DMA_REGS_OFF; 471 } 472 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 473 mdvp = &mdvec_1080; 474 basetype = ISP_HA_SCSI_1240; 475 psize = 2 * sizeof (sdparam); 476 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 477 ISP1080_DMA_REGS_OFF; 478 } 479 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 480 mdvp = &mdvec_1080; 481 basetype = ISP_HA_SCSI_1280; 482 psize = 2 * sizeof (sdparam); 483 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 484 ISP1080_DMA_REGS_OFF; 485 } 486 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 487 mdvp = &mdvec_12160; 488 basetype = ISP_HA_SCSI_10160; 489 psize = sizeof (sdparam); 490 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 491 ISP1080_DMA_REGS_OFF; 492 } 493 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 494 mdvp = &mdvec_12160; 495 basetype = ISP_HA_SCSI_12160; 496 psize = 2 * sizeof (sdparam); 497 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 498 ISP1080_DMA_REGS_OFF; 499 } 500 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 501 mdvp = &mdvec_2100; 502 basetype = ISP_HA_FC_2100; 503 psize = sizeof (fcparam); 504 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 505 PCI_MBOX_REGS2100_OFF; 506 if (pci_get_revid(dev) < 3) { 507 /* 508 * XXX: Need to get the actual revision 509 * XXX: number of the 2100 FB. At any rate, 510 * XXX: lower cache line size for early revision 511 * XXX; boards. 512 */ 513 linesz = 1; 514 } 515 } 516 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 517 mdvp = &mdvec_2200; 518 basetype = ISP_HA_FC_2200; 519 psize = sizeof (fcparam); 520 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 521 PCI_MBOX_REGS2100_OFF; 522 } 523 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 524 mdvp = &mdvec_2300; 525 basetype = ISP_HA_FC_2300; 526 psize = sizeof (fcparam); 527 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 528 PCI_MBOX_REGS2300_OFF; 529 } 530 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 || 531 pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 532 mdvp = &mdvec_2300; 533 basetype = ISP_HA_FC_2312; 534 psize = sizeof (fcparam); 535 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 536 PCI_MBOX_REGS2300_OFF; 537 } 538 isp = &pcs->pci_isp; 539 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 540 if (isp->isp_param == NULL) { 541 device_printf(dev, "cannot allocate parameter data\n"); 542 goto bad; 543 } 544 isp->isp_mdvec = mdvp; 545 isp->isp_type = basetype; 546 isp->isp_revision = pci_get_revid(dev); 547 isp->isp_role = role; 548 isp->isp_dev = dev; 549 550 /* 551 * Try and find firmware for this device. 

	/*
	 * Try and find firmware for this device.
	 */

	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 1);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
#ifdef	ISP_FW_CRASH_DUMP
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fw_dump_enable", &tval) == 0 && tval != 0) {
		size_t amt = 0;
		if (IS_2200(isp)) {
			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
		} else if (IS_23XX(isp)) {
			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
		}
		if (amt) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
		} else {
			device_printf(dev,
			    "f/w crash dumps not supported for this model\n");
		}
	}
#endif

	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
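
	/*
	 * For illustration, such a hint would appear in /boot/device.hints
	 * as, e.g.:
	 *
	 *	hint.isp.0.portwwn="w50000000aaaa0001"
	 *
	 * The leading 'w' is consumed below and the remainder is parsed
	 * as a 64-bit hexadecimal WWN by strtouq().
	 */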
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWPN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWNN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp->isp_osinfo.default_id = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "iid", &tval) == 0) {
		isp->isp_osinfo.default_id = tval;
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}
	if (isp->isp_osinfo.default_id == -1) {
		if (IS_FC(isp)) {
			isp->isp_osinfo.default_id = 109;
		} else {
			isp->isp_osinfo.default_id = 7;
		}
	}

	isp_debug = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &isp_debug);

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

	/*
	 * Last minute checks...
	 */
	if (IS_2312(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}

	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param)
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		free(pcs, M_DEVBUF);
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

static void
isp_pci_intr(void *arg)
{
	struct ispsoftc *isp = arg;
	u_int16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}


#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
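
/*
 * Example of the translation done by IspVirt2Off(): a virtual offset
 * in the mailbox block selects pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT]
 * (set at attach time to, e.g., PCI_MBOX_REGS2100_OFF on a 2100), and
 * the low byte of the virtual offset is added to that base to form
 * the bus space offset used by BXR2/BXW2.
 */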

static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	u_int32_t r2hisr;

	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}

static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}
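
/*
 * The 1080/1280 and 10160/12160 parts bank-switch their SXP and DMA
 * register sets via BIU_CONF1 select bits, so the 1080-style accessors
 * below route each access through the right bank (SXP0, SXP1 or DMA)
 * and then restore the previous BIU_CONF1 contents.
 */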

static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}


struct imush {
	struct ispsoftc *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		struct ispsoftc *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define	ISP_NSEGS	((MAXPHYS / PAGE_SIZE) + 1)

static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error, ns;
	bus_size_t alim, slim, xlim;
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

#ifdef	ISP_DAC_SUPPORTED
	alim = BUS_SPACE_UNRESTRICTED;
	xlim = BUS_SPACE_MAXADDR_32BIT;
#else
	xlim = alim = BUS_SPACE_MAXADDR_32BIT;
#endif
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = BUS_SPACE_MAXADDR_32BIT;
	} else {
		slim = BUS_SPACE_MAXADDR_24BIT;
	}

	ISP_UNLOCK(isp);
	if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
	    busdma_lock_mutex, &Giant, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}


	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
#ifdef	ISP_TARGET_MODE
	len = sizeof (void **) * isp->isp_maxcmds;
	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		ISP_LOCK(isp);
		return (1);
	}
#endif
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}
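
	/*
	 * The request queue, the result queue and (on FC cards) the scratch
	 * area share a single contiguous DMA allocation; imc() above and the
	 * pointer arithmetic after the bus_dmamap_load() below carve it up
	 * in that same order.
	 */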
	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, xlim, xlim,
	    NULL, NULL, len, ns, slim, 0, busdma_lock_mutex, &Giant,
	    &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

typedef struct {
	struct ispsoftc *isp;
	void *cmd_token;
	void *rq;
	u_int16_t *nxtip;
	u_int16_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
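
/*
 * For example (illustrative numbers only): a transfer that mapped to
 * 10 DMA segments with ISP_RQDSEG == 4 would be sent by tdma_mk() as
 * nctios == 3 CTIOs, with only the last one carrying the saved
 * syshandle and (if requested) the final SCSI status.
 */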
1262 */ 1263 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1264 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1265 1266 #define STATUS_WITH_DATA 1 1267 1268 static void 1269 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1270 { 1271 mush_t *mp; 1272 struct ccb_scsiio *csio; 1273 struct ispsoftc *isp; 1274 struct isp_pcisoftc *pcs; 1275 bus_dmamap_t *dp; 1276 ct_entry_t *cto, *qe; 1277 u_int8_t scsi_status; 1278 u_int16_t curi, nxti, handle; 1279 u_int32_t sflags; 1280 int32_t resid; 1281 int nth_ctio, nctios, send_status; 1282 1283 mp = (mush_t *) arg; 1284 if (error) { 1285 mp->error = error; 1286 return; 1287 } 1288 1289 isp = mp->isp; 1290 csio = mp->cmd_token; 1291 cto = mp->rq; 1292 curi = isp->isp_reqidx; 1293 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1294 1295 cto->ct_xfrlen = 0; 1296 cto->ct_seg_count = 0; 1297 cto->ct_header.rqs_entry_count = 1; 1298 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1299 1300 if (nseg == 0) { 1301 cto->ct_header.rqs_seqno = 1; 1302 isp_prt(isp, ISP_LOGTDEBUG1, 1303 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1304 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1305 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1306 cto->ct_scsi_status, cto->ct_resid); 1307 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1308 isp_put_ctio(isp, cto, qe); 1309 return; 1310 } 1311 1312 nctios = nseg / ISP_RQDSEG; 1313 if (nseg % ISP_RQDSEG) { 1314 nctios++; 1315 } 1316 1317 /* 1318 * Save syshandle, and potentially any SCSI status, which we'll 1319 * reinsert on the last CTIO we're going to send. 1320 */ 1321 1322 handle = cto->ct_syshandle; 1323 cto->ct_syshandle = 0; 1324 cto->ct_header.rqs_seqno = 0; 1325 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1326 1327 if (send_status) { 1328 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1329 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1330 /* 1331 * Preserve residual. 1332 */ 1333 resid = cto->ct_resid; 1334 1335 /* 1336 * Save actual SCSI status. 1337 */ 1338 scsi_status = cto->ct_scsi_status; 1339 1340 #ifndef STATUS_WITH_DATA 1341 sflags |= CT_NO_DATA; 1342 /* 1343 * We can't do a status at the same time as a data CTIO, so 1344 * we need to synthesize an extra CTIO at this level. 1345 */ 1346 nctios++; 1347 #endif 1348 } else { 1349 sflags = scsi_status = resid = 0; 1350 } 1351 1352 cto->ct_resid = 0; 1353 cto->ct_scsi_status = 0; 1354 1355 pcs = (struct isp_pcisoftc *)isp; 1356 dp = &pcs->dmaps[isp_handle_index(handle)]; 1357 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1358 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1359 } else { 1360 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1361 } 1362 1363 nxti = *mp->nxtip; 1364 1365 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 1366 int seglim; 1367 1368 seglim = nseg; 1369 if (seglim) { 1370 int seg; 1371 1372 if (seglim > ISP_RQDSEG) 1373 seglim = ISP_RQDSEG; 1374 1375 for (seg = 0; seg < seglim; seg++, nseg--) { 1376 /* 1377 * Unlike normal initiator commands, we don't 1378 * do any swizzling here. 1379 */ 1380 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1381 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1382 cto->ct_xfrlen += dm_segs->ds_len; 1383 dm_segs++; 1384 } 1385 cto->ct_seg_count = seg; 1386 } else { 1387 /* 1388 * This case should only happen when we're sending an 1389 * extra CTIO with final status. 
1390 */ 1391 if (send_status == 0) { 1392 isp_prt(isp, ISP_LOGWARN, 1393 "tdma_mk ran out of segments"); 1394 mp->error = EINVAL; 1395 return; 1396 } 1397 } 1398 1399 /* 1400 * At this point, the fields ct_lun, ct_iid, ct_tagval, 1401 * ct_tagtype, and ct_timeout have been carried over 1402 * unchanged from what our caller had set. 1403 * 1404 * The dataseg fields and the seg_count fields we just got 1405 * through setting. The data direction we've preserved all 1406 * along and only clear it if we're now sending status. 1407 */ 1408 1409 if (nth_ctio == nctios - 1) { 1410 /* 1411 * We're the last in a sequence of CTIOs, so mark 1412 * this CTIO and save the handle to the CCB such that 1413 * when this CTIO completes we can free dma resources 1414 * and do whatever else we need to do to finish the 1415 * rest of the command. We *don't* give this to the 1416 * firmware to work on- the caller will do that. 1417 */ 1418 1419 cto->ct_syshandle = handle; 1420 cto->ct_header.rqs_seqno = 1; 1421 1422 if (send_status) { 1423 cto->ct_scsi_status = scsi_status; 1424 cto->ct_flags |= sflags; 1425 cto->ct_resid = resid; 1426 } 1427 if (send_status) { 1428 isp_prt(isp, ISP_LOGTDEBUG1, 1429 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 1430 "scsi status %x resid %d", 1431 cto->ct_fwhandle, csio->ccb_h.target_lun, 1432 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 1433 cto->ct_scsi_status, cto->ct_resid); 1434 } else { 1435 isp_prt(isp, ISP_LOGTDEBUG1, 1436 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 1437 cto->ct_fwhandle, csio->ccb_h.target_lun, 1438 cto->ct_iid, cto->ct_tag_val, 1439 cto->ct_flags); 1440 } 1441 isp_put_ctio(isp, cto, qe); 1442 ISP_TDQE(isp, "last tdma_mk", curi, cto); 1443 if (nctios > 1) { 1444 MEMORYBARRIER(isp, SYNC_REQUEST, 1445 curi, QENTRY_LEN); 1446 } 1447 } else { 1448 ct_entry_t *oqe = qe; 1449 1450 /* 1451 * Make sure syshandle fields are clean 1452 */ 1453 cto->ct_syshandle = 0; 1454 cto->ct_header.rqs_seqno = 0; 1455 1456 isp_prt(isp, ISP_LOGTDEBUG1, 1457 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 1458 cto->ct_fwhandle, csio->ccb_h.target_lun, 1459 cto->ct_iid, cto->ct_flags); 1460 1461 /* 1462 * Get a new CTIO 1463 */ 1464 qe = (ct_entry_t *) 1465 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1466 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 1467 if (nxti == mp->optr) { 1468 isp_prt(isp, ISP_LOGTDEBUG0, 1469 "Queue Overflow in tdma_mk"); 1470 mp->error = MUSHERR_NOQENTRIES; 1471 return; 1472 } 1473 1474 /* 1475 * Now that we're done with the old CTIO, 1476 * flush it out to the request queue. 1477 */ 1478 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 1479 isp_put_ctio(isp, cto, oqe); 1480 if (nth_ctio != 0) { 1481 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 1482 QENTRY_LEN); 1483 } 1484 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 1485 1486 /* 1487 * Reset some fields in the CTIO so we can reuse 1488 * for the next one we'll flush to the request 1489 * queue. 1490 */ 1491 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1492 cto->ct_header.rqs_entry_count = 1; 1493 cto->ct_header.rqs_flags = 0; 1494 cto->ct_status = 0; 1495 cto->ct_scsi_status = 0; 1496 cto->ct_xfrlen = 0; 1497 cto->ct_resid = 0; 1498 cto->ct_seg_count = 0; 1499 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1500 } 1501 } 1502 *mp->nxtip = nxti; 1503 } 1504 1505 /* 1506 * We don't have to do multiple CTIOs here. Instead, we can just do 1507 * continuation segments as needed. This greatly simplifies the code 1508 * improves performance. 

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	struct ispsoftc *isp;
	ct2_entry_t *cto, *qe;
	u_int16_t curi, nxti;
	int segcnt;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		isp_put_ctio2(isp, cto, qe);
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}


	nxti = *mp->nxtip;

	/*
	 * Set up the CTIO2 data segments.
	 */
	for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
	    cto->ct_seg_count++, segcnt++) {
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
		    dm_segs[segcnt].ds_addr;
		cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
		    dm_segs[segcnt].ds_len;
		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "isp_send_ctio2: ent0[%d]0x%llx:%lld",
		    cto->ct_seg_count, (long long)dm_segs[segcnt].ds_addr,
		    (long long)dm_segs[segcnt].ds_len);
	}

	while (segcnt < nseg) {
		u_int16_t curip;
		int seg;
		ispcontreq_t local, *crq = &local, *qep;

		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		curip = nxti;
		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGTDEBUG0,
			    "tdma_mkfc: request queue overflow");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		cto->ct_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
		for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
		    segcnt++, seg++) {
			crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
			crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
			isp_prt(isp, ISP_LOGTDEBUG1,
			    "isp_send_ctio2: ent%d[%d]0x%llx:%lld",
			    cto->ct_header.rqs_entry_count-1, seg,
			    (long long) dm_segs[segcnt].ds_addr,
			    (long long) dm_segs[segcnt].ds_len);
			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
			cto->ct_seg_count++;
		}
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		isp_put_cont_req(isp, crq, qep);
		ISP_TDQE(isp, "cont entry", curi, qep);
	}

	/*
	 * Now do final
	 * twiddling for the CTIO itself.
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

#ifdef	PAE
#define	LOWD(x)		((uint32_t) x)
#define	HIWD(x)		((uint32_t) (x >> 32))

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq64_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */
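
	/*
	 * Concretely: the first seglim segments land in the request entry
	 * itself; anything left over is spilled into RQSTYPE_A64_CONT
	 * continuation entries (ISP_CDSEG64 segments apiece) pulled from
	 * the request queue until the whole transfer is covered.
	 */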

	if (IS_FC(isp)) {
		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
		seglim = ISP_RQDSEG_T3;
		((ispreqt3_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		rq->req_header.rqs_entry_type = RQSTYPE_A64;
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG_A64;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
			rq3->req_dataseg[rq3->req_seg_count].ds_base =
			    LOWD(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
			    HIWD(dm_segs->ds_addr);
			rq3->req_dataseg[rq3->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    LOWD(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_basehi =
			    HIWD(dm_segs->ds_addr);
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq64_t local, *crq = &local, *cqe;

		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    LOWD(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_basehi =
			    HIWD(dm_segs->ds_addr);
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			/* consume this segment's length before advancing */
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		isp_put_cont64_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
#else
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			/* consume this segment's length before advancing */
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
#endif

static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
		eptr = dma2;
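
	/*
	 * For commands that move data, three mapping cases are handled
	 * below: a plain virtual buffer goes through bus_dmamap_load()
	 * (deferred mapping is not supported), a physical buffer is
	 * described by a single synthetic segment, and a pre-built
	 * physical S/G list is handed to the callback as-is.
	 */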

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	case RQSTYPE_A64:
	case RQSTYPE_T3RQS:
		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}


static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}