/*-
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/stdint.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#ifdef ISP_TARGET_MODE
#ifdef PAE
#error "PAE and ISP_TARGET_MODE not supported yet"
#endif
#endif

#include <dev/isp/isp_freebsd.h>

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);
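
/*
 * Per-chip-family method vectors: each table supplies the register
 * accessors, interrupt status reader, DMA setup/teardown routines and
 * reset/dump hooks that the core driver calls through isp_mdvec.
 */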
static struct ispmdvec mdvec = {
        isp_pci_rd_isr,
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        NULL,
        BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
        isp_pci_rd_isr,
        isp_pci_rd_reg_1080,
        isp_pci_wr_reg_1080,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        NULL,
        BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
        isp_pci_rd_isr,
        isp_pci_rd_reg_1080,
        isp_pci_wr_reg_1080,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs,
        NULL,
        BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
        isp_pci_rd_isr,
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
        isp_pci_rd_isr,
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
        isp_pci_rd_isr_2300,
        isp_pci_rd_reg,
        isp_pci_wr_reg,
        isp_pci_mbxdma,
        isp_pci_dmasetup,
        isp_pci_dmateardown,
        NULL,
        isp_pci_reset1,
        isp_pci_dumpregs
};

#ifndef PCIM_CMD_INVEN
#define PCIM_CMD_INVEN 0x10
#endif
#ifndef PCIM_CMD_BUSMASTEREN
#define PCIM_CMD_BUSMASTEREN 0x0004
#endif
#ifndef PCIM_CMD_PERRESPEN
#define PCIM_CMD_PERRESPEN 0x0040
#endif
#ifndef PCIM_CMD_SEREN
#define PCIM_CMD_SEREN 0x0100
#endif

#ifndef PCIR_COMMAND
#define PCIR_COMMAND 0x04
#endif

#ifndef PCIR_CACHELNSZ
#define PCIR_CACHELNSZ 0x0c
#endif

#ifndef PCIR_LATTIMER
#define PCIR_LATTIMER 0x0d
#endif

#ifndef PCIR_ROMADDR
#define PCIR_ROMADDR 0x30
#endif

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP10160
#define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2300
#define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2312
#define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6312
#define PCI_PRODUCT_QLOGIC_ISP6312 0x6312
#endif
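
/*
 * Composite PCI IDs: the probe routine matches devices on
 * (device id << 16) | vendor id.
 */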
#define PCI_QLOGIC_ISP1020 \
        ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080 \
        ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP10160 \
        ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP12160 \
        ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240 \
        ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1280 \
        ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100 \
        ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200 \
        ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2300 \
        ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2312 \
        ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP6312 \
        ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define AMI_RAID_SUBVENDOR_ID 0x101e

#define IO_MAP_REG 0x10
#define MEM_MAP_REG 0x14

#define PCI_DFLT_LTNCY 0x40
#define PCI_DFLT_LNSZ 0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
        struct ispsoftc         pci_isp;
        device_t                pci_dev;
        struct resource *       pci_reg;
        bus_space_tag_t         pci_st;
        bus_space_handle_t      pci_sh;
        void *                  ih;
        int16_t                 pci_poff[_NREG_BLKS];
        bus_dma_tag_t           dmat;
        bus_dmamap_t            *dmaps;
};
extern ispfwfunc *isp_get_firmware_p;

static device_method_t isp_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe, isp_pci_probe),
        DEVMETHOD(device_attach, isp_pci_attach),
        { 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
        "isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);

static int
isp_pci_probe(device_t dev)
{
        switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
        case PCI_QLOGIC_ISP1020:
                device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
                break;
        case PCI_QLOGIC_ISP1080:
                device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
                break;
        case PCI_QLOGIC_ISP1240:
                device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
                break;
        case PCI_QLOGIC_ISP1280:
                device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
                break;
        case PCI_QLOGIC_ISP10160:
                device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
                break;
        case PCI_QLOGIC_ISP12160:
                if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
                        return (ENXIO);
                }
                device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
                break;
        case PCI_QLOGIC_ISP2100:
                device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
                break;
        case PCI_QLOGIC_ISP2200:
                device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
                break;
        case PCI_QLOGIC_ISP2300:
                device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
                break;
        case PCI_QLOGIC_ISP2312:
                device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
                break;
        case PCI_QLOGIC_ISP6312:
                device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
                break;
        default:
                return (ENXIO);
        }
        if (isp_announced == 0 && bootverbose) {
                printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
                    "Core Version %d.%d\n",
                    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
                    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
                isp_announced++;
        }
        /*
         * XXXX: Here is where we might load the f/w module
         * XXXX: (or increase a reference count to it).
         */
        return (BUS_PROBE_DEFAULT);
}
static int
isp_pci_attach(device_t dev)
{
        struct resource *regs, *irq;
        int tval, rtp, rgd, iqd, m1, m2, isp_debug, role;
        u_int32_t data, cmd, linesz, psize, basetype;
        struct isp_pcisoftc *pcs;
        struct ispsoftc *isp = NULL;
        struct ispmdvec *mdvp;
        const char *sptr;
        int locksetup = 0;

        /*
         * Figure out if we're supposed to skip this one.
         */

        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "disable", &tval) == 0 && tval) {
                device_printf(dev, "device is disabled\n");
                /* but return 0 so the !$)$)*!$*) unit isn't reused */
                return (0);
        }

        role = -1;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "role", &role) == 0 && role != -1) {
                role &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
                device_printf(dev, "setting role to 0x%x\n", role);
        } else {
#ifdef ISP_TARGET_MODE
                role = ISP_ROLE_TARGET;
#else
                role = ISP_DEFAULT_ROLES;
#endif
        }

        pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (pcs == NULL) {
                device_printf(dev, "cannot allocate softc\n");
                return (ENOMEM);
        }

        /*
         * Which should we try first - memory mapping or i/o mapping?
         *
         * We used to try memory first followed by i/o on alpha, otherwise
         * the reverse, but we should just try memory first all the time now.
         */
        m1 = PCIM_CMD_MEMEN;
        m2 = PCIM_CMD_PORTEN;

        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "prefer_iomap", &tval) == 0 && tval != 0) {
                m1 = PCIM_CMD_PORTEN;
                m2 = PCIM_CMD_MEMEN;
        }
        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "prefer_memmap", &tval) == 0 && tval != 0) {
                m1 = PCIM_CMD_MEMEN;
                m2 = PCIM_CMD_PORTEN;
        }

        linesz = PCI_DFLT_LNSZ;
        irq = regs = NULL;
        rgd = rtp = iqd = 0;

        cmd = pci_read_config(dev, PCIR_COMMAND, 2);
        if (cmd & m1) {
                rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
                rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
                regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
        }
        if (regs == NULL && (cmd & m2)) {
                rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
                rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
                regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
        }
        if (regs == NULL) {
                device_printf(dev, "unable to map any ports\n");
                goto bad;
        }
        if (bootverbose)
                device_printf(dev, "using %s space register mapping\n",
                    (rgd == IO_MAP_REG)? "I/O" : "Memory");
        pcs->pci_dev = dev;
        pcs->pci_reg = regs;
        pcs->pci_st = rman_get_bustag(regs);
        pcs->pci_sh = rman_get_bushandle(regs);

        pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
        pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
        pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
        pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
        pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
        mdvp = &mdvec;
        basetype = ISP_HA_SCSI_UNKNOWN;
        psize = sizeof (sdparam);
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
                mdvp = &mdvec;
                basetype = ISP_HA_SCSI_UNKNOWN;
                psize = sizeof (sdparam);
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
                mdvp = &mdvec_1080;
                basetype = ISP_HA_SCSI_1080;
                psize = sizeof (sdparam);
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
                mdvp = &mdvec_1080;
                basetype = ISP_HA_SCSI_1240;
                psize = 2 * sizeof (sdparam);
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
                mdvp = &mdvec_1080;
                basetype = ISP_HA_SCSI_1280;
                psize = 2 * sizeof (sdparam);
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
                mdvp = &mdvec_12160;
                basetype = ISP_HA_SCSI_10160;
                psize = sizeof (sdparam);
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
                mdvp = &mdvec_12160;
                basetype = ISP_HA_SCSI_12160;
                psize = 2 * sizeof (sdparam);
                pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
                    ISP1080_DMA_REGS_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
                mdvp = &mdvec_2100;
                basetype = ISP_HA_FC_2100;
                psize = sizeof (fcparam);
                pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
                    PCI_MBOX_REGS2100_OFF;
                if (pci_get_revid(dev) < 3) {
                        /*
                         * XXX: Need to get the actual revision
                         * XXX: number of the 2100 FB. At any rate,
                         * XXX: lower cache line size for early revision
                         * XXX: boards.
                         */
                        linesz = 1;
                }
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
                mdvp = &mdvec_2200;
                basetype = ISP_HA_FC_2200;
                psize = sizeof (fcparam);
                pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
                    PCI_MBOX_REGS2100_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
                mdvp = &mdvec_2300;
                basetype = ISP_HA_FC_2300;
                psize = sizeof (fcparam);
                pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
                    PCI_MBOX_REGS2300_OFF;
        }
        if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
            pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
                mdvp = &mdvec_2300;
                basetype = ISP_HA_FC_2312;
                psize = sizeof (fcparam);
                pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
                    PCI_MBOX_REGS2300_OFF;
        }
        isp = &pcs->pci_isp;
        isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
        if (isp->isp_param == NULL) {
                device_printf(dev, "cannot allocate parameter data\n");
                goto bad;
        }
        isp->isp_mdvec = mdvp;
        isp->isp_type = basetype;
        isp->isp_revision = pci_get_revid(dev);
        isp->isp_role = role;
        isp->isp_dev = dev;

        /*
         * Try and find firmware for this device.
         */

        if (isp_get_firmware_p) {
                int device = (int) pci_get_device(dev);
#ifdef ISP_TARGET_MODE
                (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
                (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
        }

        /*
         * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
         * are set.
         */
        cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
            PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
        if (IS_2300(isp)) {     /* per QLogic errata */
                cmd &= ~PCIM_CMD_INVEN;
        }
        if (IS_23XX(isp)) {
                /*
                 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
                 */
                isp->isp_touched = 1;

        }
        pci_write_config(dev, PCIR_COMMAND, cmd, 2);

        /*
         * Make sure the Cache Line Size register is set sensibly.
         */
        data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
        if (data != linesz) {
                data = PCI_DFLT_LNSZ;
                isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
                pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
        }

        /*
         * Make sure the Latency Timer is sane.
         */
        data = pci_read_config(dev, PCIR_LATTIMER, 1);
        if (data < PCI_DFLT_LTNCY) {
                data = PCI_DFLT_LTNCY;
                isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
                pci_write_config(dev, PCIR_LATTIMER, data, 1);
        }

        /*
         * Make sure we've disabled the ROM.
         */
        data = pci_read_config(dev, PCIR_ROMADDR, 4);
        data &= ~1;
        pci_write_config(dev, PCIR_ROMADDR, data, 4);

        iqd = 0;
        irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
            RF_ACTIVE | RF_SHAREABLE);
        if (irq == NULL) {
                device_printf(dev, "could not allocate interrupt\n");
                goto bad;
        }

        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "fwload_disable", &tval) == 0 && tval != 0) {
                isp->isp_confopts |= ISP_CFG_NORELOAD;
        }
        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "ignore_nvram", &tval) == 0 && tval != 0) {
                isp->isp_confopts |= ISP_CFG_NONVRAM;
        }
        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "fullduplex", &tval) == 0 && tval != 0) {
                isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
        }
#ifdef ISP_FW_CRASH_DUMP
        tval = 0;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "fw_dump_enable", &tval) == 0 && tval != 0) {
                size_t amt = 0;
                if (IS_2200(isp)) {
                        amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
                } else if (IS_23XX(isp)) {
                        amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
                }
                if (amt) {
                        FCPARAM(isp)->isp_dump_data =
                            malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
                } else {
                        device_printf(dev,
                            "f/w crash dumps not supported for this model\n");
                }
        }
#endif

        sptr = 0;
        if (resource_string_value(device_get_name(dev), device_get_unit(dev),
            "topology", (const char **) &sptr) == 0 && sptr != 0) {
                if (strcmp(sptr, "lport") == 0) {
                        isp->isp_confopts |= ISP_CFG_LPORT;
                } else if (strcmp(sptr, "nport") == 0) {
                        isp->isp_confopts |= ISP_CFG_NPORT;
                } else if (strcmp(sptr, "lport-only") == 0) {
                        isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
                } else if (strcmp(sptr, "nport-only") == 0) {
                        isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
                }
        }

        /*
         * Because the resource_*_value functions can neither return
         * 64 bit integer values, nor can they be directly coerced
         * to interpret the right hand side of the assignment as
         * you want them to interpret it, we have to force WWN
         * hint replacement to specify WWN strings with a leading
         * 'w' (e.g. w50000000aaaa0001). Sigh.
         */
        sptr = 0;
        tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
            "portwwn", (const char **) &sptr);
        if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
                char *eptr = 0;
                isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
                if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
                        device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
                        isp->isp_osinfo.default_port_wwn = 0;
                } else {
                        isp->isp_confopts |= ISP_CFG_OWNWWPN;
                }
        }
        if (isp->isp_osinfo.default_port_wwn == 0) {
                isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
        }

        sptr = 0;
        tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
            "nodewwn", (const char **) &sptr);
        if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
                char *eptr = 0;
                isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
                if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
                        device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
                        isp->isp_osinfo.default_node_wwn = 0;
                } else {
                        isp->isp_confopts |= ISP_CFG_OWNWWNN;
                }
        }
        if (isp->isp_osinfo.default_node_wwn == 0) {
                isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
        }

        isp->isp_osinfo.default_id = -1;
        if (resource_int_value(device_get_name(dev), device_get_unit(dev),
            "iid", &tval) == 0) {
                isp->isp_osinfo.default_id = tval;
                isp->isp_confopts |= ISP_CFG_OWNLOOPID;
        }
        if (isp->isp_osinfo.default_id == -1) {
                if (IS_FC(isp)) {
                        isp->isp_osinfo.default_id = 109;
                } else {
                        isp->isp_osinfo.default_id = 7;
                }
        }

        isp_debug = 0;
        (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
            "debug", &isp_debug);

        /* Make sure the lock is set up. */
        mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
        locksetup++;

        if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
                device_printf(dev, "could not setup interrupt\n");
                goto bad;
        }

        /*
         * Set up logging levels.
         */
        if (isp_debug) {
                isp->isp_dblev = isp_debug;
        } else {
                isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
        }
        if (bootverbose)
                isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

        /*
         * Last minute checks...
         */
        if (IS_2312(isp)) {
                isp->isp_port = pci_get_function(dev);
        }

        /*
         * Make sure we're in reset state.
         */
        ISP_LOCK(isp);
        isp_reset(isp);
        if (isp->isp_state != ISP_RESETSTATE) {
                ISP_UNLOCK(isp);
                goto bad;
        }
        isp_init(isp);
        if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
                isp_uninit(isp);
                ISP_UNLOCK(isp);
                goto bad;
        }
        isp_attach(isp);
        if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
                isp_uninit(isp);
                ISP_UNLOCK(isp);
                goto bad;
        }
        /*
         * XXXX: Here is where we might unload the f/w module
         * XXXX: (or decrease the reference count to it).
         */
        ISP_UNLOCK(isp);
        return (0);

bad:

        if (pcs && pcs->ih) {
                (void) bus_teardown_intr(dev, irq, pcs->ih);
        }

        if (locksetup && isp) {
                mtx_destroy(&isp->isp_osinfo.lock);
        }

        if (irq) {
                (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
        }


        if (regs) {
                (void) bus_release_resource(dev, rtp, rgd, regs);
        }

        if (pcs) {
                if (pcs->pci_isp.isp_param)
                        free(pcs->pci_isp.isp_param, M_DEVBUF);
                free(pcs, M_DEVBUF);
        }

        /*
         * XXXX: Here is where we might unload the f/w module
         * XXXX: (or decrease the reference count to it).
         */
        return (ENXIO);
}
static void
isp_pci_intr(void *arg)
{
        struct ispsoftc *isp = arg;
        u_int16_t isr, sema, mbox;

        ISP_LOCK(isp);
        isp->isp_intcnt++;
        if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
                isp->isp_intbogus++;
        } else {
                int iok = isp->isp_osinfo.intsok;
                isp->isp_osinfo.intsok = 0;
                isp_intr(isp, isr, sema, mbox);
                isp->isp_osinfo.intsok = iok;
        }
        ISP_UNLOCK(isp);
}


#define IspVirt2Off(a, x) \
        (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
        _BLK_REG_SHFT] + ((x) & 0xff))

#define BXR2(pcs, off) \
        bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define BXW2(pcs, off, v) \
        bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)


static INLINE int
isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        u_int16_t val0, val1;
        int i = 0;

        do {
                val0 = BXR2(pcs, IspVirt2Off(isp, off));
                val1 = BXR2(pcs, IspVirt2Off(isp, off));
        } while (val0 != val1 && ++i < 1000);
        if (val0 != val1) {
                return (1);
        }
        *rp = val0;
        return (0);
}

static int
isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbp)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        u_int16_t isr, sema;

        if (IS_2100(isp)) {
                if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
                        return (0);
                }
                if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
                        return (0);
                }
        } else {
                isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
                sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
        }
        isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
        isr &= INT_PENDING_MASK(isp);
        sema &= BIU_SEMA_LOCK;
        if (isr == 0 && sema == 0) {
                return (0);
        }
        *isrp = isr;
        if ((*semap = sema) != 0) {
                if (IS_2100(isp)) {
                        if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
                                return (0);
                        }
                } else {
                        *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
                }
        }
        return (1);
}

static int
isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp,
    u_int16_t *semap, u_int16_t *mbox0p)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        u_int32_t r2hisr;

        if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
                *isrp = 0;
                return (0);
        }
        r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
            IspVirt2Off(pcs, BIU_R2HSTSLO));
        isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
        if ((r2hisr & BIU_R2HST_INTR) == 0) {
                *isrp = 0;
                return (0);
        }
        switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
        case ISPR2HST_ROM_MBX_OK:
        case ISPR2HST_ROM_MBX_FAIL:
        case ISPR2HST_MBX_OK:
        case ISPR2HST_MBX_FAIL:
        case ISPR2HST_ASYNC_EVENT:
                *isrp = r2hisr & 0xffff;
                *mbox0p = (r2hisr >> 16);
                *semap = 1;
                return (1);
        case ISPR2HST_RIO_16:
                *isrp = r2hisr & 0xffff;
                *mbox0p = ASYNC_RIO1;
                *semap = 1;
                return (1);
        case ISPR2HST_FPOST:
                *isrp = r2hisr & 0xffff;
                *mbox0p = ASYNC_CMD_CMPLT;
                *semap = 1;
                return (1);
        case ISPR2HST_FPOST_CTIO:
                *isrp = r2hisr & 0xffff;
                *mbox0p = ASYNC_CTIO_DONE;
                *semap = 1;
                return (1);
        case ISPR2HST_RSPQ_UPDATE:
                *isrp = r2hisr & 0xffff;
                *mbox0p = 0;
                *semap = 0;
                return (1);
        default:
                return (0);
        }
}

static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
        u_int16_t rv;
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        int oldconf = 0;

        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
                    oldconf | BIU_PCI_CONF1_SXP);
        }
        rv = BXR2(pcs, IspVirt2Off(isp, regoff));
        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
        }
        return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        int oldconf = 0;

        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
                    oldconf | BIU_PCI_CONF1_SXP);
        }
        BXW2(pcs, IspVirt2Off(isp, regoff), val);
        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
        }
}

static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
        u_int16_t rv, oc = 0;
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
            (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
                u_int16_t tc;
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                tc = oc & ~BIU_PCI1080_CONF1_DMA;
                if (regoff & SXP_BANK1_SELECT)
                        tc |= BIU_PCI1080_CONF1_SXP1;
                else
                        tc |= BIU_PCI1080_CONF1_SXP0;
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
        } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
                oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
                    oc | BIU_PCI1080_CONF1_DMA);
        }
        rv = BXR2(pcs, IspVirt2Off(isp, regoff));
        if (oc) {
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
        }
        return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
        int oc = 0;

        if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
            (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
                u_int16_t tc;
                /*
                 * We will assume that someone has paused the RISC processor.
                 */
                oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                tc = oc & ~BIU_PCI1080_CONF1_DMA;
                if (regoff & SXP_BANK1_SELECT)
                        tc |= BIU_PCI1080_CONF1_SXP1;
                else
                        tc |= BIU_PCI1080_CONF1_SXP0;
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
        } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
                oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
                    oc | BIU_PCI1080_CONF1_DMA);
        }
        BXW2(pcs, IspVirt2Off(isp, regoff), val);
        if (oc) {
                BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
        }
}


struct imush {
        struct ispsoftc *isp;
        int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct imush *imushp = (struct imush *) arg;
        if (error) {
                imushp->error = error;
        } else {
                struct ispsoftc *isp = imushp->isp;
                bus_addr_t addr = segs->ds_addr;

                isp->isp_rquest_dma = addr;
                addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
                isp->isp_result_dma = addr;
                if (IS_FC(isp)) {
                        addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
                        FCPARAM(isp)->isp_scdma = addr;
                }
        }
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)

static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        caddr_t base;
        u_int32_t len;
        int i, error, ns;
        bus_size_t alim, slim, xlim;
        struct imush im;

        /*
         * Already been here? If so, leave...
         */
        if (isp->isp_rquest) {
                return (0);
        }

#ifdef ISP_DAC_SUPPORTED
        alim = BUS_SPACE_UNRESTRICTED;
        xlim = BUS_SPACE_MAXADDR_32BIT;
#else
        xlim = alim = BUS_SPACE_MAXADDR_32BIT;
#endif
        if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
                slim = BUS_SPACE_MAXADDR_32BIT;
        } else {
                slim = BUS_SPACE_MAXADDR_24BIT;
        }

        ISP_UNLOCK(isp);
        if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim,
            NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
            busdma_lock_mutex, &Giant, &pcs->dmat)) {
                isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
                ISP_LOCK(isp);
                return (1);
        }


        len = sizeof (XS_T **) * isp->isp_maxcmds;
        isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
        if (isp->isp_xflist == NULL) {
                isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
                ISP_LOCK(isp);
                return (1);
        }
#ifdef ISP_TARGET_MODE
        len = sizeof (void **) * isp->isp_maxcmds;
        isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
        if (isp->isp_tgtlist == NULL) {
                isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
                ISP_LOCK(isp);
                return (1);
        }
#endif
        len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
        pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
        if (pcs->dmaps == NULL) {
                isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
                free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
                free(isp->isp_tgtlist, M_DEVBUF);
#endif
                ISP_LOCK(isp);
                return (1);
        }

        /*
         * Allocate and map the request, result queues, plus FC scratch area.
         */
        len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
        len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
        if (IS_FC(isp)) {
                len += ISP2100_SCRLEN;
        }

        ns = (len / PAGE_SIZE) + 1;
        if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, xlim, xlim,
            NULL, NULL, len, ns, slim, 0, busdma_lock_mutex, &Giant,
            &isp->isp_cdmat)) {
                isp_prt(isp, ISP_LOGERR,
                    "cannot create a dma tag for control spaces");
                free(pcs->dmaps, M_DEVBUF);
                free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
                free(isp->isp_tgtlist, M_DEVBUF);
#endif
                ISP_LOCK(isp);
                return (1);
        }

        if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
            &isp->isp_cdmap) != 0) {
                isp_prt(isp, ISP_LOGERR,
                    "cannot allocate %d bytes of CCB memory", len);
                bus_dma_tag_destroy(isp->isp_cdmat);
                free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
                free(isp->isp_tgtlist, M_DEVBUF);
#endif
                free(pcs->dmaps, M_DEVBUF);
                ISP_LOCK(isp);
                return (1);
        }

        for (i = 0; i < isp->isp_maxcmds; i++) {
                error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
                if (error) {
                        isp_prt(isp, ISP_LOGERR,
                            "error %d creating per-cmd DMA maps", error);
                        while (--i >= 0) {
                                bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
                        }
                        goto bad;
                }
        }

        im.isp = isp;
        im.error = 0;
        bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
        if (im.error) {
                isp_prt(isp, ISP_LOGERR,
                    "error %d loading dma map for control areas", im.error);
                goto bad;
        }

        isp->isp_rquest = base;
        base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
        isp->isp_result = base;
        if (IS_FC(isp)) {
                base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
                FCPARAM(isp)->isp_scratch = base;
        }
        ISP_LOCK(isp);
        return (0);

bad:
        bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
        bus_dma_tag_destroy(isp->isp_cdmat);
        free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
        free(isp->isp_tgtlist, M_DEVBUF);
#endif
        free(pcs->dmaps, M_DEVBUF);
        ISP_LOCK(isp);
        isp->isp_rquest = NULL;
        return (1);
}

typedef struct {
        struct ispsoftc *isp;
        void *cmd_token;
        void *rq;
        u_int16_t *nxtip;
        u_int16_t optr;
        u_int error;
} mush_t;

#define MUSHERR_NOQENTRIES -2

#ifdef ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define STATUS_WITH_DATA 1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
        mush_t *mp;
        struct ccb_scsiio *csio;
        struct ispsoftc *isp;
        struct isp_pcisoftc *pcs;
        bus_dmamap_t *dp;
        ct_entry_t *cto, *qe;
        u_int8_t scsi_status;
        u_int16_t curi, nxti, handle;
        u_int32_t sflags;
        int32_t resid;
        int nth_ctio, nctios, send_status;

        mp = (mush_t *) arg;
        if (error) {
                mp->error = error;
                return;
        }

        isp = mp->isp;
        csio = mp->cmd_token;
        cto = mp->rq;
        curi = isp->isp_reqidx;
        qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

        cto->ct_xfrlen = 0;
        cto->ct_seg_count = 0;
        cto->ct_header.rqs_entry_count = 1;
        MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

        if (nseg == 0) {
                cto->ct_header.rqs_seqno = 1;
                isp_prt(isp, ISP_LOGTDEBUG1,
                    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
                    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
                    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
                    cto->ct_scsi_status, cto->ct_resid);
                ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
                isp_put_ctio(isp, cto, qe);
                return;
        }

        nctios = nseg / ISP_RQDSEG;
        if (nseg % ISP_RQDSEG) {
                nctios++;
        }

        /*
         * Save syshandle, and potentially any SCSI status, which we'll
         * reinsert on the last CTIO we're going to send.
         */

        handle = cto->ct_syshandle;
        cto->ct_syshandle = 0;
        cto->ct_header.rqs_seqno = 0;
        send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

        if (send_status) {
                sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
                cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
                /*
                 * Preserve residual.
                 */
                resid = cto->ct_resid;

                /*
                 * Save actual SCSI status.
                 */
                scsi_status = cto->ct_scsi_status;

#ifndef STATUS_WITH_DATA
                sflags |= CT_NO_DATA;
                /*
                 * We can't do a status at the same time as a data CTIO, so
                 * we need to synthesize an extra CTIO at this level.
                 */
                nctios++;
#endif
        } else {
                sflags = scsi_status = resid = 0;
        }

        cto->ct_resid = 0;
        cto->ct_scsi_status = 0;

        pcs = (struct isp_pcisoftc *)isp;
        dp = &pcs->dmaps[isp_handle_index(handle)];
        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
        } else {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
        }

        nxti = *mp->nxtip;

        for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
                int seglim;

                seglim = nseg;
                if (seglim) {
                        int seg;

                        if (seglim > ISP_RQDSEG)
                                seglim = ISP_RQDSEG;

                        for (seg = 0; seg < seglim; seg++, nseg--) {
                                /*
                                 * Unlike normal initiator commands, we don't
                                 * do any swizzling here.
                                 */
                                cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
                                cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
                                cto->ct_xfrlen += dm_segs->ds_len;
                                dm_segs++;
                        }
                        cto->ct_seg_count = seg;
                } else {
                        /*
                         * This case should only happen when we're sending an
                         * extra CTIO with final status.
                         */
                        if (send_status == 0) {
                                isp_prt(isp, ISP_LOGWARN,
                                    "tdma_mk ran out of segments");
                                mp->error = EINVAL;
                                return;
                        }
                }

                /*
                 * At this point, the fields ct_lun, ct_iid, ct_tagval,
                 * ct_tagtype, and ct_timeout have been carried over
                 * unchanged from what our caller had set.
                 *
                 * The dataseg fields and the seg_count fields we just got
                 * through setting. The data direction we've preserved all
                 * along and only clear it if we're now sending status.
                 */

                if (nth_ctio == nctios - 1) {
                        /*
                         * We're the last in a sequence of CTIOs, so mark
                         * this CTIO and save the handle to the CCB such that
                         * when this CTIO completes we can free dma resources
                         * and do whatever else we need to do to finish the
                         * rest of the command. We *don't* give this to the
                         * firmware to work on- the caller will do that.
                         */

                        cto->ct_syshandle = handle;
                        cto->ct_header.rqs_seqno = 1;

                        if (send_status) {
                                cto->ct_scsi_status = scsi_status;
                                cto->ct_flags |= sflags;
                                cto->ct_resid = resid;
                        }
                        if (send_status) {
                                isp_prt(isp, ISP_LOGTDEBUG1,
                                    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
                                    "scsi status %x resid %d",
                                    cto->ct_fwhandle, csio->ccb_h.target_lun,
                                    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
                                    cto->ct_scsi_status, cto->ct_resid);
                        } else {
                                isp_prt(isp, ISP_LOGTDEBUG1,
                                    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
                                    cto->ct_fwhandle, csio->ccb_h.target_lun,
                                    cto->ct_iid, cto->ct_tag_val,
                                    cto->ct_flags);
                        }
                        isp_put_ctio(isp, cto, qe);
                        ISP_TDQE(isp, "last tdma_mk", curi, cto);
                        if (nctios > 1) {
                                MEMORYBARRIER(isp, SYNC_REQUEST,
                                    curi, QENTRY_LEN);
                        }
                } else {
                        ct_entry_t *oqe = qe;

                        /*
                         * Make sure syshandle fields are clean
                         */
                        cto->ct_syshandle = 0;
                        cto->ct_header.rqs_seqno = 0;

                        isp_prt(isp, ISP_LOGTDEBUG1,
                            "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
                            cto->ct_fwhandle, csio->ccb_h.target_lun,
                            cto->ct_iid, cto->ct_flags);

                        /*
                         * Get a new CTIO
                         */
                        qe = (ct_entry_t *)
                            ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
                        nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
                        if (nxti == mp->optr) {
                                isp_prt(isp, ISP_LOGTDEBUG0,
                                    "Queue Overflow in tdma_mk");
                                mp->error = MUSHERR_NOQENTRIES;
                                return;
                        }

                        /*
                         * Now that we're done with the old CTIO,
                         * flush it out to the request queue.
                         */
                        ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
                        isp_put_ctio(isp, cto, oqe);
                        if (nth_ctio != 0) {
                                MEMORYBARRIER(isp, SYNC_REQUEST, curi,
                                    QENTRY_LEN);
                        }
                        curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

                        /*
                         * Reset some fields in the CTIO so we can reuse
                         * for the next one we'll flush to the request
                         * queue.
                         */
                        cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
                        cto->ct_header.rqs_entry_count = 1;
                        cto->ct_header.rqs_flags = 0;
                        cto->ct_status = 0;
                        cto->ct_scsi_status = 0;
                        cto->ct_xfrlen = 0;
                        cto->ct_resid = 0;
                        cto->ct_seg_count = 0;
                        MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
                }
        }
        *mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
 */

static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
        mush_t *mp;
        struct ccb_scsiio *csio;
        struct ispsoftc *isp;
        ct2_entry_t *cto, *qe;
        u_int16_t curi, nxti;
        int segcnt;

        mp = (mush_t *) arg;
        if (error) {
                mp->error = error;
                return;
        }

        isp = mp->isp;
        csio = mp->cmd_token;
        cto = mp->rq;

        curi = isp->isp_reqidx;
        qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

        if (nseg == 0) {
                if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
                        isp_prt(isp, ISP_LOGWARN,
                            "dma2_tgt_fc, a status CTIO2 without MODE1 "
                            "set (0x%x)", cto->ct_flags);
                        mp->error = EINVAL;
                        return;
                }
                /*
                 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
                 * flags to NO DATA and clear relative offset flags.
                 * We preserve the ct_resid and the response area.
                 */
                cto->ct_header.rqs_seqno = 1;
                cto->ct_seg_count = 0;
                cto->ct_reloff = 0;
                isp_prt(isp, ISP_LOGTDEBUG1,
                    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
                    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
                    cto->ct_iid, cto->ct_flags, cto->ct_status,
                    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
                isp_put_ctio2(isp, cto, qe);
                ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
                return;
        }

        if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
                isp_prt(isp, ISP_LOGERR,
                    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
                    "(0x%x)", cto->ct_flags);
                mp->error = EINVAL;
                return;
        }


        nxti = *mp->nxtip;

        /*
         * Set up the CTIO2 data segments.
         */
        for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg;
            cto->ct_seg_count++, segcnt++) {
                cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base =
                    dm_segs[segcnt].ds_addr;
                cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count =
                    dm_segs[segcnt].ds_len;
                cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
                isp_prt(isp, ISP_LOGTDEBUG1,
                    "isp_send_ctio2: ent0[%d]0x%jx:%ju",
                    cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
                    (uintmax_t)dm_segs[segcnt].ds_len);
        }

        while (segcnt < nseg) {
                u_int16_t curip;
                int seg;
                ispcontreq_t local, *crq = &local, *qep;

                qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
                curip = nxti;
                nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
                if (nxti == mp->optr) {
                        ISP_UNLOCK(isp);
                        isp_prt(isp, ISP_LOGTDEBUG0,
                            "tdma_mkfc: request queue overflow");
                        mp->error = MUSHERR_NOQENTRIES;
                        return;
                }
                cto->ct_header.rqs_entry_count++;
                MEMZERO((void *)crq, sizeof (*crq));
                crq->req_header.rqs_entry_count = 1;
                crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
                for (seg = 0; segcnt < nseg && seg < ISP_CDSEG;
                    segcnt++, seg++) {
                        crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr;
                        crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len;
                        isp_prt(isp, ISP_LOGTDEBUG1,
                            "isp_send_ctio2: ent%d[%d]%jx:%ju",
                            cto->ct_header.rqs_entry_count-1, seg,
                            (uintmax_t)dm_segs[segcnt].ds_addr,
                            (uintmax_t)dm_segs[segcnt].ds_len);
                        cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
                        cto->ct_seg_count++;
                }
                MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
                isp_put_cont_req(isp, crq, qep);
                ISP_TDQE(isp, "cont entry", curi, qep);
        }

        /*
         * Now do final twiddling for the CTIO itself.
         */
        cto->ct_header.rqs_seqno = 1;
        isp_prt(isp, ISP_LOGTDEBUG1,
            "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
            cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
            cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
            cto->ct_resid);
        isp_put_ctio2(isp, cto, qe);
        ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
        *mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);
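
/*
 * With PAE, data segments carry 64-bit bus addresses, so the dma2 callback
 * below builds RQSTYPE_T3RQS/RQSTYPE_A64 entries (with RQSTYPE_A64_CONT
 * continuations) and splits each address into low and high halves;
 * otherwise the plain 32-bit request and continuation entries are used.
 */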
#ifdef PAE
#define LOWD(x) ((uint32_t) x)
#define HIWD(x) ((uint32_t) (x >> 32))

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
        mush_t *mp;
        struct ispsoftc *isp;
        struct ccb_scsiio *csio;
        struct isp_pcisoftc *pcs;
        bus_dmamap_t *dp;
        bus_dma_segment_t *eseg;
        ispreq64_t *rq;
        int seglim, datalen;
        u_int16_t nxti;

        mp = (mush_t *) arg;
        if (error) {
                mp->error = error;
                return;
        }

        if (nseg < 1) {
                isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
                mp->error = EFAULT;
                return;
        }
        csio = mp->cmd_token;
        isp = mp->isp;
        rq = mp->rq;
        pcs = (struct isp_pcisoftc *)mp->isp;
        dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
        nxti = *mp->nxtip;

        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
        } else {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
        }
        datalen = XS_XFRLEN(csio);

        /*
         * We're passed an initial partially filled in entry that
         * has most fields filled in except for data transfer
         * related values.
         *
         * Our job is to fill in the initial request queue entry and
         * then to start allocating and filling in continuation entries
         * until we've covered the entire transfer.
         */

        if (IS_FC(isp)) {
                rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
                seglim = ISP_RQDSEG_T3;
                ((ispreqt3_t *)rq)->req_totalcnt = datalen;
                if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                        ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
                } else {
                        ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
                }
        } else {
                rq->req_header.rqs_entry_type = RQSTYPE_A64;
                if (csio->cdb_len > 12) {
                        seglim = 0;
                } else {
                        seglim = ISP_RQDSEG_A64;
                }
                if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                        rq->req_flags |= REQFLAG_DATA_IN;
                } else {
                        rq->req_flags |= REQFLAG_DATA_OUT;
                }
        }

        eseg = dm_segs + nseg;

        while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
                if (IS_FC(isp)) {
                        ispreqt3_t *rq3 = (ispreqt3_t *)rq;
                        rq3->req_dataseg[rq3->req_seg_count].ds_base =
                            LOWD(dm_segs->ds_addr);
                        rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
                            HIWD(dm_segs->ds_addr);
                        rq3->req_dataseg[rq3->req_seg_count].ds_count =
                            dm_segs->ds_len;
                } else {
                        rq->req_dataseg[rq->req_seg_count].ds_base =
                            LOWD(dm_segs->ds_addr);
                        rq->req_dataseg[rq->req_seg_count].ds_basehi =
                            HIWD(dm_segs->ds_addr);
                        rq->req_dataseg[rq->req_seg_count].ds_count =
                            dm_segs->ds_len;
                }
                datalen -= dm_segs->ds_len;
                rq->req_seg_count++;
                dm_segs++;
        }

        while (datalen > 0 && dm_segs != eseg) {
                u_int16_t onxti;
                ispcontreq64_t local, *crq = &local, *cqe;

                cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
                onxti = nxti;
                nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
                if (nxti == mp->optr) {
                        isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
                        mp->error = MUSHERR_NOQENTRIES;
                        return;
                }
                rq->req_header.rqs_entry_count++;
                MEMZERO((void *)crq, sizeof (*crq));
                crq->req_header.rqs_entry_count = 1;
                crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;

                seglim = 0;
                while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
                        crq->req_dataseg[seglim].ds_base =
                            LOWD(dm_segs->ds_addr);
                        crq->req_dataseg[seglim].ds_basehi =
                            HIWD(dm_segs->ds_addr);
                        crq->req_dataseg[seglim].ds_count =
                            dm_segs->ds_len;
                        rq->req_seg_count++;
                        dm_segs++;
                        seglim++;
                        datalen -= dm_segs->ds_len;
                }
                isp_put_cont64_req(isp, crq, cqe);
                MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
        }
        *mp->nxtip = nxti;
}
#else
static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
        mush_t *mp;
        struct ispsoftc *isp;
        struct ccb_scsiio *csio;
        struct isp_pcisoftc *pcs;
        bus_dmamap_t *dp;
        bus_dma_segment_t *eseg;
        ispreq_t *rq;
        int seglim, datalen;
        u_int16_t nxti;

        mp = (mush_t *) arg;
        if (error) {
                mp->error = error;
                return;
        }

        if (nseg < 1) {
                isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
                mp->error = EFAULT;
                return;
        }
        csio = mp->cmd_token;
        isp = mp->isp;
        rq = mp->rq;
        pcs = (struct isp_pcisoftc *)mp->isp;
        dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
        nxti = *mp->nxtip;

        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
        } else {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
        }

        datalen = XS_XFRLEN(csio);

        /*
         * We're passed an initial partially filled in entry that
         * has most fields filled in except for data transfer
         * related values.
         *
         * Our job is to fill in the initial request queue entry and
         * then to start allocating and filling in continuation entries
         * until we've covered the entire transfer.
         */

        if (IS_FC(isp)) {
                seglim = ISP_RQDSEG_T2;
                ((ispreqt2_t *)rq)->req_totalcnt = datalen;
                if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                        ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
                } else {
                        ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
                }
        } else {
                if (csio->cdb_len > 12) {
                        seglim = 0;
                } else {
                        seglim = ISP_RQDSEG;
                }
                if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                        rq->req_flags |= REQFLAG_DATA_IN;
                } else {
                        rq->req_flags |= REQFLAG_DATA_OUT;
                }
        }

        eseg = dm_segs + nseg;

        while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
                if (IS_FC(isp)) {
                        ispreqt2_t *rq2 = (ispreqt2_t *)rq;
                        rq2->req_dataseg[rq2->req_seg_count].ds_base =
                            dm_segs->ds_addr;
                        rq2->req_dataseg[rq2->req_seg_count].ds_count =
                            dm_segs->ds_len;
                } else {
                        rq->req_dataseg[rq->req_seg_count].ds_base =
                            dm_segs->ds_addr;
                        rq->req_dataseg[rq->req_seg_count].ds_count =
                            dm_segs->ds_len;
                }
                datalen -= dm_segs->ds_len;
                rq->req_seg_count++;
                dm_segs++;
        }

        while (datalen > 0 && dm_segs != eseg) {
                u_int16_t onxti;
                ispcontreq_t local, *crq = &local, *cqe;

                cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
                onxti = nxti;
                nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
                if (nxti == mp->optr) {
                        isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
                        mp->error = MUSHERR_NOQENTRIES;
                        return;
                }
                rq->req_header.rqs_entry_count++;
                MEMZERO((void *)crq, sizeof (*crq));
                crq->req_header.rqs_entry_count = 1;
                crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

                seglim = 0;
                while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
                        crq->req_dataseg[seglim].ds_base =
                            dm_segs->ds_addr;
                        crq->req_dataseg[seglim].ds_count =
                            dm_segs->ds_len;
                        rq->req_seg_count++;
                        dm_segs++;
                        seglim++;
                        datalen -= dm_segs->ds_len;
                }
                isp_put_cont_req(isp, crq, cqe);
                MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
        }
        *mp->nxtip = nxti;
}
#endif
static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        ispreq_t *qep;
        bus_dmamap_t *dp = NULL;
        mush_t mush, *mp;
        void (*eptr)(void *, bus_dma_segment_t *, int, int);

        qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef ISP_TARGET_MODE
        if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
                if (IS_FC(isp)) {
                        eptr = tdma_mkfc;
                } else {
                        eptr = tdma_mk;
                }
                if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
                    (csio->dxfer_len == 0)) {
                        mp = &mush;
                        mp->isp = isp;
                        mp->cmd_token = csio;
                        mp->rq = rq;    /* really a ct_entry_t or ct2_entry_t */
                        mp->nxtip = nxtip;
                        mp->optr = optr;
                        mp->error = 0;
                        (*eptr)(mp, NULL, 0, 0);
                        goto mbxsync;
                }
        } else
#endif
        eptr = dma2;


        if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
            (csio->dxfer_len == 0)) {
                rq->req_seg_count = 1;
                goto mbxsync;
        }

        /*
         * Do a virtual grapevine step to collect info for
         * the callback dma allocation that we have to use...
         */
        mp = &mush;
        mp->isp = isp;
        mp->cmd_token = csio;
        mp->rq = rq;
        mp->nxtip = nxtip;
        mp->optr = optr;
        mp->error = 0;

        if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
                if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
                        int error, s;
                        dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
                        s = splsoftvm();
                        error = bus_dmamap_load(pcs->dmat, *dp,
                            csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
                        if (error == EINPROGRESS) {
                                bus_dmamap_unload(pcs->dmat, *dp);
                                mp->error = EINVAL;
                                isp_prt(isp, ISP_LOGERR,
                                    "deferred dma allocation not supported");
                        } else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
                                isp_prt(isp, ISP_LOGERR,
                                    "error %d in dma mapping code", error);
#endif
                                mp->error = error;
                        }
                        splx(s);
                } else {
                        /* Pointer to physical buffer */
                        struct bus_dma_segment seg;
                        seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
                        seg.ds_len = csio->dxfer_len;
                        (*eptr)(mp, &seg, 1, 0);
                }
        } else {
                struct bus_dma_segment *segs;

                if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
                        isp_prt(isp, ISP_LOGERR,
                            "Physical segment pointers unsupported");
                        mp->error = EINVAL;
                } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
                        isp_prt(isp, ISP_LOGERR,
                            "Virtual segment addresses unsupported");
                        mp->error = EINVAL;
                } else {
                        /* Just use the segments provided */
                        segs = (struct bus_dma_segment *) csio->data_ptr;
                        (*eptr)(mp, segs, csio->sglist_cnt, 0);
                }
        }
        if (mp->error) {
                int retval = CMD_COMPLETE;
                if (mp->error == MUSHERR_NOQENTRIES) {
                        retval = CMD_EAGAIN;
                } else if (mp->error == EFBIG) {
                        XS_SETERR(csio, CAM_REQ_TOO_BIG);
                } else if (mp->error == EINVAL) {
                        XS_SETERR(csio, CAM_REQ_INVALID);
                } else {
                        XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
                }
                return (retval);
        }
mbxsync:
        switch (rq->req_header.rqs_entry_type) {
        case RQSTYPE_REQUEST:
                isp_put_request(isp, rq, qep);
                break;
        case RQSTYPE_CMDONLY:
                isp_put_extended_request(isp, (ispextreq_t *)rq,
                    (ispextreq_t *)qep);
                break;
        case RQSTYPE_T2RQS:
                isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
                break;
        case RQSTYPE_A64:
        case RQSTYPE_T3RQS:
                isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
                break;
        }
        return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
        if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
        } else {
                bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
        }
        bus_dmamap_unload(pcs->dmat, *dp);
}


static void
isp_pci_reset1(struct ispsoftc *isp)
{
        /* Make sure the BIOS is disabled */
        isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
        /* and enable interrupts */
        ENABLE_INTS(isp);
}
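
/*
 * Debug helper: dump the BIU (or 2X00 CSR), interrupt and mailbox
 * registers, pausing the RISC around the SCSI-only DMA and SXP reads.
 */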
static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
        struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
        if (msg)
                printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
        else
                printf("%s:\n", device_get_nameunit(isp->isp_dev));
        if (IS_SCSI(isp))
                printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
        else
                printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
        printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
            ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
        printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


        if (IS_SCSI(isp)) {
                ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
                printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
                    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
                    ISP_READ(isp, CDMA_FIFO_STS));
                printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
                    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
                    ISP_READ(isp, DDMA_FIFO_STS));
                printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
                    ISP_READ(isp, SXP_INTERRUPT),
                    ISP_READ(isp, SXP_GROSS_ERR),
                    ISP_READ(isp, SXP_PINS_CTRL));
                ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
        }
        printf("    mbox regs: %x %x %x %x %x\n",
            ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
            ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
            ISP_READ(isp, OUTMAILBOX4));
        printf("    PCI Status Command/Status=%x\n",
            pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}