/*-
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#ifdef ISP_TARGET_MODE
#ifdef	PAE
#error	"PAE and ISP_TARGET_MODE not supported yet"
#endif
#endif

#include <dev/isp/isp_freebsd.h>

static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int);
static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t);
static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int);
static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t);
static int
isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int
isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *);
static int isp_pci_mbxdma(struct ispsoftc *);
static int
isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t);
static void
isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t);

static void isp_pci_reset1(struct ispsoftc *);
static void isp_pci_dumpregs(struct ispsoftc *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};
extern ispfwfunc *isp_get_firmware_p;

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,	isp_pci_probe),
	DEVMETHOD(device_attach, isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int tval, rtp, rgd, iqd, m1, m2, isp_debug, role;
	u_int32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp = NULL;
	struct ispmdvec *mdvp;
	const char *sptr;
	int locksetup = 0;

	/*
	 * Figure out if we're supposed to skip this one.
372 */ 373 374 tval = 0; 375 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 376 "disable", &tval) == 0 && tval) { 377 device_printf(dev, "device is disabled\n"); 378 /* but return 0 so the !$)$)*!$*) unit isn't reused */ 379 return (0); 380 } 381 382 role = -1; 383 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 384 "role", &role) == 0 && role != -1) { 385 role &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 386 device_printf(dev, "setting role to 0x%x\n", role); 387 } else { 388 #ifdef ISP_TARGET_MODE 389 role = ISP_ROLE_TARGET; 390 #else 391 role = ISP_DEFAULT_ROLES; 392 #endif 393 } 394 395 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO); 396 if (pcs == NULL) { 397 device_printf(dev, "cannot allocate softc\n"); 398 return (ENOMEM); 399 } 400 401 /* 402 * Which we should try first - memory mapping or i/o mapping? 403 * 404 * We used to try memory first followed by i/o on alpha, otherwise 405 * the reverse, but we should just try memory first all the time now. 406 */ 407 m1 = PCIM_CMD_MEMEN; 408 m2 = PCIM_CMD_PORTEN; 409 410 tval = 0; 411 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 412 "prefer_iomap", &tval) == 0 && tval != 0) { 413 m1 = PCIM_CMD_PORTEN; 414 m2 = PCIM_CMD_MEMEN; 415 } 416 tval = 0; 417 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 418 "prefer_memmap", &tval) == 0 && tval != 0) { 419 m1 = PCIM_CMD_MEMEN; 420 m2 = PCIM_CMD_PORTEN; 421 } 422 423 linesz = PCI_DFLT_LNSZ; 424 irq = regs = NULL; 425 rgd = rtp = iqd = 0; 426 427 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 428 if (cmd & m1) { 429 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 430 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 431 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 432 } 433 if (regs == NULL && (cmd & m2)) { 434 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 435 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 436 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 437 } 438 if (regs == NULL) { 439 device_printf(dev, "unable to map any ports\n"); 440 goto bad; 441 } 442 if (bootverbose) 443 device_printf(dev, "using %s space register mapping\n", 444 (rgd == IO_MAP_REG)? 
"I/O" : "Memory"); 445 pcs->pci_dev = dev; 446 pcs->pci_reg = regs; 447 pcs->pci_st = rman_get_bustag(regs); 448 pcs->pci_sh = rman_get_bushandle(regs); 449 450 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 451 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 452 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 453 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 454 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 455 mdvp = &mdvec; 456 basetype = ISP_HA_SCSI_UNKNOWN; 457 psize = sizeof (sdparam); 458 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 459 mdvp = &mdvec; 460 basetype = ISP_HA_SCSI_UNKNOWN; 461 psize = sizeof (sdparam); 462 } 463 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 464 mdvp = &mdvec_1080; 465 basetype = ISP_HA_SCSI_1080; 466 psize = sizeof (sdparam); 467 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 468 ISP1080_DMA_REGS_OFF; 469 } 470 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 471 mdvp = &mdvec_1080; 472 basetype = ISP_HA_SCSI_1240; 473 psize = 2 * sizeof (sdparam); 474 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 475 ISP1080_DMA_REGS_OFF; 476 } 477 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 478 mdvp = &mdvec_1080; 479 basetype = ISP_HA_SCSI_1280; 480 psize = 2 * sizeof (sdparam); 481 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 482 ISP1080_DMA_REGS_OFF; 483 } 484 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 485 mdvp = &mdvec_12160; 486 basetype = ISP_HA_SCSI_10160; 487 psize = sizeof (sdparam); 488 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 489 ISP1080_DMA_REGS_OFF; 490 } 491 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 492 mdvp = &mdvec_12160; 493 basetype = ISP_HA_SCSI_12160; 494 psize = 2 * sizeof (sdparam); 495 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 496 ISP1080_DMA_REGS_OFF; 497 } 498 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 499 mdvp = &mdvec_2100; 500 basetype = ISP_HA_FC_2100; 501 psize = sizeof (fcparam); 502 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 503 PCI_MBOX_REGS2100_OFF; 504 if (pci_get_revid(dev) < 3) { 505 /* 506 * XXX: Need to get the actual revision 507 * XXX: number of the 2100 FB. At any rate, 508 * XXX: lower cache line size for early revision 509 * XXX; boards. 510 */ 511 linesz = 1; 512 } 513 } 514 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 515 mdvp = &mdvec_2200; 516 basetype = ISP_HA_FC_2200; 517 psize = sizeof (fcparam); 518 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 519 PCI_MBOX_REGS2100_OFF; 520 } 521 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 522 mdvp = &mdvec_2300; 523 basetype = ISP_HA_FC_2300; 524 psize = sizeof (fcparam); 525 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 526 PCI_MBOX_REGS2300_OFF; 527 } 528 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 || 529 pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 530 mdvp = &mdvec_2300; 531 basetype = ISP_HA_FC_2312; 532 psize = sizeof (fcparam); 533 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 534 PCI_MBOX_REGS2300_OFF; 535 } 536 isp = &pcs->pci_isp; 537 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 538 if (isp->isp_param == NULL) { 539 device_printf(dev, "cannot allocate parameter data\n"); 540 goto bad; 541 } 542 isp->isp_mdvec = mdvp; 543 isp->isp_type = basetype; 544 isp->isp_revision = pci_get_revid(dev); 545 isp->isp_role = role; 546 isp->isp_dev = dev; 547 548 /* 549 * Try and find firmware for this device. 
550 */ 551 552 if (isp_get_firmware_p) { 553 int device = (int) pci_get_device(dev); 554 #ifdef ISP_TARGET_MODE 555 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 556 #else 557 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 558 #endif 559 } 560 561 /* 562 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 563 * are set. 564 */ 565 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 566 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 567 if (IS_2300(isp)) { /* per QLogic errata */ 568 cmd &= ~PCIM_CMD_INVEN; 569 } 570 if (IS_23XX(isp)) { 571 /* 572 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 573 */ 574 isp->isp_touched = 1; 575 576 } 577 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 578 579 /* 580 * Make sure the Cache Line Size register is set sensibly. 581 */ 582 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 583 if (data != linesz) { 584 data = PCI_DFLT_LNSZ; 585 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 586 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 587 } 588 589 /* 590 * Make sure the Latency Timer is sane. 591 */ 592 data = pci_read_config(dev, PCIR_LATTIMER, 1); 593 if (data < PCI_DFLT_LTNCY) { 594 data = PCI_DFLT_LTNCY; 595 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 596 pci_write_config(dev, PCIR_LATTIMER, data, 1); 597 } 598 599 /* 600 * Make sure we've disabled the ROM. 601 */ 602 data = pci_read_config(dev, PCIR_ROMADDR, 4); 603 data &= ~1; 604 pci_write_config(dev, PCIR_ROMADDR, data, 4); 605 606 iqd = 0; 607 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, 608 RF_ACTIVE | RF_SHAREABLE); 609 if (irq == NULL) { 610 device_printf(dev, "could not allocate interrupt\n"); 611 goto bad; 612 } 613 614 tval = 0; 615 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 616 "fwload_disable", &tval) == 0 && tval != 0) { 617 isp->isp_confopts |= ISP_CFG_NORELOAD; 618 } 619 tval = 0; 620 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 621 "ignore_nvram", &tval) == 0 && tval != 0) { 622 isp->isp_confopts |= ISP_CFG_NONVRAM; 623 } 624 tval = 0; 625 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 626 "fullduplex", &tval) == 0 && tval != 0) { 627 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 628 } 629 #ifdef ISP_FW_CRASH_DUMP 630 tval = 0; 631 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 632 "fw_dump_enable", &tval) == 0 && tval != 0) { 633 size_t amt = 0; 634 if (IS_2200(isp)) { 635 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 636 } else if (IS_23XX(isp)) { 637 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 638 } 639 if (amt) { 640 FCPARAM(isp)->isp_dump_data = 641 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 642 } else { 643 device_printf(dev, 644 "f/w crash dumps not supported for this model\n"); 645 } 646 } 647 #endif 648 649 sptr = 0; 650 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 651 "topology", (const char **) &sptr) == 0 && sptr != 0) { 652 if (strcmp(sptr, "lport") == 0) { 653 isp->isp_confopts |= ISP_CFG_LPORT; 654 } else if (strcmp(sptr, "nport") == 0) { 655 isp->isp_confopts |= ISP_CFG_NPORT; 656 } else if (strcmp(sptr, "lport-only") == 0) { 657 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 658 } else if (strcmp(sptr, "nport-only") == 0) { 659 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 660 } 661 } 662 663 /* 664 * Because the resource_*_value functions can neither return 665 * 64 bit integer values, nor can they be directly coerced 666 * to interpret the right hand side of the assignment as 667 * you want them to interpret it, we have to force WWN 668 
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWPN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWNN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp->isp_osinfo.default_id = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "iid", &tval) == 0) {
		isp->isp_osinfo.default_id = tval;
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}
	if (isp->isp_osinfo.default_id == -1) {
		if (IS_FC(isp)) {
			isp->isp_osinfo.default_id = 109;
		} else {
			isp->isp_osinfo.default_id = 7;
		}
	}

	isp_debug = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &isp_debug);

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Set up logging levels.
	 */
	if (isp_debug) {
		isp->isp_dblev = isp_debug;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose)
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;

	/*
	 * Last minute checks...
	 */
	if (IS_2312(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
774 */ 775 ISP_UNLOCK(isp); 776 return (0); 777 778 bad: 779 780 if (pcs && pcs->ih) { 781 (void) bus_teardown_intr(dev, irq, pcs->ih); 782 } 783 784 if (locksetup && isp) { 785 mtx_destroy(&isp->isp_osinfo.lock); 786 } 787 788 if (irq) { 789 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 790 } 791 792 793 if (regs) { 794 (void) bus_release_resource(dev, rtp, rgd, regs); 795 } 796 797 if (pcs) { 798 if (pcs->pci_isp.isp_param) 799 free(pcs->pci_isp.isp_param, M_DEVBUF); 800 free(pcs, M_DEVBUF); 801 } 802 803 /* 804 * XXXX: Here is where we might unload the f/w module 805 * XXXX: (or decrease the reference count to it). 806 */ 807 return (ENXIO); 808 } 809 810 static void 811 isp_pci_intr(void *arg) 812 { 813 struct ispsoftc *isp = arg; 814 u_int16_t isr, sema, mbox; 815 816 ISP_LOCK(isp); 817 isp->isp_intcnt++; 818 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 819 isp->isp_intbogus++; 820 } else { 821 int iok = isp->isp_osinfo.intsok; 822 isp->isp_osinfo.intsok = 0; 823 isp_intr(isp, isr, sema, mbox); 824 isp->isp_osinfo.intsok = iok; 825 } 826 ISP_UNLOCK(isp); 827 } 828 829 830 #define IspVirt2Off(a, x) \ 831 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 832 _BLK_REG_SHFT] + ((x) & 0xff)) 833 834 #define BXR2(pcs, off) \ 835 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 836 #define BXW2(pcs, off, v) \ 837 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 838 839 840 static INLINE int 841 isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp) 842 { 843 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 844 u_int16_t val0, val1; 845 int i = 0; 846 847 do { 848 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 849 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 850 } while (val0 != val1 && ++i < 1000); 851 if (val0 != val1) { 852 return (1); 853 } 854 *rp = val0; 855 return (0); 856 } 857 858 static int 859 isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp, 860 u_int16_t *semap, u_int16_t *mbp) 861 { 862 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 863 u_int16_t isr, sema; 864 865 if (IS_2100(isp)) { 866 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 867 return (0); 868 } 869 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 870 return (0); 871 } 872 } else { 873 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 874 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 875 } 876 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 877 isr &= INT_PENDING_MASK(isp); 878 sema &= BIU_SEMA_LOCK; 879 if (isr == 0 && sema == 0) { 880 return (0); 881 } 882 *isrp = isr; 883 if ((*semap = sema) != 0) { 884 if (IS_2100(isp)) { 885 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 886 return (0); 887 } 888 } else { 889 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 890 } 891 } 892 return (1); 893 } 894 895 static int 896 isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp, 897 u_int16_t *semap, u_int16_t *mbox0p) 898 { 899 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 900 u_int32_t r2hisr; 901 902 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 903 *isrp = 0; 904 return (0); 905 } 906 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh, 907 IspVirt2Off(pcs, BIU_R2HSTSLO)); 908 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 909 if ((r2hisr & BIU_R2HST_INTR) == 0) { 910 *isrp = 0; 911 return (0); 912 } 913 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 914 case ISPR2HST_ROM_MBX_OK: 915 case ISPR2HST_ROM_MBX_FAIL: 916 case ISPR2HST_MBX_OK: 917 case ISPR2HST_MBX_FAIL: 918 case ISPR2HST_ASYNC_EVENT: 919 *isrp = 
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		return (0);
	}
}

static u_int16_t
isp_pci_rd_reg(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

static u_int16_t
isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff)
{
	u_int16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		u_int16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
1032 */ 1033 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1034 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1035 if (regoff & SXP_BANK1_SELECT) 1036 tc |= BIU_PCI1080_CONF1_SXP1; 1037 else 1038 tc |= BIU_PCI1080_CONF1_SXP0; 1039 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1040 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1041 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1042 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1043 oc | BIU_PCI1080_CONF1_DMA); 1044 } 1045 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1046 if (oc) { 1047 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1048 } 1049 } 1050 1051 1052 struct imush { 1053 struct ispsoftc *isp; 1054 int error; 1055 }; 1056 1057 static void imc(void *, bus_dma_segment_t *, int, int); 1058 1059 static void 1060 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1061 { 1062 struct imush *imushp = (struct imush *) arg; 1063 if (error) { 1064 imushp->error = error; 1065 } else { 1066 struct ispsoftc *isp =imushp->isp; 1067 bus_addr_t addr = segs->ds_addr; 1068 1069 isp->isp_rquest_dma = addr; 1070 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1071 isp->isp_result_dma = addr; 1072 if (IS_FC(isp)) { 1073 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1074 FCPARAM(isp)->isp_scdma = addr; 1075 } 1076 } 1077 } 1078 1079 /* 1080 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE 1081 */ 1082 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) 1083 1084 static int 1085 isp_pci_mbxdma(struct ispsoftc *isp) 1086 { 1087 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1088 caddr_t base; 1089 u_int32_t len; 1090 int i, error, ns; 1091 bus_size_t alim, slim, xlim; 1092 struct imush im; 1093 1094 /* 1095 * Already been here? If so, leave... 1096 */ 1097 if (isp->isp_rquest) { 1098 return (0); 1099 } 1100 1101 #ifdef ISP_DAC_SUPPORTED 1102 alim = BUS_SPACE_UNRESTRICTED; 1103 xlim = BUS_SPACE_MAXADDR_32BIT; 1104 #else 1105 xlim = alim = BUS_SPACE_MAXADDR_32BIT; 1106 #endif 1107 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1108 slim = BUS_SPACE_MAXADDR_32BIT; 1109 } else { 1110 slim = BUS_SPACE_MAXADDR_24BIT; 1111 } 1112 1113 ISP_UNLOCK(isp); 1114 if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim, 1115 NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, 1116 busdma_lock_mutex, &Giant, &pcs->dmat)) { 1117 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1118 ISP_LOCK(isp); 1119 return(1); 1120 } 1121 1122 1123 len = sizeof (XS_T **) * isp->isp_maxcmds; 1124 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1125 if (isp->isp_xflist == NULL) { 1126 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1127 ISP_LOCK(isp); 1128 return (1); 1129 } 1130 #ifdef ISP_TARGET_MODE 1131 len = sizeof (void **) * isp->isp_maxcmds; 1132 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1133 if (isp->isp_tgtlist == NULL) { 1134 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1135 ISP_LOCK(isp); 1136 return (1); 1137 } 1138 #endif 1139 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1140 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1141 if (pcs->dmaps == NULL) { 1142 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1143 free(isp->isp_xflist, M_DEVBUF); 1144 #ifdef ISP_TARGET_MODE 1145 free(isp->isp_tgtlist, M_DEVBUF); 1146 #endif 1147 ISP_LOCK(isp); 1148 return (1); 1149 } 1150 1151 /* 1152 * Allocate and map the request, result queues, plus FC scratch area. 
1153 */ 1154 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1155 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1156 if (IS_FC(isp)) { 1157 len += ISP2100_SCRLEN; 1158 } 1159 1160 ns = (len / PAGE_SIZE) + 1; 1161 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, xlim, xlim, 1162 NULL, NULL, len, ns, slim, 0, busdma_lock_mutex, &Giant, 1163 &isp->isp_cdmat)) { 1164 isp_prt(isp, ISP_LOGERR, 1165 "cannot create a dma tag for control spaces"); 1166 free(pcs->dmaps, M_DEVBUF); 1167 free(isp->isp_xflist, M_DEVBUF); 1168 #ifdef ISP_TARGET_MODE 1169 free(isp->isp_tgtlist, M_DEVBUF); 1170 #endif 1171 ISP_LOCK(isp); 1172 return (1); 1173 } 1174 1175 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1176 &isp->isp_cdmap) != 0) { 1177 isp_prt(isp, ISP_LOGERR, 1178 "cannot allocate %d bytes of CCB memory", len); 1179 bus_dma_tag_destroy(isp->isp_cdmat); 1180 free(isp->isp_xflist, M_DEVBUF); 1181 #ifdef ISP_TARGET_MODE 1182 free(isp->isp_tgtlist, M_DEVBUF); 1183 #endif 1184 free(pcs->dmaps, M_DEVBUF); 1185 ISP_LOCK(isp); 1186 return (1); 1187 } 1188 1189 for (i = 0; i < isp->isp_maxcmds; i++) { 1190 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1191 if (error) { 1192 isp_prt(isp, ISP_LOGERR, 1193 "error %d creating per-cmd DMA maps", error); 1194 while (--i >= 0) { 1195 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1196 } 1197 goto bad; 1198 } 1199 } 1200 1201 im.isp = isp; 1202 im.error = 0; 1203 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1204 if (im.error) { 1205 isp_prt(isp, ISP_LOGERR, 1206 "error %d loading dma map for control areas", im.error); 1207 goto bad; 1208 } 1209 1210 isp->isp_rquest = base; 1211 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1212 isp->isp_result = base; 1213 if (IS_FC(isp)) { 1214 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1215 FCPARAM(isp)->isp_scratch = base; 1216 } 1217 ISP_LOCK(isp); 1218 return (0); 1219 1220 bad: 1221 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1222 bus_dma_tag_destroy(isp->isp_cdmat); 1223 free(isp->isp_xflist, M_DEVBUF); 1224 #ifdef ISP_TARGET_MODE 1225 free(isp->isp_tgtlist, M_DEVBUF); 1226 #endif 1227 free(pcs->dmaps, M_DEVBUF); 1228 ISP_LOCK(isp); 1229 isp->isp_rquest = NULL; 1230 return (1); 1231 } 1232 1233 typedef struct { 1234 struct ispsoftc *isp; 1235 void *cmd_token; 1236 void *rq; 1237 u_int16_t *nxtip; 1238 u_int16_t optr; 1239 u_int error; 1240 } mush_t; 1241 1242 #define MUSHERR_NOQENTRIES -2 1243 1244 #ifdef ISP_TARGET_MODE 1245 /* 1246 * We need to handle DMA for target mode differently from initiator mode. 1247 * 1248 * DMA mapping and construction and submission of CTIO Request Entries 1249 * and rendevous for completion are very tightly coupled because we start 1250 * out by knowing (per platform) how much data we have to move, but we 1251 * don't know, up front, how many DMA mapping segments will have to be used 1252 * cover that data, so we don't know how many CTIO Request Entries we 1253 * will end up using. Further, for performance reasons we may want to 1254 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1255 * 1256 * The standard vector still goes through isp_pci_dmasetup, but the callback 1257 * for the DMA mapping routines comes here instead with the whole transfer 1258 * mapped and a pointer to a partially filled in already allocated request 1259 * queue entry. We finish the job. 
1260 */ 1261 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1262 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1263 1264 #define STATUS_WITH_DATA 1 1265 1266 static void 1267 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1268 { 1269 mush_t *mp; 1270 struct ccb_scsiio *csio; 1271 struct ispsoftc *isp; 1272 struct isp_pcisoftc *pcs; 1273 bus_dmamap_t *dp; 1274 ct_entry_t *cto, *qe; 1275 u_int8_t scsi_status; 1276 u_int16_t curi, nxti, handle; 1277 u_int32_t sflags; 1278 int32_t resid; 1279 int nth_ctio, nctios, send_status; 1280 1281 mp = (mush_t *) arg; 1282 if (error) { 1283 mp->error = error; 1284 return; 1285 } 1286 1287 isp = mp->isp; 1288 csio = mp->cmd_token; 1289 cto = mp->rq; 1290 curi = isp->isp_reqidx; 1291 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1292 1293 cto->ct_xfrlen = 0; 1294 cto->ct_seg_count = 0; 1295 cto->ct_header.rqs_entry_count = 1; 1296 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1297 1298 if (nseg == 0) { 1299 cto->ct_header.rqs_seqno = 1; 1300 isp_prt(isp, ISP_LOGTDEBUG1, 1301 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1302 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1303 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1304 cto->ct_scsi_status, cto->ct_resid); 1305 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1306 isp_put_ctio(isp, cto, qe); 1307 return; 1308 } 1309 1310 nctios = nseg / ISP_RQDSEG; 1311 if (nseg % ISP_RQDSEG) { 1312 nctios++; 1313 } 1314 1315 /* 1316 * Save syshandle, and potentially any SCSI status, which we'll 1317 * reinsert on the last CTIO we're going to send. 1318 */ 1319 1320 handle = cto->ct_syshandle; 1321 cto->ct_syshandle = 0; 1322 cto->ct_header.rqs_seqno = 0; 1323 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1324 1325 if (send_status) { 1326 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1327 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1328 /* 1329 * Preserve residual. 1330 */ 1331 resid = cto->ct_resid; 1332 1333 /* 1334 * Save actual SCSI status. 1335 */ 1336 scsi_status = cto->ct_scsi_status; 1337 1338 #ifndef STATUS_WITH_DATA 1339 sflags |= CT_NO_DATA; 1340 /* 1341 * We can't do a status at the same time as a data CTIO, so 1342 * we need to synthesize an extra CTIO at this level. 1343 */ 1344 nctios++; 1345 #endif 1346 } else { 1347 sflags = scsi_status = resid = 0; 1348 } 1349 1350 cto->ct_resid = 0; 1351 cto->ct_scsi_status = 0; 1352 1353 pcs = (struct isp_pcisoftc *)isp; 1354 dp = &pcs->dmaps[isp_handle_index(handle)]; 1355 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1356 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1357 } else { 1358 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1359 } 1360 1361 nxti = *mp->nxtip; 1362 1363 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 1364 int seglim; 1365 1366 seglim = nseg; 1367 if (seglim) { 1368 int seg; 1369 1370 if (seglim > ISP_RQDSEG) 1371 seglim = ISP_RQDSEG; 1372 1373 for (seg = 0; seg < seglim; seg++, nseg--) { 1374 /* 1375 * Unlike normal initiator commands, we don't 1376 * do any swizzling here. 1377 */ 1378 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1379 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1380 cto->ct_xfrlen += dm_segs->ds_len; 1381 dm_segs++; 1382 } 1383 cto->ct_seg_count = seg; 1384 } else { 1385 /* 1386 * This case should only happen when we're sending an 1387 * extra CTIO with final status. 
1388 */ 1389 if (send_status == 0) { 1390 isp_prt(isp, ISP_LOGWARN, 1391 "tdma_mk ran out of segments"); 1392 mp->error = EINVAL; 1393 return; 1394 } 1395 } 1396 1397 /* 1398 * At this point, the fields ct_lun, ct_iid, ct_tagval, 1399 * ct_tagtype, and ct_timeout have been carried over 1400 * unchanged from what our caller had set. 1401 * 1402 * The dataseg fields and the seg_count fields we just got 1403 * through setting. The data direction we've preserved all 1404 * along and only clear it if we're now sending status. 1405 */ 1406 1407 if (nth_ctio == nctios - 1) { 1408 /* 1409 * We're the last in a sequence of CTIOs, so mark 1410 * this CTIO and save the handle to the CCB such that 1411 * when this CTIO completes we can free dma resources 1412 * and do whatever else we need to do to finish the 1413 * rest of the command. We *don't* give this to the 1414 * firmware to work on- the caller will do that. 1415 */ 1416 1417 cto->ct_syshandle = handle; 1418 cto->ct_header.rqs_seqno = 1; 1419 1420 if (send_status) { 1421 cto->ct_scsi_status = scsi_status; 1422 cto->ct_flags |= sflags; 1423 cto->ct_resid = resid; 1424 } 1425 if (send_status) { 1426 isp_prt(isp, ISP_LOGTDEBUG1, 1427 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 1428 "scsi status %x resid %d", 1429 cto->ct_fwhandle, csio->ccb_h.target_lun, 1430 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 1431 cto->ct_scsi_status, cto->ct_resid); 1432 } else { 1433 isp_prt(isp, ISP_LOGTDEBUG1, 1434 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 1435 cto->ct_fwhandle, csio->ccb_h.target_lun, 1436 cto->ct_iid, cto->ct_tag_val, 1437 cto->ct_flags); 1438 } 1439 isp_put_ctio(isp, cto, qe); 1440 ISP_TDQE(isp, "last tdma_mk", curi, cto); 1441 if (nctios > 1) { 1442 MEMORYBARRIER(isp, SYNC_REQUEST, 1443 curi, QENTRY_LEN); 1444 } 1445 } else { 1446 ct_entry_t *oqe = qe; 1447 1448 /* 1449 * Make sure syshandle fields are clean 1450 */ 1451 cto->ct_syshandle = 0; 1452 cto->ct_header.rqs_seqno = 0; 1453 1454 isp_prt(isp, ISP_LOGTDEBUG1, 1455 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 1456 cto->ct_fwhandle, csio->ccb_h.target_lun, 1457 cto->ct_iid, cto->ct_flags); 1458 1459 /* 1460 * Get a new CTIO 1461 */ 1462 qe = (ct_entry_t *) 1463 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1464 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 1465 if (nxti == mp->optr) { 1466 isp_prt(isp, ISP_LOGTDEBUG0, 1467 "Queue Overflow in tdma_mk"); 1468 mp->error = MUSHERR_NOQENTRIES; 1469 return; 1470 } 1471 1472 /* 1473 * Now that we're done with the old CTIO, 1474 * flush it out to the request queue. 1475 */ 1476 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 1477 isp_put_ctio(isp, cto, oqe); 1478 if (nth_ctio != 0) { 1479 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 1480 QENTRY_LEN); 1481 } 1482 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 1483 1484 /* 1485 * Reset some fields in the CTIO so we can reuse 1486 * for the next one we'll flush to the request 1487 * queue. 1488 */ 1489 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1490 cto->ct_header.rqs_entry_count = 1; 1491 cto->ct_header.rqs_flags = 0; 1492 cto->ct_status = 0; 1493 cto->ct_scsi_status = 0; 1494 cto->ct_xfrlen = 0; 1495 cto->ct_resid = 0; 1496 cto->ct_seg_count = 0; 1497 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1498 } 1499 } 1500 *mp->nxtip = nxti; 1501 } 1502 1503 /* 1504 * We don't have to do multiple CTIOs here. Instead, we can just do 1505 * continuation segments as needed. This greatly simplifies the code 1506 * improves performance. 
1507 */ 1508 1509 static void 1510 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1511 { 1512 mush_t *mp; 1513 struct ccb_scsiio *csio; 1514 struct ispsoftc *isp; 1515 ct2_entry_t *cto, *qe; 1516 u_int16_t curi, nxti; 1517 int segcnt; 1518 1519 mp = (mush_t *) arg; 1520 if (error) { 1521 mp->error = error; 1522 return; 1523 } 1524 1525 isp = mp->isp; 1526 csio = mp->cmd_token; 1527 cto = mp->rq; 1528 1529 curi = isp->isp_reqidx; 1530 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1531 1532 if (nseg == 0) { 1533 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 1534 isp_prt(isp, ISP_LOGWARN, 1535 "dma2_tgt_fc, a status CTIO2 without MODE1 " 1536 "set (0x%x)", cto->ct_flags); 1537 mp->error = EINVAL; 1538 return; 1539 } 1540 /* 1541 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 1542 * flags to NO DATA and clear relative offset flags. 1543 * We preserve the ct_resid and the response area. 1544 */ 1545 cto->ct_header.rqs_seqno = 1; 1546 cto->ct_seg_count = 0; 1547 cto->ct_reloff = 0; 1548 isp_prt(isp, ISP_LOGTDEBUG1, 1549 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 1550 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 1551 cto->ct_iid, cto->ct_flags, cto->ct_status, 1552 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1553 isp_put_ctio2(isp, cto, qe); 1554 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 1555 return; 1556 } 1557 1558 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 1559 isp_prt(isp, ISP_LOGERR, 1560 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 1561 "(0x%x)", cto->ct_flags); 1562 mp->error = EINVAL; 1563 return; 1564 } 1565 1566 1567 nxti = *mp->nxtip; 1568 1569 /* 1570 * Set up the CTIO2 data segments. 1571 */ 1572 for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg; 1573 cto->ct_seg_count++, segcnt++) { 1574 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base = 1575 dm_segs[segcnt].ds_addr; 1576 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count = 1577 dm_segs[segcnt].ds_len; 1578 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1579 isp_prt(isp, ISP_LOGTDEBUG1, 1580 "isp_send_ctio2: ent0[%d]0x%llx:%lld", 1581 cto->ct_seg_count, (long long)dm_segs[segcnt].ds_addr, 1582 (long long)dm_segs[segcnt].ds_len); 1583 } 1584 1585 while (segcnt < nseg) { 1586 u_int16_t curip; 1587 int seg; 1588 ispcontreq_t local, *crq = &local, *qep; 1589 1590 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1591 curip = nxti; 1592 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 1593 if (nxti == mp->optr) { 1594 ISP_UNLOCK(isp); 1595 isp_prt(isp, ISP_LOGTDEBUG0, 1596 "tdma_mkfc: request queue overflow"); 1597 mp->error = MUSHERR_NOQENTRIES; 1598 return; 1599 } 1600 cto->ct_header.rqs_entry_count++; 1601 MEMZERO((void *)crq, sizeof (*crq)); 1602 crq->req_header.rqs_entry_count = 1; 1603 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1604 for (seg = 0; segcnt < nseg && seg < ISP_CDSEG; 1605 segcnt++, seg++) { 1606 crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr; 1607 crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len; 1608 isp_prt(isp, ISP_LOGTDEBUG1, 1609 "isp_send_ctio2: ent%d[%d]0x%llx:%lld", 1610 cto->ct_header.rqs_entry_count-1, seg, 1611 (long long) dm_segs[segcnt].ds_addr, 1612 (long long) dm_segs[segcnt].ds_len); 1613 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1614 cto->ct_seg_count++; 1615 } 1616 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 1617 isp_put_cont_req(isp, crq, qep); 1618 ISP_TDQE(isp, "cont entry", curi, qep); 1619 } 1620 1621 /* 1622 * No do final 
	 */
	cto->ct_header.rqs_seqno = 1;
	isp_prt(isp, ISP_LOGTDEBUG1,
	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
	    cto->ct_resid);
	isp_put_ctio2(isp, cto, qe);
	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
	*mp->nxtip = nxti;
}
#endif

static void dma2(void *, bus_dma_segment_t *, int, int);

#ifdef	PAE
#define	LOWD(x)		((uint32_t) x)
#define	HIWD(x)		((uint32_t) (x >> 32))

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ispsoftc *isp;
	struct ccb_scsiio *csio;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq64_t *rq;
	int seglim, datalen;
	u_int16_t nxti;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	if (nseg < 1) {
		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
		mp->error = EFAULT;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	pcs = (struct isp_pcisoftc *)mp->isp;
	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
	nxti = *mp->nxtip;

	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}
	datalen = XS_XFRLEN(csio);

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
1688 */ 1689 1690 if (IS_FC(isp)) { 1691 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 1692 seglim = ISP_RQDSEG_T3; 1693 ((ispreqt3_t *)rq)->req_totalcnt = datalen; 1694 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1695 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1696 } else { 1697 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1698 } 1699 } else { 1700 rq->req_header.rqs_entry_type = RQSTYPE_A64; 1701 if (csio->cdb_len > 12) { 1702 seglim = 0; 1703 } else { 1704 seglim = ISP_RQDSEG_A64; 1705 } 1706 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1707 rq->req_flags |= REQFLAG_DATA_IN; 1708 } else { 1709 rq->req_flags |= REQFLAG_DATA_OUT; 1710 } 1711 } 1712 1713 eseg = dm_segs + nseg; 1714 1715 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1716 if (IS_FC(isp)) { 1717 ispreqt3_t *rq3 = (ispreqt3_t *)rq; 1718 rq3->req_dataseg[rq3->req_seg_count].ds_base = 1719 LOWD(dm_segs->ds_addr); 1720 rq3->req_dataseg[rq3->req_seg_count].ds_basehi = 1721 HIWD(dm_segs->ds_addr); 1722 rq3->req_dataseg[rq3->req_seg_count].ds_count = 1723 dm_segs->ds_len; 1724 } else { 1725 rq->req_dataseg[rq->req_seg_count].ds_base = 1726 LOWD(dm_segs->ds_addr); 1727 rq->req_dataseg[rq->req_seg_count].ds_basehi = 1728 HIWD(dm_segs->ds_addr); 1729 rq->req_dataseg[rq->req_seg_count].ds_count = 1730 dm_segs->ds_len; 1731 } 1732 datalen -= dm_segs->ds_len; 1733 rq->req_seg_count++; 1734 dm_segs++; 1735 } 1736 1737 while (datalen > 0 && dm_segs != eseg) { 1738 u_int16_t onxti; 1739 ispcontreq64_t local, *crq = &local, *cqe; 1740 1741 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1742 onxti = nxti; 1743 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 1744 if (nxti == mp->optr) { 1745 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 1746 mp->error = MUSHERR_NOQENTRIES; 1747 return; 1748 } 1749 rq->req_header.rqs_entry_count++; 1750 MEMZERO((void *)crq, sizeof (*crq)); 1751 crq->req_header.rqs_entry_count = 1; 1752 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 1753 1754 seglim = 0; 1755 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 1756 crq->req_dataseg[seglim].ds_base = 1757 LOWD(dm_segs->ds_addr); 1758 crq->req_dataseg[seglim].ds_basehi = 1759 HIWD(dm_segs->ds_addr); 1760 crq->req_dataseg[seglim].ds_count = 1761 dm_segs->ds_len; 1762 rq->req_seg_count++; 1763 dm_segs++; 1764 seglim++; 1765 datalen -= dm_segs->ds_len; 1766 } 1767 isp_put_cont64_req(isp, crq, cqe); 1768 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 1769 } 1770 *mp->nxtip = nxti; 1771 } 1772 #else 1773 static void 1774 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1775 { 1776 mush_t *mp; 1777 struct ispsoftc *isp; 1778 struct ccb_scsiio *csio; 1779 struct isp_pcisoftc *pcs; 1780 bus_dmamap_t *dp; 1781 bus_dma_segment_t *eseg; 1782 ispreq_t *rq; 1783 int seglim, datalen; 1784 u_int16_t nxti; 1785 1786 mp = (mush_t *) arg; 1787 if (error) { 1788 mp->error = error; 1789 return; 1790 } 1791 1792 if (nseg < 1) { 1793 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 1794 mp->error = EFAULT; 1795 return; 1796 } 1797 csio = mp->cmd_token; 1798 isp = mp->isp; 1799 rq = mp->rq; 1800 pcs = (struct isp_pcisoftc *)mp->isp; 1801 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1802 nxti = *mp->nxtip; 1803 1804 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1805 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1806 } else { 1807 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1808 } 1809 1810 datalen = 

	/*
	 * We're passed an initial partially filled in entry that
	 * has most fields filled in except for data transfer
	 * related values.
	 *
	 * Our job is to fill in the initial request queue entry and
	 * then to start allocating and filling in continuation entries
	 * until we've covered the entire transfer.
	 */

	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
		} else {
			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
		}
	} else {
		if (csio->cdb_len > 12) {
			seglim = 0;
		} else {
			seglim = ISP_RQDSEG;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			rq->req_flags |= REQFLAG_DATA_IN;
		} else {
			rq->req_flags |= REQFLAG_DATA_OUT;
		}
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		u_int16_t onxti;
		ispcontreq_t local, *crq = &local, *cqe;

		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
		onxti = nxti;
		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
		if (nxti == mp->optr) {
			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		MEMZERO((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
			rq->req_seg_count++;
			seglim++;
			/* account for this segment before advancing to the next */
			datalen -= dm_segs->ds_len;
			dm_segs++;
		}
		isp_put_cont_req(isp, crq, cqe);
		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
	}
	*mp->nxtip = nxti;
}
#endif

static int
isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq,
    u_int16_t *nxtip, u_int16_t optr)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	ispreq_t *qep;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);

	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
#ifdef	ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		if (IS_FC(isp)) {
			eptr = tdma_mkfc;
		} else {
			eptr = tdma_mk;
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
		    (csio->dxfer_len == 0)) {
			mp = &mush;
			mp->isp = isp;
			mp->cmd_token = csio;
			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
			mp->nxtip = nxtip;
			mp->optr = optr;
			mp->error = 0;
			(*eptr)(mp, NULL, 0, 0);
			goto mbxsync;
		}
	} else
#endif
	eptr = dma2;


	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
	    (csio->dxfer_len == 0)) {
		rq->req_seg_count = 1;
		goto mbxsync;
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = rq;
	mp->nxtip = nxtip;
	mp->optr = optr;
	mp->error = 0;

	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
			s = splsoftvm();
			error = bus_dmamap_load(pcs->dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pcs->dmat, *dp);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR,
				    "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR,
				    "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			isp_prt(isp, ISP_LOGERR,
			    "Virtual segment addresses unsupported");
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
mbxsync:
	switch (rq->req_header.rqs_entry_type) {
	case RQSTYPE_REQUEST:
		isp_put_request(isp, rq, qep);
		break;
	case RQSTYPE_CMDONLY:
		isp_put_extended_request(isp, (ispextreq_t *)rq,
		    (ispextreq_t *)qep);
		break;
	case RQSTYPE_T2RQS:
		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
		break;
	case RQSTYPE_A64:
	case RQSTYPE_T3RQS:
		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
		break;
	}
	return (CMD_QUEUED);
}

static void
isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pcs->dmat, *dp);
}


static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	/* and enable interrupts */
	ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}