1 /*- 2 * Copyright (c) 1997-2006 by Matthew Jacob 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 /* 27 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 28 * FreeBSD Version. 29 */ 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/kernel.h> 36 #include <sys/module.h> 37 #if __FreeBSD_version >= 700000 38 #include <sys/linker.h> 39 #include <sys/firmware.h> 40 #endif 41 #include <sys/bus.h> 42 #if __FreeBSD_version < 500000 43 #include <pci/pcireg.h> 44 #include <pci/pcivar.h> 45 #include <machine/bus_memio.h> 46 #include <machine/bus_pio.h> 47 #else 48 #include <sys/stdint.h> 49 #include <dev/pci/pcireg.h> 50 #include <dev/pci/pcivar.h> 51 #endif 52 #include <machine/bus.h> 53 #include <machine/resource.h> 54 #include <sys/rman.h> 55 #include <sys/malloc.h> 56 57 #include <dev/isp/isp_freebsd.h> 58 59 #if __FreeBSD_version < 500000 60 #define BUS_PROBE_DEFAULT 0 61 #endif 62 63 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int); 64 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t); 65 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int); 66 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t); 67 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int); 68 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t); 69 static int 70 isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 71 static int 72 isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 73 static int 74 isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 75 static int isp_pci_mbxdma(ispsoftc_t *); 76 static int 77 isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t); 78 static void 79 isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint32_t); 80 81 82 static void isp_pci_reset0(ispsoftc_t *); 83 static void isp_pci_reset1(ispsoftc_t *); 84 static void isp_pci_dumpregs(ispsoftc_t *, const char *); 85 86 static struct ispmdvec mdvec = { 87 isp_pci_rd_isr, 88 isp_pci_rd_reg, 89 isp_pci_wr_reg, 90 isp_pci_mbxdma, 91 isp_pci_dmasetup, 92 isp_pci_dmateardown, 93 isp_pci_reset0, 94 isp_pci_reset1, 95 isp_pci_dumpregs, 96 NULL, 97 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 98 }; 99 100 static struct ispmdvec mdvec_1080 = { 101 isp_pci_rd_isr, 102 isp_pci_rd_reg_1080, 103 isp_pci_wr_reg_1080, 104 isp_pci_mbxdma, 105 isp_pci_dmasetup, 106 isp_pci_dmateardown, 107 isp_pci_reset0, 108 isp_pci_reset1, 109 isp_pci_dumpregs, 110 NULL, 111 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 112 }; 113 114 static struct ispmdvec mdvec_12160 = { 115 isp_pci_rd_isr, 116 isp_pci_rd_reg_1080, 117 isp_pci_wr_reg_1080, 118 isp_pci_mbxdma, 119 isp_pci_dmasetup, 120 isp_pci_dmateardown, 121 isp_pci_reset0, 122 isp_pci_reset1, 123 isp_pci_dumpregs, 124 NULL, 125 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 126 }; 127 128 static struct ispmdvec mdvec_2100 = { 129 isp_pci_rd_isr, 130 isp_pci_rd_reg, 131 isp_pci_wr_reg, 132 isp_pci_mbxdma, 133 isp_pci_dmasetup, 134 isp_pci_dmateardown, 135 isp_pci_reset0, 136 isp_pci_reset1, 137 isp_pci_dumpregs 138 }; 139 140 static struct ispmdvec mdvec_2200 = { 141 isp_pci_rd_isr, 142 isp_pci_rd_reg, 143 isp_pci_wr_reg, 144 isp_pci_mbxdma, 145 isp_pci_dmasetup, 146 isp_pci_dmateardown, 147 isp_pci_reset0, 148 isp_pci_reset1, 149 isp_pci_dumpregs 150 }; 151 152 static struct ispmdvec mdvec_2300 = { 153 isp_pci_rd_isr_2300, 154 isp_pci_rd_reg, 155 isp_pci_wr_reg, 156 isp_pci_mbxdma, 157 isp_pci_dmasetup, 158 isp_pci_dmateardown, 159 isp_pci_reset0, 160 isp_pci_reset1, 161 isp_pci_dumpregs 162 }; 163 164 static struct ispmdvec mdvec_2400 = { 165 isp_pci_rd_isr_2400, 166 isp_pci_rd_reg_2400, 167 isp_pci_wr_reg_2400, 168 isp_pci_mbxdma, 169 isp_pci_dmasetup, 170 isp_pci_dmateardown, 171 isp_pci_reset0, 172 isp_pci_reset1, 173 NULL 174 }; 175 176 #ifndef PCIM_CMD_INVEN 177 #define PCIM_CMD_INVEN 0x10 178 #endif 179 #ifndef PCIM_CMD_BUSMASTEREN 180 #define PCIM_CMD_BUSMASTEREN 0x0004 181 #endif 182 #ifndef PCIM_CMD_PERRESPEN 183 #define PCIM_CMD_PERRESPEN 0x0040 184 #endif 185 #ifndef PCIM_CMD_SEREN 186 #define PCIM_CMD_SEREN 0x0100 187 #endif 188 #ifndef PCIM_CMD_INTX_DISABLE 189 #define PCIM_CMD_INTX_DISABLE 0x0400 190 #endif 191 192 #ifndef PCIR_COMMAND 193 #define PCIR_COMMAND 0x04 194 #endif 195 196 #ifndef PCIR_CACHELNSZ 197 #define PCIR_CACHELNSZ 0x0c 198 #endif 199 200 #ifndef PCIR_LATTIMER 201 #define PCIR_LATTIMER 0x0d 202 #endif 203 204 #ifndef PCIR_ROMADDR 205 #define PCIR_ROMADDR 0x30 206 #endif 207 208 #ifndef PCI_VENDOR_QLOGIC 209 #define PCI_VENDOR_QLOGIC 0x1077 210 #endif 211 212 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 213 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 214 #endif 215 216 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 217 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 218 #endif 219 220 #ifndef PCI_PRODUCT_QLOGIC_ISP10160 221 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 222 #endif 223 224 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 225 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 226 #endif 227 228 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 229 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 230 #endif 231 232 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 233 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 234 #endif 235 236 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 237 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 238 #endif 239 240 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 241 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 242 #endif 243 244 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 245 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 246 #endif 247 248 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 249 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 250 #endif 251 252 #ifndef PCI_PRODUCT_QLOGIC_ISP2322 253 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 254 #endif 255 256 #ifndef PCI_PRODUCT_QLOGIC_ISP2422 257 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 258 #endif 259 260 #ifndef PCI_PRODUCT_QLOGIC_ISP2432 261 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432 262 #endif 263 264 #ifndef PCI_PRODUCT_QLOGIC_ISP6312 265 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 266 #endif 267 268 #ifndef PCI_PRODUCT_QLOGIC_ISP6322 269 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322 270 #endif 271 272 273 #define PCI_QLOGIC_ISP1020 \ 274 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 275 276 #define PCI_QLOGIC_ISP1080 \ 277 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 278 279 #define PCI_QLOGIC_ISP10160 \ 280 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 281 282 #define PCI_QLOGIC_ISP12160 \ 283 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 284 285 #define PCI_QLOGIC_ISP1240 \ 286 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 287 288 #define PCI_QLOGIC_ISP1280 \ 289 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 290 291 #define PCI_QLOGIC_ISP2100 \ 292 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 293 294 #define PCI_QLOGIC_ISP2200 \ 295 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 296 297 #define PCI_QLOGIC_ISP2300 \ 298 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 299 300 #define PCI_QLOGIC_ISP2312 \ 301 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 302 303 #define PCI_QLOGIC_ISP2322 \ 304 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) 305 306 #define PCI_QLOGIC_ISP2422 \ 307 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) 308 309 #define PCI_QLOGIC_ISP2432 \ 310 ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC) 311 312 #define PCI_QLOGIC_ISP6312 \ 313 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) 314 315 #define PCI_QLOGIC_ISP6322 \ 316 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC) 317 318 /* 319 * Odd case for some AMI raid cards... We need to *not* attach to this. 320 */ 321 #define AMI_RAID_SUBVENDOR_ID 0x101e 322 323 #define IO_MAP_REG 0x10 324 #define MEM_MAP_REG 0x14 325 326 #define PCI_DFLT_LTNCY 0x40 327 #define PCI_DFLT_LNSZ 0x10 328 329 static int isp_pci_probe (device_t); 330 static int isp_pci_attach (device_t); 331 static int isp_pci_detach (device_t); 332 333 334 struct isp_pcisoftc { 335 ispsoftc_t pci_isp; 336 device_t pci_dev; 337 struct resource * pci_reg; 338 void * ih; 339 int16_t pci_poff[_NREG_BLKS]; 340 bus_dma_tag_t dmat; 341 bus_dmamap_t *dmaps; 342 }; 343 344 345 static device_method_t isp_pci_methods[] = { 346 /* Device interface */ 347 DEVMETHOD(device_probe, isp_pci_probe), 348 DEVMETHOD(device_attach, isp_pci_attach), 349 DEVMETHOD(device_detach, isp_pci_detach), 350 { 0, 0 } 351 }; 352 static void isp_pci_intr(void *); 353 354 static driver_t isp_pci_driver = { 355 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 356 }; 357 static devclass_t isp_devclass; 358 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 359 #if __FreeBSD_version < 700000 360 extern ispfwfunc *isp_get_firmware_p; 361 #endif 362 363 static int 364 isp_pci_probe(device_t dev) 365 { 366 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 367 case PCI_QLOGIC_ISP1020: 368 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 369 break; 370 case PCI_QLOGIC_ISP1080: 371 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 372 break; 373 case PCI_QLOGIC_ISP1240: 374 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 375 break; 376 case PCI_QLOGIC_ISP1280: 377 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 378 break; 379 case PCI_QLOGIC_ISP10160: 380 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 381 break; 382 case PCI_QLOGIC_ISP12160: 383 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 384 return (ENXIO); 385 } 386 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 387 break; 388 case PCI_QLOGIC_ISP2100: 389 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 390 break; 391 case PCI_QLOGIC_ISP2200: 392 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 393 break; 394 case PCI_QLOGIC_ISP2300: 395 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 396 break; 397 case PCI_QLOGIC_ISP2312: 398 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 399 break; 400 case PCI_QLOGIC_ISP2322: 401 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); 402 break; 403 case PCI_QLOGIC_ISP2422: 404 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); 405 break; 406 case PCI_QLOGIC_ISP2432: 407 device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter"); 408 break; 409 case PCI_QLOGIC_ISP6312: 410 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); 411 break; 412 case PCI_QLOGIC_ISP6322: 413 device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter"); 414 break; 415 default: 416 return (ENXIO); 417 } 418 if (isp_announced == 0 && bootverbose) { 419 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 420 "Core Version %d.%d\n", 421 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 422 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 423 isp_announced++; 424 } 425 /* 426 * XXXX: Here is where we might load the f/w module 427 * XXXX: (or increase a reference count to it). 428 */ 429 return (BUS_PROBE_DEFAULT); 430 } 431 432 #if __FreeBSD_version < 500000 433 static void 434 isp_get_generic_options(device_t dev, ispsoftc_t *isp) 435 { 436 int bitmap, unit; 437 438 unit = device_get_unit(dev); 439 if (getenv_int("isp_disable", &bitmap)) { 440 if (bitmap & (1 << unit)) { 441 isp->isp_osinfo.disabled = 1; 442 return; 443 } 444 } 445 if (getenv_int("isp_no_fwload", &bitmap)) { 446 if (bitmap & (1 << unit)) 447 isp->isp_confopts |= ISP_CFG_NORELOAD; 448 } 449 if (getenv_int("isp_fwload", &bitmap)) { 450 if (bitmap & (1 << unit)) 451 isp->isp_confopts &= ~ISP_CFG_NORELOAD; 452 } 453 if (getenv_int("isp_no_nvram", &bitmap)) { 454 if (bitmap & (1 << unit)) 455 isp->isp_confopts |= ISP_CFG_NONVRAM; 456 } 457 if (getenv_int("isp_nvram", &bitmap)) { 458 if (bitmap & (1 << unit)) 459 isp->isp_confopts &= ~ISP_CFG_NONVRAM; 460 } 461 462 bitmap = 0; 463 (void) getenv_int("isp_debug", &bitmap); 464 if (bitmap) { 465 isp->isp_dblev = bitmap; 466 } else { 467 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 468 } 469 if (bootverbose) { 470 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 471 } 472 473 bitmap = 0; 474 if (getenv_int("role", &bitmap)) { 475 isp->isp_role = bitmap; 476 } else { 477 isp->isp_role = ISP_DEFAULT_ROLES; 478 } 479 480 } 481 482 static void 483 isp_get_pci_options(device_t dev, int *m1, int *m2) 484 { 485 int bitmap; 486 int unit = device_get_unit(dev); 487 488 *m1 = PCIM_CMD_MEMEN; 489 *m2 = PCIM_CMD_PORTEN; 490 if (getenv_int("isp_mem_map", &bitmap)) { 491 if (bitmap & (1 << unit)) { 492 *m1 = PCIM_CMD_MEMEN; 493 *m2 = PCIM_CMD_PORTEN; 494 } 495 } 496 bitmap = 0; 497 if (getenv_int("isp_io_map", &bitmap)) { 498 if (bitmap & (1 << unit)) { 499 *m1 = PCIM_CMD_PORTEN; 500 *m2 = PCIM_CMD_MEMEN; 501 } 502 } 503 } 504 505 static void 506 isp_get_specific_options(device_t dev, ispsoftc_t *isp) 507 { 508 uint64_t wwn; 509 int bitmap; 510 int unit = device_get_unit(dev); 511 512 callout_handle_init(&isp->isp_osinfo.ldt); 513 callout_handle_init(&isp->isp_osinfo.gdt); 514 515 if (IS_SCSI(isp)) { 516 return; 517 } 518 519 if (getenv_int("isp_fcduplex", &bitmap)) { 520 if (bitmap & (1 << unit)) 521 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 522 } 523 if (getenv_int("isp_no_fcduplex", &bitmap)) { 524 if (bitmap & (1 << unit)) 525 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX; 526 } 527 if (getenv_int("isp_nport", &bitmap)) { 528 if (bitmap & (1 << unit)) 529 isp->isp_confopts |= ISP_CFG_NPORT; 530 } 531 532 /* 533 * Because the resource_*_value functions can neither return 534 * 64 bit integer values, nor can they be directly coerced 535 * to interpret the right hand side of the assignment as 536 * you want them to interpret it, we have to force WWN 537 * hint replacement to specify WWN strings with a leading 538 * 'w' (e..g w50000000aaaa0001). Sigh. 539 */ 540 if (getenv_quad("isp_portwwn", &wwn)) { 541 isp->isp_osinfo.default_port_wwn = wwn; 542 isp->isp_confopts |= ISP_CFG_OWNWWPN; 543 } 544 if (isp->isp_osinfo.default_port_wwn == 0) { 545 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 546 } 547 548 if (getenv_quad("isp_nodewwn", &wwn)) { 549 isp->isp_osinfo.default_node_wwn = wwn; 550 isp->isp_confopts |= ISP_CFG_OWNWWNN; 551 } 552 if (isp->isp_osinfo.default_node_wwn == 0) { 553 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 554 } 555 556 bitmap = 0; 557 (void) getenv_int("isp_fabric_hysteresis", &bitmap); 558 if (bitmap >= 0 && bitmap < 256) { 559 isp->isp_osinfo.hysteresis = bitmap; 560 } else { 561 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 562 } 563 564 bitmap = 0; 565 (void) getenv_int("isp_loop_down_limit", &bitmap); 566 if (bitmap >= 0 && bitmap < 0xffff) { 567 isp->isp_osinfo.loop_down_limit = bitmap; 568 } else { 569 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 570 } 571 572 bitmap = 0; 573 (void) getenv_int("isp_gone_device_time", &bitmap); 574 if (bitmap >= 0 && bitmap < 0xffff) { 575 isp->isp_osinfo.gone_device_time = bitmap; 576 } else { 577 isp->isp_osinfo.gone_device_time = isp_gone_device_time; 578 } 579 #ifdef ISP_FW_CRASH_DUMP 580 bitmap = 0; 581 if (getenv_int("isp_fw_dump_enable", &bitmap)) { 582 if (bitmap & (1 << unit) { 583 size_t amt = 0; 584 if (IS_2200(isp)) { 585 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 586 } else if (IS_23XX(isp)) { 587 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 588 } 589 if (amt) { 590 FCPARAM(isp)->isp_dump_data = 591 malloc(amt, M_DEVBUF, M_WAITOK); 592 memset(FCPARAM(isp)->isp_dump_data, 0, amt); 593 } else { 594 device_printf(dev, 595 "f/w crash dumps not supported for card\n"); 596 } 597 } 598 } 599 #endif 600 } 601 #else 602 static void 603 isp_get_generic_options(device_t dev, ispsoftc_t *isp) 604 { 605 int tval; 606 607 /* 608 * Figure out if we're supposed to skip this one. 609 */ 610 tval = 0; 611 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 612 "disable", &tval) == 0 && tval) { 613 device_printf(dev, "disabled at user request\n"); 614 isp->isp_osinfo.disabled = 1; 615 return; 616 } 617 618 tval = -1; 619 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 620 "role", &tval) == 0 && tval != -1) { 621 tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 622 isp->isp_role = tval; 623 device_printf(dev, "setting role to 0x%x\n", isp->isp_role); 624 } else { 625 #ifdef ISP_TARGET_MODE 626 isp->isp_role = ISP_ROLE_TARGET; 627 #else 628 isp->isp_role = ISP_DEFAULT_ROLES; 629 #endif 630 } 631 632 tval = 0; 633 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 634 "fwload_disable", &tval) == 0 && tval != 0) { 635 isp->isp_confopts |= ISP_CFG_NORELOAD; 636 } 637 tval = 0; 638 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 639 "ignore_nvram", &tval) == 0 && tval != 0) { 640 isp->isp_confopts |= ISP_CFG_NONVRAM; 641 } 642 643 tval = 0; 644 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 645 "debug", &tval); 646 if (tval) { 647 isp->isp_dblev = tval; 648 } else { 649 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 650 } 651 if (bootverbose) { 652 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 653 } 654 655 } 656 657 static void 658 isp_get_pci_options(device_t dev, int *m1, int *m2) 659 { 660 int tval; 661 /* 662 * Which we should try first - memory mapping or i/o mapping? 663 * 664 * We used to try memory first followed by i/o on alpha, otherwise 665 * the reverse, but we should just try memory first all the time now. 666 */ 667 *m1 = PCIM_CMD_MEMEN; 668 *m2 = PCIM_CMD_PORTEN; 669 670 tval = 0; 671 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 672 "prefer_iomap", &tval) == 0 && tval != 0) { 673 *m1 = PCIM_CMD_PORTEN; 674 *m2 = PCIM_CMD_MEMEN; 675 } 676 tval = 0; 677 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 678 "prefer_memmap", &tval) == 0 && tval != 0) { 679 *m1 = PCIM_CMD_MEMEN; 680 *m2 = PCIM_CMD_PORTEN; 681 } 682 } 683 684 static void 685 isp_get_specific_options(device_t dev, ispsoftc_t *isp) 686 { 687 const char *sptr; 688 int tval; 689 690 isp->isp_osinfo.default_id = -1; 691 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 692 "iid", &tval) == 0) { 693 isp->isp_osinfo.default_id = tval; 694 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 695 } 696 if (isp->isp_osinfo.default_id == -1) { 697 if (IS_FC(isp)) { 698 isp->isp_osinfo.default_id = 109; 699 } else { 700 isp->isp_osinfo.default_id = 7; 701 } 702 } 703 704 callout_handle_init(&isp->isp_osinfo.ldt); 705 callout_handle_init(&isp->isp_osinfo.gdt); 706 707 if (IS_SCSI(isp)) { 708 return; 709 } 710 711 tval = 0; 712 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 713 "fullduplex", &tval) == 0 && tval != 0) { 714 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 715 } 716 #ifdef ISP_FW_CRASH_DUMP 717 tval = 0; 718 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 719 "fw_dump_enable", &tval) == 0 && tval != 0) { 720 size_t amt = 0; 721 if (IS_2200(isp)) { 722 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 723 } else if (IS_23XX(isp)) { 724 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 725 } 726 if (amt) { 727 FCPARAM(isp)->isp_dump_data = 728 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 729 } else { 730 device_printf(dev, 731 "f/w crash dumps not supported for this model\n"); 732 } 733 } 734 #endif 735 sptr = 0; 736 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 737 "topology", (const char **) &sptr) == 0 && sptr != 0) { 738 if (strcmp(sptr, "lport") == 0) { 739 isp->isp_confopts |= ISP_CFG_LPORT; 740 } else if (strcmp(sptr, "nport") == 0) { 741 isp->isp_confopts |= ISP_CFG_NPORT; 742 } else if (strcmp(sptr, "lport-only") == 0) { 743 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 744 } else if (strcmp(sptr, "nport-only") == 0) { 745 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 746 } 747 } 748 749 /* 750 * Because the resource_*_value functions can neither return 751 * 64 bit integer values, nor can they be directly coerced 752 * to interpret the right hand side of the assignment as 753 * you want them to interpret it, we have to force WWN 754 * hint replacement to specify WWN strings with a leading 755 * 'w' (e..g w50000000aaaa0001). Sigh. 756 */ 757 sptr = 0; 758 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 759 "portwwn", (const char **) &sptr); 760 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 761 char *eptr = 0; 762 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 763 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 764 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 765 isp->isp_osinfo.default_port_wwn = 0; 766 } else { 767 isp->isp_confopts |= ISP_CFG_OWNWWPN; 768 } 769 } 770 if (isp->isp_osinfo.default_port_wwn == 0) { 771 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 772 } 773 774 sptr = 0; 775 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 776 "nodewwn", (const char **) &sptr); 777 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 778 char *eptr = 0; 779 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 780 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 781 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 782 isp->isp_osinfo.default_node_wwn = 0; 783 } else { 784 isp->isp_confopts |= ISP_CFG_OWNWWNN; 785 } 786 } 787 if (isp->isp_osinfo.default_node_wwn == 0) { 788 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 789 } 790 791 792 tval = 0; 793 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 794 "hysteresis", &tval); 795 if (tval >= 0 && tval < 256) { 796 isp->isp_osinfo.hysteresis = tval; 797 } else { 798 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 799 } 800 801 tval = -1; 802 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 803 "loop_down_limit", &tval); 804 if (tval >= 0 && tval < 0xffff) { 805 isp->isp_osinfo.loop_down_limit = tval; 806 } else { 807 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 808 } 809 810 tval = -1; 811 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 812 "gone_device_time", &tval); 813 if (tval >= 0 && tval < 0xffff) { 814 isp->isp_osinfo.gone_device_time = tval; 815 } else { 816 isp->isp_osinfo.gone_device_time = isp_gone_device_time; 817 } 818 } 819 #endif 820 821 static int 822 isp_pci_attach(device_t dev) 823 { 824 struct resource *regs, *irq; 825 int rtp, rgd, iqd, m1, m2; 826 uint32_t data, cmd, linesz, psize, basetype; 827 struct isp_pcisoftc *pcs; 828 ispsoftc_t *isp = NULL; 829 struct ispmdvec *mdvp; 830 #if __FreeBSD_version >= 500000 831 int locksetup = 0; 832 #endif 833 834 pcs = device_get_softc(dev); 835 if (pcs == NULL) { 836 device_printf(dev, "cannot get softc\n"); 837 return (ENOMEM); 838 } 839 memset(pcs, 0, sizeof (*pcs)); 840 pcs->pci_dev = dev; 841 isp = &pcs->pci_isp; 842 843 /* 844 * Get Generic Options 845 */ 846 isp_get_generic_options(dev, isp); 847 848 /* 849 * Check to see if options have us disabled 850 */ 851 if (isp->isp_osinfo.disabled) { 852 /* 853 * But return zero to preserve unit numbering 854 */ 855 return (0); 856 } 857 858 /* 859 * Get PCI options- which in this case are just mapping preferences. 860 */ 861 isp_get_pci_options(dev, &m1, &m2); 862 863 linesz = PCI_DFLT_LNSZ; 864 irq = regs = NULL; 865 rgd = rtp = iqd = 0; 866 867 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 868 if (cmd & m1) { 869 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 870 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 871 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 872 } 873 if (regs == NULL && (cmd & m2)) { 874 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 875 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 876 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 877 } 878 if (regs == NULL) { 879 device_printf(dev, "unable to map any ports\n"); 880 goto bad; 881 } 882 if (bootverbose) { 883 device_printf(dev, "using %s space register mapping\n", 884 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 885 } 886 pcs->pci_dev = dev; 887 pcs->pci_reg = regs; 888 isp->isp_bus_tag = rman_get_bustag(regs); 889 isp->isp_bus_handle = rman_get_bushandle(regs); 890 891 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 892 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 893 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 894 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 895 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 896 mdvp = &mdvec; 897 basetype = ISP_HA_SCSI_UNKNOWN; 898 psize = sizeof (sdparam); 899 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 900 mdvp = &mdvec; 901 basetype = ISP_HA_SCSI_UNKNOWN; 902 psize = sizeof (sdparam); 903 } 904 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 905 mdvp = &mdvec_1080; 906 basetype = ISP_HA_SCSI_1080; 907 psize = sizeof (sdparam); 908 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 909 ISP1080_DMA_REGS_OFF; 910 } 911 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 912 mdvp = &mdvec_1080; 913 basetype = ISP_HA_SCSI_1240; 914 psize = 2 * sizeof (sdparam); 915 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 916 ISP1080_DMA_REGS_OFF; 917 } 918 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 919 mdvp = &mdvec_1080; 920 basetype = ISP_HA_SCSI_1280; 921 psize = 2 * sizeof (sdparam); 922 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 923 ISP1080_DMA_REGS_OFF; 924 } 925 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 926 mdvp = &mdvec_12160; 927 basetype = ISP_HA_SCSI_10160; 928 psize = sizeof (sdparam); 929 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 930 ISP1080_DMA_REGS_OFF; 931 } 932 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 933 mdvp = &mdvec_12160; 934 basetype = ISP_HA_SCSI_12160; 935 psize = 2 * sizeof (sdparam); 936 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 937 ISP1080_DMA_REGS_OFF; 938 } 939 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 940 mdvp = &mdvec_2100; 941 basetype = ISP_HA_FC_2100; 942 psize = sizeof (fcparam); 943 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 944 PCI_MBOX_REGS2100_OFF; 945 if (pci_get_revid(dev) < 3) { 946 /* 947 * XXX: Need to get the actual revision 948 * XXX: number of the 2100 FB. At any rate, 949 * XXX: lower cache line size for early revision 950 * XXX; boards. 951 */ 952 linesz = 1; 953 } 954 } 955 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 956 mdvp = &mdvec_2200; 957 basetype = ISP_HA_FC_2200; 958 psize = sizeof (fcparam); 959 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 960 PCI_MBOX_REGS2100_OFF; 961 } 962 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 963 mdvp = &mdvec_2300; 964 basetype = ISP_HA_FC_2300; 965 psize = sizeof (fcparam); 966 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 967 PCI_MBOX_REGS2300_OFF; 968 } 969 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 || 970 pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 971 mdvp = &mdvec_2300; 972 basetype = ISP_HA_FC_2312; 973 psize = sizeof (fcparam); 974 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 975 PCI_MBOX_REGS2300_OFF; 976 } 977 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 || 978 pci_get_devid(dev) == PCI_QLOGIC_ISP6322) { 979 mdvp = &mdvec_2300; 980 basetype = ISP_HA_FC_2322; 981 psize = sizeof (fcparam); 982 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 983 PCI_MBOX_REGS2300_OFF; 984 } 985 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422 || 986 pci_get_devid(dev) == PCI_QLOGIC_ISP2432) { 987 mdvp = &mdvec_2400; 988 basetype = ISP_HA_FC_2400; 989 psize = sizeof (fcparam); 990 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 991 PCI_MBOX_REGS2400_OFF; 992 } 993 isp = &pcs->pci_isp; 994 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 995 if (isp->isp_param == NULL) { 996 device_printf(dev, "cannot allocate parameter data\n"); 997 goto bad; 998 } 999 isp->isp_mdvec = mdvp; 1000 isp->isp_type = basetype; 1001 isp->isp_revision = pci_get_revid(dev); 1002 isp->isp_dev = dev; 1003 1004 /* 1005 * Now that we know who we are (roughly) get/set specific options 1006 */ 1007 isp_get_specific_options(dev, isp); 1008 1009 #if __FreeBSD_version >= 700000 1010 /* 1011 * Try and find firmware for this device. 1012 */ 1013 { 1014 char fwname[32]; 1015 unsigned int did = pci_get_device(dev); 1016 1017 /* 1018 * Map a few pci ids to fw names 1019 */ 1020 switch (did) { 1021 case PCI_PRODUCT_QLOGIC_ISP1020: 1022 did = 0x1040; 1023 break; 1024 case PCI_PRODUCT_QLOGIC_ISP1240: 1025 did = 0x1080; 1026 break; 1027 case PCI_PRODUCT_QLOGIC_ISP10160: 1028 case PCI_PRODUCT_QLOGIC_ISP12160: 1029 did = 0x12160; 1030 break; 1031 case PCI_PRODUCT_QLOGIC_ISP6312: 1032 case PCI_PRODUCT_QLOGIC_ISP2312: 1033 did = 0x2300; 1034 break; 1035 case PCI_PRODUCT_QLOGIC_ISP6322: 1036 did = 0x2322; 1037 break; 1038 case PCI_PRODUCT_QLOGIC_ISP2422: 1039 case PCI_PRODUCT_QLOGIC_ISP2432: 1040 did = 0x2400; 1041 break; 1042 default: 1043 break; 1044 } 1045 1046 isp->isp_osinfo.fw = NULL; 1047 if (isp->isp_role & ISP_ROLE_TARGET) { 1048 snprintf(fwname, sizeof (fwname), "isp_%04x_it", did); 1049 isp->isp_osinfo.fw = firmware_get(fwname); 1050 } 1051 if (isp->isp_osinfo.fw == NULL) { 1052 snprintf(fwname, sizeof (fwname), "isp_%04x", did); 1053 isp->isp_osinfo.fw = firmware_get(fwname); 1054 } 1055 if (isp->isp_osinfo.fw != NULL) { 1056 union { 1057 const void *fred; 1058 uint16_t *bob; 1059 } u; 1060 u.fred = isp->isp_osinfo.fw->data; 1061 isp->isp_mdvec->dv_ispfw = u.bob; 1062 } 1063 } 1064 #else 1065 if (isp_get_firmware_p) { 1066 int device = (int) pci_get_device(dev); 1067 #ifdef ISP_TARGET_MODE 1068 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 1069 #else 1070 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 1071 #endif 1072 } 1073 #endif 1074 1075 /* 1076 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 1077 * are set. 1078 */ 1079 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 1080 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 1081 1082 if (IS_2300(isp)) { /* per QLogic errata */ 1083 cmd &= ~PCIM_CMD_INVEN; 1084 } 1085 1086 if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 1087 cmd &= ~PCIM_CMD_INTX_DISABLE; 1088 } 1089 1090 #ifdef WE_KNEW_WHAT_WE_WERE_DOING 1091 if (IS_24XX(isp)) { 1092 int reg; 1093 1094 cmd &= ~PCIM_CMD_INTX_DISABLE; 1095 1096 /* 1097 * Is this a PCI-X card? If so, set max read byte count. 1098 */ 1099 if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) { 1100 uint16_t pxcmd; 1101 reg += 2; 1102 1103 pxcmd = pci_read_config(dev, reg, 2); 1104 pxcmd &= ~0xc; 1105 pxcmd |= 0x8; 1106 pci_write_config(dev, reg, 2, pxcmd); 1107 } 1108 1109 /* 1110 * Is this a PCI Express card? If so, set max read byte count. 1111 */ 1112 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 1113 uint16_t pectl; 1114 1115 reg += 0x8; 1116 pectl = pci_read_config(dev, reg, 2); 1117 pectl &= ~0x7000; 1118 pectl |= 0x4000; 1119 pci_write_config(dev, reg, 2, pectl); 1120 } 1121 } 1122 #else 1123 if (IS_24XX(isp)) { 1124 cmd &= ~PCIM_CMD_INTX_DISABLE; 1125 } 1126 #endif 1127 1128 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 1129 1130 /* 1131 * Make sure the Cache Line Size register is set sensibly. 1132 */ 1133 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 1134 if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) { 1135 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d from %d", 1136 linesz, data); 1137 data = linesz; 1138 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 1139 } 1140 1141 /* 1142 * Make sure the Latency Timer is sane. 1143 */ 1144 data = pci_read_config(dev, PCIR_LATTIMER, 1); 1145 if (data < PCI_DFLT_LTNCY) { 1146 data = PCI_DFLT_LTNCY; 1147 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 1148 pci_write_config(dev, PCIR_LATTIMER, data, 1); 1149 } 1150 1151 /* 1152 * Make sure we've disabled the ROM. 1153 */ 1154 data = pci_read_config(dev, PCIR_ROMADDR, 4); 1155 data &= ~1; 1156 pci_write_config(dev, PCIR_ROMADDR, data, 4); 1157 1158 iqd = 0; 1159 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, 1160 RF_ACTIVE | RF_SHAREABLE); 1161 if (irq == NULL) { 1162 device_printf(dev, "could not allocate interrupt\n"); 1163 goto bad; 1164 } 1165 1166 #if __FreeBSD_version >= 500000 1167 /* Make sure the lock is set up. */ 1168 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 1169 locksetup++; 1170 #endif 1171 1172 if (isp_setup_intr(dev, irq, ISP_IFLAGS, NULL, isp_pci_intr, isp, 1173 &pcs->ih)) { 1174 device_printf(dev, "could not setup interrupt\n"); 1175 goto bad; 1176 } 1177 1178 /* 1179 * Last minute checks... 1180 */ 1181 if (IS_23XX(isp) || IS_24XX(isp)) { 1182 isp->isp_port = pci_get_function(dev); 1183 } 1184 1185 if (IS_23XX(isp)) { 1186 /* 1187 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 1188 */ 1189 isp->isp_touched = 1; 1190 } 1191 1192 /* 1193 * Make sure we're in reset state. 1194 */ 1195 ISP_LOCK(isp); 1196 isp_reset(isp); 1197 if (isp->isp_state != ISP_RESETSTATE) { 1198 ISP_UNLOCK(isp); 1199 goto bad; 1200 } 1201 isp_init(isp); 1202 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 1203 isp_uninit(isp); 1204 ISP_UNLOCK(isp); 1205 goto bad; 1206 } 1207 isp_attach(isp); 1208 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 1209 isp_uninit(isp); 1210 ISP_UNLOCK(isp); 1211 goto bad; 1212 } 1213 /* 1214 * XXXX: Here is where we might unload the f/w module 1215 * XXXX: (or decrease the reference count to it). 1216 */ 1217 ISP_UNLOCK(isp); 1218 1219 return (0); 1220 1221 bad: 1222 1223 if (pcs && pcs->ih) { 1224 (void) bus_teardown_intr(dev, irq, pcs->ih); 1225 } 1226 1227 #if __FreeBSD_version >= 500000 1228 if (locksetup && isp) { 1229 mtx_destroy(&isp->isp_osinfo.lock); 1230 } 1231 #endif 1232 1233 if (irq) { 1234 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 1235 } 1236 1237 1238 if (regs) { 1239 (void) bus_release_resource(dev, rtp, rgd, regs); 1240 } 1241 1242 if (pcs) { 1243 if (pcs->pci_isp.isp_param) { 1244 #ifdef ISP_FW_CRASH_DUMP 1245 if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) { 1246 free(FCPARAM(isp)->isp_dump_data, M_DEVBUF); 1247 } 1248 #endif 1249 free(pcs->pci_isp.isp_param, M_DEVBUF); 1250 } 1251 } 1252 1253 /* 1254 * XXXX: Here is where we might unload the f/w module 1255 * XXXX: (or decrease the reference count to it). 1256 */ 1257 return (ENXIO); 1258 } 1259 1260 static int 1261 isp_pci_detach(device_t dev) 1262 { 1263 struct isp_pcisoftc *pcs; 1264 ispsoftc_t *isp; 1265 1266 pcs = device_get_softc(dev); 1267 if (pcs == NULL) { 1268 return (ENXIO); 1269 } 1270 isp = (ispsoftc_t *) pcs; 1271 ISP_DISABLE_INTS(isp); 1272 return (0); 1273 } 1274 1275 static void 1276 isp_pci_intr(void *arg) 1277 { 1278 ispsoftc_t *isp = arg; 1279 uint32_t isr; 1280 uint16_t sema, mbox; 1281 1282 ISP_LOCK(isp); 1283 isp->isp_intcnt++; 1284 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 1285 isp->isp_intbogus++; 1286 } else { 1287 isp_intr(isp, isr, sema, mbox); 1288 } 1289 ISP_UNLOCK(isp); 1290 } 1291 1292 1293 #define IspVirt2Off(a, x) \ 1294 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 1295 _BLK_REG_SHFT] + ((x) & 0xfff)) 1296 1297 #define BXR2(isp, off) \ 1298 bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off) 1299 #define BXW2(isp, off, v) \ 1300 bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v) 1301 #define BXR4(isp, off) \ 1302 bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off) 1303 #define BXW4(isp, off, v) \ 1304 bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v) 1305 1306 1307 static __inline int 1308 isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp) 1309 { 1310 uint32_t val0, val1; 1311 int i = 0; 1312 1313 do { 1314 val0 = BXR2(isp, IspVirt2Off(isp, off)); 1315 val1 = BXR2(isp, IspVirt2Off(isp, off)); 1316 } while (val0 != val1 && ++i < 1000); 1317 if (val0 != val1) { 1318 return (1); 1319 } 1320 *rp = val0; 1321 return (0); 1322 } 1323 1324 static int 1325 isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, 1326 uint16_t *semap, uint16_t *mbp) 1327 { 1328 uint16_t isr, sema; 1329 1330 if (IS_2100(isp)) { 1331 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 1332 return (0); 1333 } 1334 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 1335 return (0); 1336 } 1337 } else { 1338 isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR)); 1339 sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA)); 1340 } 1341 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 1342 isr &= INT_PENDING_MASK(isp); 1343 sema &= BIU_SEMA_LOCK; 1344 if (isr == 0 && sema == 0) { 1345 return (0); 1346 } 1347 *isrp = isr; 1348 if ((*semap = sema) != 0) { 1349 if (IS_2100(isp)) { 1350 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 1351 return (0); 1352 } 1353 } else { 1354 *mbp = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0)); 1355 } 1356 } 1357 return (1); 1358 } 1359 1360 static int 1361 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, 1362 uint16_t *semap, uint16_t *mbox0p) 1363 { 1364 uint32_t hccr; 1365 uint32_t r2hisr; 1366 1367 if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 1368 *isrp = 0; 1369 return (0); 1370 } 1371 r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO)); 1372 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1373 if ((r2hisr & BIU_R2HST_INTR) == 0) { 1374 *isrp = 0; 1375 return (0); 1376 } 1377 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 1378 case ISPR2HST_ROM_MBX_OK: 1379 case ISPR2HST_ROM_MBX_FAIL: 1380 case ISPR2HST_MBX_OK: 1381 case ISPR2HST_MBX_FAIL: 1382 case ISPR2HST_ASYNC_EVENT: 1383 *isrp = r2hisr & 0xffff; 1384 *mbox0p = (r2hisr >> 16); 1385 *semap = 1; 1386 return (1); 1387 case ISPR2HST_RIO_16: 1388 *isrp = r2hisr & 0xffff; 1389 *mbox0p = ASYNC_RIO1; 1390 *semap = 1; 1391 return (1); 1392 case ISPR2HST_FPOST: 1393 *isrp = r2hisr & 0xffff; 1394 *mbox0p = ASYNC_CMD_CMPLT; 1395 *semap = 1; 1396 return (1); 1397 case ISPR2HST_FPOST_CTIO: 1398 *isrp = r2hisr & 0xffff; 1399 *mbox0p = ASYNC_CTIO_DONE; 1400 *semap = 1; 1401 return (1); 1402 case ISPR2HST_RSPQ_UPDATE: 1403 *isrp = r2hisr & 0xffff; 1404 *mbox0p = 0; 1405 *semap = 0; 1406 return (1); 1407 default: 1408 hccr = ISP_READ(isp, HCCR); 1409 if (hccr & HCCR_PAUSE) { 1410 ISP_WRITE(isp, HCCR, HCCR_RESET); 1411 isp_prt(isp, ISP_LOGERR, 1412 "RISC paused at interrupt (%x->%x)", hccr, 1413 ISP_READ(isp, HCCR)); 1414 ISP_WRITE(isp, BIU_ICR, 0); 1415 } else { 1416 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", 1417 r2hisr); 1418 } 1419 return (0); 1420 } 1421 } 1422 1423 static int 1424 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp, 1425 uint16_t *semap, uint16_t *mbox0p) 1426 { 1427 uint32_t r2hisr; 1428 1429 r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO)); 1430 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1431 if ((r2hisr & BIU2400_R2HST_INTR) == 0) { 1432 *isrp = 0; 1433 return (0); 1434 } 1435 switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) { 1436 case ISP2400R2HST_ROM_MBX_OK: 1437 case ISP2400R2HST_ROM_MBX_FAIL: 1438 case ISP2400R2HST_MBX_OK: 1439 case ISP2400R2HST_MBX_FAIL: 1440 case ISP2400R2HST_ASYNC_EVENT: 1441 *isrp = r2hisr & 0xffff; 1442 *mbox0p = (r2hisr >> 16); 1443 *semap = 1; 1444 return (1); 1445 case ISP2400R2HST_RSPQ_UPDATE: 1446 case ISP2400R2HST_ATIO_RSPQ_UPDATE: 1447 case ISP2400R2HST_ATIO_RQST_UPDATE: 1448 *isrp = r2hisr & 0xffff; 1449 *mbox0p = 0; 1450 *semap = 0; 1451 return (1); 1452 default: 1453 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); 1454 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); 1455 return (0); 1456 } 1457 } 1458 1459 static uint32_t 1460 isp_pci_rd_reg(ispsoftc_t *isp, int regoff) 1461 { 1462 uint16_t rv; 1463 int oldconf = 0; 1464 1465 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1466 /* 1467 * We will assume that someone has paused the RISC processor. 1468 */ 1469 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1470 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1471 oldconf | BIU_PCI_CONF1_SXP); 1472 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1473 } 1474 rv = BXR2(isp, IspVirt2Off(isp, regoff)); 1475 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1476 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); 1477 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1478 } 1479 return (rv); 1480 } 1481 1482 static void 1483 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) 1484 { 1485 int oldconf = 0; 1486 1487 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1488 /* 1489 * We will assume that someone has paused the RISC processor. 1490 */ 1491 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1492 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1493 oldconf | BIU_PCI_CONF1_SXP); 1494 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1495 } 1496 BXW2(isp, IspVirt2Off(isp, regoff), val); 1497 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2); 1498 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1499 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); 1500 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1501 } 1502 1503 } 1504 1505 static uint32_t 1506 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff) 1507 { 1508 uint32_t rv, oc = 0; 1509 1510 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1511 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1512 uint32_t tc; 1513 /* 1514 * We will assume that someone has paused the RISC processor. 1515 */ 1516 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1517 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1518 if (regoff & SXP_BANK1_SELECT) 1519 tc |= BIU_PCI1080_CONF1_SXP1; 1520 else 1521 tc |= BIU_PCI1080_CONF1_SXP0; 1522 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); 1523 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1524 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1525 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1526 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1527 oc | BIU_PCI1080_CONF1_DMA); 1528 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1529 } 1530 rv = BXR2(isp, IspVirt2Off(isp, regoff)); 1531 if (oc) { 1532 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); 1533 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1534 } 1535 return (rv); 1536 } 1537 1538 static void 1539 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val) 1540 { 1541 int oc = 0; 1542 1543 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1544 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1545 uint32_t tc; 1546 /* 1547 * We will assume that someone has paused the RISC processor. 1548 */ 1549 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1550 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1551 if (regoff & SXP_BANK1_SELECT) 1552 tc |= BIU_PCI1080_CONF1_SXP1; 1553 else 1554 tc |= BIU_PCI1080_CONF1_SXP0; 1555 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); 1556 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1557 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1558 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1559 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1560 oc | BIU_PCI1080_CONF1_DMA); 1561 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1562 } 1563 BXW2(isp, IspVirt2Off(isp, regoff), val); 1564 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2); 1565 if (oc) { 1566 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); 1567 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1568 } 1569 } 1570 1571 static uint32_t 1572 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff) 1573 { 1574 uint32_t rv; 1575 int block = regoff & _BLK_REG_MASK; 1576 1577 switch (block) { 1578 case BIU_BLOCK: 1579 break; 1580 case MBOX_BLOCK: 1581 return (BXR2(isp, IspVirt2Off(isp, regoff))); 1582 case SXP_BLOCK: 1583 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff); 1584 return (0xffffffff); 1585 case RISC_BLOCK: 1586 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff); 1587 return (0xffffffff); 1588 case DMA_BLOCK: 1589 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff); 1590 return (0xffffffff); 1591 default: 1592 isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff); 1593 return (0xffffffff); 1594 } 1595 1596 1597 switch (regoff) { 1598 case BIU2400_FLASH_ADDR: 1599 case BIU2400_FLASH_DATA: 1600 case BIU2400_ICR: 1601 case BIU2400_ISR: 1602 case BIU2400_CSR: 1603 case BIU2400_REQINP: 1604 case BIU2400_REQOUTP: 1605 case BIU2400_RSPINP: 1606 case BIU2400_RSPOUTP: 1607 case BIU2400_PRI_RQINP: 1608 case BIU2400_PRI_RSPINP: 1609 case BIU2400_ATIO_RSPINP: 1610 case BIU2400_ATIO_REQINP: 1611 case BIU2400_HCCR: 1612 case BIU2400_GPIOD: 1613 case BIU2400_GPIOE: 1614 case BIU2400_HSEMA: 1615 rv = BXR4(isp, IspVirt2Off(isp, regoff)); 1616 break; 1617 case BIU2400_R2HSTSLO: 1618 rv = BXR4(isp, IspVirt2Off(isp, regoff)); 1619 break; 1620 case BIU2400_R2HSTSHI: 1621 rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16; 1622 break; 1623 default: 1624 isp_prt(isp, ISP_LOGERR, 1625 "isp_pci_rd_reg_2400: unknown offset %x", regoff); 1626 rv = 0xffffffff; 1627 break; 1628 } 1629 return (rv); 1630 } 1631 1632 static void 1633 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val) 1634 { 1635 int block = regoff & _BLK_REG_MASK; 1636 1637 switch (block) { 1638 case BIU_BLOCK: 1639 break; 1640 case MBOX_BLOCK: 1641 BXW2(isp, IspVirt2Off(isp, regoff), val); 1642 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2); 1643 return; 1644 case SXP_BLOCK: 1645 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff); 1646 return; 1647 case RISC_BLOCK: 1648 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff); 1649 return; 1650 case DMA_BLOCK: 1651 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff); 1652 return; 1653 default: 1654 isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x", 1655 regoff); 1656 break; 1657 } 1658 1659 switch (regoff) { 1660 case BIU2400_FLASH_ADDR: 1661 case BIU2400_FLASH_DATA: 1662 case BIU2400_ICR: 1663 case BIU2400_ISR: 1664 case BIU2400_CSR: 1665 case BIU2400_REQINP: 1666 case BIU2400_REQOUTP: 1667 case BIU2400_RSPINP: 1668 case BIU2400_RSPOUTP: 1669 case BIU2400_PRI_RQINP: 1670 case BIU2400_PRI_RSPINP: 1671 case BIU2400_ATIO_RSPINP: 1672 case BIU2400_ATIO_REQINP: 1673 case BIU2400_HCCR: 1674 case BIU2400_GPIOD: 1675 case BIU2400_GPIOE: 1676 case BIU2400_HSEMA: 1677 BXW4(isp, IspVirt2Off(isp, regoff), val); 1678 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4); 1679 break; 1680 default: 1681 isp_prt(isp, ISP_LOGERR, 1682 "isp_pci_wr_reg_2400: bad offset 0x%x", regoff); 1683 break; 1684 } 1685 } 1686 1687 1688 struct imush { 1689 ispsoftc_t *isp; 1690 int error; 1691 }; 1692 1693 static void imc(void *, bus_dma_segment_t *, int, int); 1694 1695 static void 1696 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1697 { 1698 struct imush *imushp = (struct imush *) arg; 1699 if (error) { 1700 imushp->error = error; 1701 } else { 1702 ispsoftc_t *isp =imushp->isp; 1703 bus_addr_t addr = segs->ds_addr; 1704 1705 isp->isp_rquest_dma = addr; 1706 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1707 isp->isp_result_dma = addr; 1708 if (IS_FC(isp)) { 1709 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1710 FCPARAM(isp)->isp_scdma = addr; 1711 } 1712 } 1713 } 1714 1715 static int 1716 isp_pci_mbxdma(ispsoftc_t *isp) 1717 { 1718 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1719 caddr_t base; 1720 uint32_t len; 1721 int i, error, ns; 1722 bus_size_t slim; /* segment size */ 1723 bus_addr_t llim; /* low limit of unavailable dma */ 1724 bus_addr_t hlim; /* high limit of unavailable dma */ 1725 struct imush im; 1726 1727 /* 1728 * Already been here? If so, leave... 1729 */ 1730 if (isp->isp_rquest) { 1731 return (0); 1732 } 1733 1734 if (isp->isp_maxcmds == 0) { 1735 isp_prt(isp, ISP_LOGERR, "maxcmds not set"); 1736 return (1); 1737 } 1738 1739 hlim = BUS_SPACE_MAXADDR; 1740 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1741 slim = (bus_size_t) (1ULL << 32); 1742 llim = BUS_SPACE_MAXADDR; 1743 } else { 1744 llim = BUS_SPACE_MAXADDR_32BIT; 1745 slim = (1 << 24); 1746 } 1747 1748 /* 1749 * XXX: We don't really support 64 bit target mode for parallel scsi yet 1750 */ 1751 #ifdef ISP_TARGET_MODE 1752 if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) { 1753 isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet"); 1754 return (1); 1755 } 1756 #endif 1757 1758 ISP_UNLOCK(isp); 1759 if (isp_dma_tag_create(BUS_DMA_ROOTARG(pcs->pci_dev), 1, slim, llim, 1760 hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, 1761 &pcs->dmat)) { 1762 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1763 ISP_LOCK(isp); 1764 return (1); 1765 } 1766 1767 1768 len = sizeof (XS_T **) * isp->isp_maxcmds; 1769 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1770 if (isp->isp_xflist == NULL) { 1771 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1772 ISP_LOCK(isp); 1773 return (1); 1774 } 1775 #ifdef ISP_TARGET_MODE 1776 len = sizeof (void **) * isp->isp_maxcmds; 1777 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1778 if (isp->isp_tgtlist == NULL) { 1779 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1780 ISP_LOCK(isp); 1781 return (1); 1782 } 1783 #endif 1784 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1785 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1786 if (pcs->dmaps == NULL) { 1787 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1788 free(isp->isp_xflist, M_DEVBUF); 1789 #ifdef ISP_TARGET_MODE 1790 free(isp->isp_tgtlist, M_DEVBUF); 1791 #endif 1792 ISP_LOCK(isp); 1793 return (1); 1794 } 1795 1796 /* 1797 * Allocate and map the request, result queues, plus FC scratch area. 1798 */ 1799 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1800 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1801 if (IS_FC(isp)) { 1802 len += ISP2100_SCRLEN; 1803 } 1804 1805 ns = (len / PAGE_SIZE) + 1; 1806 /* 1807 * Create a tag for the control spaces- force it to within 32 bits. 1808 */ 1809 if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim, 1810 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1811 NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) { 1812 isp_prt(isp, ISP_LOGERR, 1813 "cannot create a dma tag for control spaces"); 1814 free(pcs->dmaps, M_DEVBUF); 1815 free(isp->isp_xflist, M_DEVBUF); 1816 #ifdef ISP_TARGET_MODE 1817 free(isp->isp_tgtlist, M_DEVBUF); 1818 #endif 1819 ISP_LOCK(isp); 1820 return (1); 1821 } 1822 1823 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1824 &isp->isp_cdmap) != 0) { 1825 isp_prt(isp, ISP_LOGERR, 1826 "cannot allocate %d bytes of CCB memory", len); 1827 bus_dma_tag_destroy(isp->isp_cdmat); 1828 free(isp->isp_xflist, M_DEVBUF); 1829 #ifdef ISP_TARGET_MODE 1830 free(isp->isp_tgtlist, M_DEVBUF); 1831 #endif 1832 free(pcs->dmaps, M_DEVBUF); 1833 ISP_LOCK(isp); 1834 return (1); 1835 } 1836 1837 for (i = 0; i < isp->isp_maxcmds; i++) { 1838 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1839 if (error) { 1840 isp_prt(isp, ISP_LOGERR, 1841 "error %d creating per-cmd DMA maps", error); 1842 while (--i >= 0) { 1843 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1844 } 1845 goto bad; 1846 } 1847 } 1848 1849 im.isp = isp; 1850 im.error = 0; 1851 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1852 if (im.error) { 1853 isp_prt(isp, ISP_LOGERR, 1854 "error %d loading dma map for control areas", im.error); 1855 goto bad; 1856 } 1857 1858 isp->isp_rquest = base; 1859 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1860 isp->isp_result = base; 1861 if (IS_FC(isp)) { 1862 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1863 FCPARAM(isp)->isp_scratch = base; 1864 } 1865 ISP_LOCK(isp); 1866 return (0); 1867 1868 bad: 1869 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1870 bus_dma_tag_destroy(isp->isp_cdmat); 1871 free(isp->isp_xflist, M_DEVBUF); 1872 #ifdef ISP_TARGET_MODE 1873 free(isp->isp_tgtlist, M_DEVBUF); 1874 #endif 1875 free(pcs->dmaps, M_DEVBUF); 1876 ISP_LOCK(isp); 1877 isp->isp_rquest = NULL; 1878 return (1); 1879 } 1880 1881 typedef struct { 1882 ispsoftc_t *isp; 1883 void *cmd_token; 1884 void *rq; 1885 uint32_t *nxtip; 1886 uint32_t optr; 1887 int error; 1888 } mush_t; 1889 1890 #define MUSHERR_NOQENTRIES -2 1891 1892 #ifdef ISP_TARGET_MODE 1893 /* 1894 * We need to handle DMA for target mode differently from initiator mode. 1895 * 1896 * DMA mapping and construction and submission of CTIO Request Entries 1897 * and rendevous for completion are very tightly coupled because we start 1898 * out by knowing (per platform) how much data we have to move, but we 1899 * don't know, up front, how many DMA mapping segments will have to be used 1900 * cover that data, so we don't know how many CTIO Request Entries we 1901 * will end up using. Further, for performance reasons we may want to 1902 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1903 * 1904 * The standard vector still goes through isp_pci_dmasetup, but the callback 1905 * for the DMA mapping routines comes here instead with the whole transfer 1906 * mapped and a pointer to a partially filled in already allocated request 1907 * queue entry. We finish the job. 1908 */ 1909 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1910 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1911 1912 #define STATUS_WITH_DATA 1 1913 1914 static void 1915 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1916 { 1917 mush_t *mp; 1918 struct ccb_scsiio *csio; 1919 ispsoftc_t *isp; 1920 struct isp_pcisoftc *pcs; 1921 bus_dmamap_t *dp; 1922 ct_entry_t *cto, *qe; 1923 uint8_t scsi_status; 1924 uint32_t curi, nxti, handle; 1925 uint32_t sflags; 1926 int32_t resid; 1927 int nth_ctio, nctios, send_status; 1928 1929 mp = (mush_t *) arg; 1930 if (error) { 1931 mp->error = error; 1932 return; 1933 } 1934 1935 isp = mp->isp; 1936 csio = mp->cmd_token; 1937 cto = mp->rq; 1938 curi = isp->isp_reqidx; 1939 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1940 1941 cto->ct_xfrlen = 0; 1942 cto->ct_seg_count = 0; 1943 cto->ct_header.rqs_entry_count = 1; 1944 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1945 1946 if (nseg == 0) { 1947 cto->ct_header.rqs_seqno = 1; 1948 isp_prt(isp, ISP_LOGTDEBUG1, 1949 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1950 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1951 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1952 cto->ct_scsi_status, cto->ct_resid); 1953 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1954 isp_put_ctio(isp, cto, qe); 1955 return; 1956 } 1957 1958 nctios = nseg / ISP_RQDSEG; 1959 if (nseg % ISP_RQDSEG) { 1960 nctios++; 1961 } 1962 1963 /* 1964 * Save syshandle, and potentially any SCSI status, which we'll 1965 * reinsert on the last CTIO we're going to send. 1966 */ 1967 1968 handle = cto->ct_syshandle; 1969 cto->ct_syshandle = 0; 1970 cto->ct_header.rqs_seqno = 0; 1971 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1972 1973 if (send_status) { 1974 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1975 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1976 /* 1977 * Preserve residual. 1978 */ 1979 resid = cto->ct_resid; 1980 1981 /* 1982 * Save actual SCSI status. 1983 */ 1984 scsi_status = cto->ct_scsi_status; 1985 1986 #ifndef STATUS_WITH_DATA 1987 sflags |= CT_NO_DATA; 1988 /* 1989 * We can't do a status at the same time as a data CTIO, so 1990 * we need to synthesize an extra CTIO at this level. 1991 */ 1992 nctios++; 1993 #endif 1994 } else { 1995 sflags = scsi_status = resid = 0; 1996 } 1997 1998 cto->ct_resid = 0; 1999 cto->ct_scsi_status = 0; 2000 2001 pcs = (struct isp_pcisoftc *)isp; 2002 dp = &pcs->dmaps[isp_handle_index(handle)]; 2003 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2004 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2005 } else { 2006 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2007 } 2008 2009 nxti = *mp->nxtip; 2010 2011 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 2012 int seglim; 2013 2014 seglim = nseg; 2015 if (seglim) { 2016 int seg; 2017 2018 if (seglim > ISP_RQDSEG) 2019 seglim = ISP_RQDSEG; 2020 2021 for (seg = 0; seg < seglim; seg++, nseg--) { 2022 /* 2023 * Unlike normal initiator commands, we don't 2024 * do any swizzling here. 2025 */ 2026 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 2027 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 2028 cto->ct_xfrlen += dm_segs->ds_len; 2029 dm_segs++; 2030 } 2031 cto->ct_seg_count = seg; 2032 } else { 2033 /* 2034 * This case should only happen when we're sending an 2035 * extra CTIO with final status. 2036 */ 2037 if (send_status == 0) { 2038 isp_prt(isp, ISP_LOGWARN, 2039 "tdma_mk ran out of segments"); 2040 mp->error = EINVAL; 2041 return; 2042 } 2043 } 2044 2045 /* 2046 * At this point, the fields ct_lun, ct_iid, ct_tagval, 2047 * ct_tagtype, and ct_timeout have been carried over 2048 * unchanged from what our caller had set. 2049 * 2050 * The dataseg fields and the seg_count fields we just got 2051 * through setting. The data direction we've preserved all 2052 * along and only clear it if we're now sending status. 2053 */ 2054 2055 if (nth_ctio == nctios - 1) { 2056 /* 2057 * We're the last in a sequence of CTIOs, so mark 2058 * this CTIO and save the handle to the CCB such that 2059 * when this CTIO completes we can free dma resources 2060 * and do whatever else we need to do to finish the 2061 * rest of the command. We *don't* give this to the 2062 * firmware to work on- the caller will do that. 2063 */ 2064 2065 cto->ct_syshandle = handle; 2066 cto->ct_header.rqs_seqno = 1; 2067 2068 if (send_status) { 2069 cto->ct_scsi_status = scsi_status; 2070 cto->ct_flags |= sflags; 2071 cto->ct_resid = resid; 2072 } 2073 if (send_status) { 2074 isp_prt(isp, ISP_LOGTDEBUG1, 2075 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 2076 "scsi status %x resid %d", 2077 cto->ct_fwhandle, csio->ccb_h.target_lun, 2078 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 2079 cto->ct_scsi_status, cto->ct_resid); 2080 } else { 2081 isp_prt(isp, ISP_LOGTDEBUG1, 2082 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 2083 cto->ct_fwhandle, csio->ccb_h.target_lun, 2084 cto->ct_iid, cto->ct_tag_val, 2085 cto->ct_flags); 2086 } 2087 isp_put_ctio(isp, cto, qe); 2088 ISP_TDQE(isp, "last tdma_mk", curi, cto); 2089 if (nctios > 1) { 2090 MEMORYBARRIER(isp, SYNC_REQUEST, 2091 curi, QENTRY_LEN); 2092 } 2093 } else { 2094 ct_entry_t *oqe = qe; 2095 2096 /* 2097 * Make sure syshandle fields are clean 2098 */ 2099 cto->ct_syshandle = 0; 2100 cto->ct_header.rqs_seqno = 0; 2101 2102 isp_prt(isp, ISP_LOGTDEBUG1, 2103 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 2104 cto->ct_fwhandle, csio->ccb_h.target_lun, 2105 cto->ct_iid, cto->ct_flags); 2106 2107 /* 2108 * Get a new CTIO 2109 */ 2110 qe = (ct_entry_t *) 2111 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2112 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 2113 if (nxti == mp->optr) { 2114 isp_prt(isp, ISP_LOGTDEBUG0, 2115 "Queue Overflow in tdma_mk"); 2116 mp->error = MUSHERR_NOQENTRIES; 2117 return; 2118 } 2119 2120 /* 2121 * Now that we're done with the old CTIO, 2122 * flush it out to the request queue. 2123 */ 2124 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 2125 isp_put_ctio(isp, cto, oqe); 2126 if (nth_ctio != 0) { 2127 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 2128 QENTRY_LEN); 2129 } 2130 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 2131 2132 /* 2133 * Reset some fields in the CTIO so we can reuse 2134 * for the next one we'll flush to the request 2135 * queue. 2136 */ 2137 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 2138 cto->ct_header.rqs_entry_count = 1; 2139 cto->ct_header.rqs_flags = 0; 2140 cto->ct_status = 0; 2141 cto->ct_scsi_status = 0; 2142 cto->ct_xfrlen = 0; 2143 cto->ct_resid = 0; 2144 cto->ct_seg_count = 0; 2145 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 2146 } 2147 } 2148 *mp->nxtip = nxti; 2149 } 2150 2151 /* 2152 * We don't have to do multiple CTIOs here. Instead, we can just do 2153 * continuation segments as needed. This greatly simplifies the code 2154 * improves performance. 2155 */ 2156 2157 static void 2158 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2159 { 2160 mush_t *mp; 2161 struct ccb_scsiio *csio; 2162 ispsoftc_t *isp; 2163 ct2_entry_t *cto, *qe; 2164 uint32_t curi, nxti; 2165 ispds_t *ds; 2166 ispds64_t *ds64; 2167 int segcnt, seglim; 2168 2169 mp = (mush_t *) arg; 2170 if (error) { 2171 mp->error = error; 2172 return; 2173 } 2174 2175 isp = mp->isp; 2176 csio = mp->cmd_token; 2177 cto = mp->rq; 2178 2179 curi = isp->isp_reqidx; 2180 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 2181 2182 if (nseg == 0) { 2183 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 2184 isp_prt(isp, ISP_LOGWARN, 2185 "dma2_tgt_fc, a status CTIO2 without MODE1 " 2186 "set (0x%x)", cto->ct_flags); 2187 mp->error = EINVAL; 2188 return; 2189 } 2190 /* 2191 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 2192 * flags to NO DATA and clear relative offset flags. 2193 * We preserve the ct_resid and the response area. 2194 */ 2195 cto->ct_header.rqs_seqno = 1; 2196 cto->ct_seg_count = 0; 2197 cto->ct_reloff = 0; 2198 isp_prt(isp, ISP_LOGTDEBUG1, 2199 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 2200 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 2201 cto->ct_iid, cto->ct_flags, cto->ct_status, 2202 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 2203 if (FCPARAM(isp)->isp_2klogin) { 2204 isp_put_ctio2e(isp, 2205 (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2206 } else { 2207 isp_put_ctio2(isp, cto, qe); 2208 } 2209 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 2210 return; 2211 } 2212 2213 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 2214 isp_prt(isp, ISP_LOGERR, 2215 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 2216 "(0x%x)", cto->ct_flags); 2217 mp->error = EINVAL; 2218 return; 2219 } 2220 2221 2222 nxti = *mp->nxtip; 2223 2224 /* 2225 * Check to see if we need to DAC addressing or not. 2226 * 2227 * Any address that's over the 4GB boundary causes this 2228 * to happen. 2229 */ 2230 segcnt = nseg; 2231 if (sizeof (bus_addr_t) > 4) { 2232 for (segcnt = 0; segcnt < nseg; segcnt++) { 2233 uint64_t addr = dm_segs[segcnt].ds_addr; 2234 if (addr >= 0x100000000LL) { 2235 break; 2236 } 2237 } 2238 } 2239 if (segcnt != nseg) { 2240 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3; 2241 seglim = ISP_RQDSEG_T3; 2242 ds64 = &cto->rsp.m0.u.ct_dataseg64[0]; 2243 ds = NULL; 2244 } else { 2245 seglim = ISP_RQDSEG_T2; 2246 ds64 = NULL; 2247 ds = &cto->rsp.m0.u.ct_dataseg[0]; 2248 } 2249 cto->ct_seg_count = 0; 2250 2251 /* 2252 * Set up the CTIO2 data segments. 2253 */ 2254 for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg; 2255 cto->ct_seg_count++, segcnt++) { 2256 if (ds64) { 2257 ds64->ds_basehi = 2258 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2259 ds64->ds_base = dm_segs[segcnt].ds_addr; 2260 ds64->ds_count = dm_segs[segcnt].ds_len; 2261 ds64++; 2262 } else { 2263 ds->ds_base = dm_segs[segcnt].ds_addr; 2264 ds->ds_count = dm_segs[segcnt].ds_len; 2265 ds++; 2266 } 2267 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2268 #if __FreeBSD_version < 500000 2269 isp_prt(isp, ISP_LOGTDEBUG1, 2270 "isp_send_ctio2: ent0[%d]0x%llx:%llu", 2271 cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr, 2272 (uint64_t)dm_segs[segcnt].ds_len); 2273 #else 2274 isp_prt(isp, ISP_LOGTDEBUG1, 2275 "isp_send_ctio2: ent0[%d]0x%jx:%ju", 2276 cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr, 2277 (uintmax_t)dm_segs[segcnt].ds_len); 2278 #endif 2279 } 2280 2281 while (segcnt < nseg) { 2282 uint32_t curip; 2283 int seg; 2284 ispcontreq_t local, *crq = &local, *qep; 2285 2286 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2287 curip = nxti; 2288 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 2289 if (nxti == mp->optr) { 2290 ISP_UNLOCK(isp); 2291 isp_prt(isp, ISP_LOGTDEBUG0, 2292 "tdma_mkfc: request queue overflow"); 2293 mp->error = MUSHERR_NOQENTRIES; 2294 return; 2295 } 2296 cto->ct_header.rqs_entry_count++; 2297 MEMZERO((void *)crq, sizeof (*crq)); 2298 crq->req_header.rqs_entry_count = 1; 2299 if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { 2300 seglim = ISP_CDSEG64; 2301 ds = NULL; 2302 ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0]; 2303 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2304 } else { 2305 seglim = ISP_CDSEG; 2306 ds = &crq->req_dataseg[0]; 2307 ds64 = NULL; 2308 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2309 } 2310 for (seg = 0; segcnt < nseg && seg < seglim; 2311 segcnt++, seg++) { 2312 if (ds64) { 2313 ds64->ds_basehi = 2314 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2315 ds64->ds_base = dm_segs[segcnt].ds_addr; 2316 ds64->ds_count = dm_segs[segcnt].ds_len; 2317 ds64++; 2318 } else { 2319 ds->ds_base = dm_segs[segcnt].ds_addr; 2320 ds->ds_count = dm_segs[segcnt].ds_len; 2321 ds++; 2322 } 2323 #if __FreeBSD_version < 500000 2324 isp_prt(isp, ISP_LOGTDEBUG1, 2325 "isp_send_ctio2: ent%d[%d]%llx:%llu", 2326 cto->ct_header.rqs_entry_count-1, seg, 2327 (uint64_t)dm_segs[segcnt].ds_addr, 2328 (uint64_t)dm_segs[segcnt].ds_len); 2329 #else 2330 isp_prt(isp, ISP_LOGTDEBUG1, 2331 "isp_send_ctio2: ent%d[%d]%jx:%ju", 2332 cto->ct_header.rqs_entry_count-1, seg, 2333 (uintmax_t)dm_segs[segcnt].ds_addr, 2334 (uintmax_t)dm_segs[segcnt].ds_len); 2335 #endif 2336 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2337 cto->ct_seg_count++; 2338 } 2339 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 2340 isp_put_cont_req(isp, crq, qep); 2341 ISP_TDQE(isp, "cont entry", curi, qep); 2342 } 2343 2344 /* 2345 * No do final twiddling for the CTIO itself. 2346 */ 2347 cto->ct_header.rqs_seqno = 1; 2348 isp_prt(isp, ISP_LOGTDEBUG1, 2349 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 2350 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 2351 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 2352 cto->ct_resid); 2353 if (FCPARAM(isp)->isp_2klogin) { 2354 isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2355 } else { 2356 isp_put_ctio2(isp, cto, qe); 2357 } 2358 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 2359 *mp->nxtip = nxti; 2360 } 2361 #endif 2362 2363 static void dma_2400(void *, bus_dma_segment_t *, int, int); 2364 static void dma2_a64(void *, bus_dma_segment_t *, int, int); 2365 static void dma2(void *, bus_dma_segment_t *, int, int); 2366 2367 static void 2368 dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2369 { 2370 mush_t *mp; 2371 ispsoftc_t *isp; 2372 struct ccb_scsiio *csio; 2373 struct isp_pcisoftc *pcs; 2374 bus_dmamap_t *dp; 2375 bus_dma_segment_t *eseg; 2376 ispreqt7_t *rq; 2377 int seglim, datalen; 2378 uint32_t nxti; 2379 2380 mp = (mush_t *) arg; 2381 if (error) { 2382 mp->error = error; 2383 return; 2384 } 2385 2386 if (nseg < 1) { 2387 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2388 mp->error = EFAULT; 2389 return; 2390 } 2391 2392 csio = mp->cmd_token; 2393 isp = mp->isp; 2394 rq = mp->rq; 2395 pcs = (struct isp_pcisoftc *)mp->isp; 2396 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2397 nxti = *mp->nxtip; 2398 2399 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2400 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2401 } else { 2402 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2403 } 2404 datalen = XS_XFRLEN(csio); 2405 2406 /* 2407 * We're passed an initial partially filled in entry that 2408 * has most fields filled in except for data transfer 2409 * related values. 2410 * 2411 * Our job is to fill in the initial request queue entry and 2412 * then to start allocating and filling in continuation entries 2413 * until we've covered the entire transfer. 2414 */ 2415 2416 rq->req_header.rqs_entry_type = RQSTYPE_T7RQS; 2417 rq->req_dl = datalen; 2418 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2419 rq->req_alen_datadir = 0x2; 2420 } else { 2421 rq->req_alen_datadir = 0x1; 2422 } 2423 2424 eseg = dm_segs + nseg; 2425 2426 rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr); 2427 rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr); 2428 rq->req_dataseg.ds_count = dm_segs->ds_len; 2429 2430 datalen -= dm_segs->ds_len; 2431 2432 dm_segs++; 2433 rq->req_seg_count++; 2434 2435 while (datalen > 0 && dm_segs != eseg) { 2436 uint32_t onxti; 2437 ispcontreq64_t local, *crq = &local, *cqe; 2438 2439 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2440 onxti = nxti; 2441 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2442 if (nxti == mp->optr) { 2443 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2444 mp->error = MUSHERR_NOQENTRIES; 2445 return; 2446 } 2447 rq->req_header.rqs_entry_count++; 2448 MEMZERO((void *)crq, sizeof (*crq)); 2449 crq->req_header.rqs_entry_count = 1; 2450 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2451 2452 seglim = 0; 2453 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2454 crq->req_dataseg[seglim].ds_base = 2455 DMA_LO32(dm_segs->ds_addr); 2456 crq->req_dataseg[seglim].ds_basehi = 2457 DMA_HI32(dm_segs->ds_addr); 2458 crq->req_dataseg[seglim].ds_count = 2459 dm_segs->ds_len; 2460 rq->req_seg_count++; 2461 dm_segs++; 2462 seglim++; 2463 datalen -= dm_segs->ds_len; 2464 } 2465 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2466 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2467 } 2468 isp_put_cont64_req(isp, crq, cqe); 2469 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2470 } 2471 *mp->nxtip = nxti; 2472 } 2473 2474 static void 2475 dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2476 { 2477 mush_t *mp; 2478 ispsoftc_t *isp; 2479 struct ccb_scsiio *csio; 2480 struct isp_pcisoftc *pcs; 2481 bus_dmamap_t *dp; 2482 bus_dma_segment_t *eseg; 2483 ispreq64_t *rq; 2484 int seglim, datalen; 2485 uint32_t nxti; 2486 2487 mp = (mush_t *) arg; 2488 if (error) { 2489 mp->error = error; 2490 return; 2491 } 2492 2493 if (nseg < 1) { 2494 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2495 mp->error = EFAULT; 2496 return; 2497 } 2498 csio = mp->cmd_token; 2499 isp = mp->isp; 2500 rq = mp->rq; 2501 pcs = (struct isp_pcisoftc *)mp->isp; 2502 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2503 nxti = *mp->nxtip; 2504 2505 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2506 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2507 } else { 2508 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2509 } 2510 datalen = XS_XFRLEN(csio); 2511 2512 /* 2513 * We're passed an initial partially filled in entry that 2514 * has most fields filled in except for data transfer 2515 * related values. 2516 * 2517 * Our job is to fill in the initial request queue entry and 2518 * then to start allocating and filling in continuation entries 2519 * until we've covered the entire transfer. 2520 */ 2521 2522 if (IS_FC(isp)) { 2523 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 2524 seglim = ISP_RQDSEG_T3; 2525 ((ispreqt3_t *)rq)->req_totalcnt = datalen; 2526 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2527 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2528 } else { 2529 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2530 } 2531 } else { 2532 rq->req_header.rqs_entry_type = RQSTYPE_A64; 2533 if (csio->cdb_len > 12) { 2534 seglim = 0; 2535 } else { 2536 seglim = ISP_RQDSEG_A64; 2537 } 2538 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2539 rq->req_flags |= REQFLAG_DATA_IN; 2540 } else { 2541 rq->req_flags |= REQFLAG_DATA_OUT; 2542 } 2543 } 2544 2545 eseg = dm_segs + nseg; 2546 2547 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2548 if (IS_FC(isp)) { 2549 ispreqt3_t *rq3 = (ispreqt3_t *)rq; 2550 rq3->req_dataseg[rq3->req_seg_count].ds_base = 2551 DMA_LO32(dm_segs->ds_addr); 2552 rq3->req_dataseg[rq3->req_seg_count].ds_basehi = 2553 DMA_HI32(dm_segs->ds_addr); 2554 rq3->req_dataseg[rq3->req_seg_count].ds_count = 2555 dm_segs->ds_len; 2556 } else { 2557 rq->req_dataseg[rq->req_seg_count].ds_base = 2558 DMA_LO32(dm_segs->ds_addr); 2559 rq->req_dataseg[rq->req_seg_count].ds_basehi = 2560 DMA_HI32(dm_segs->ds_addr); 2561 rq->req_dataseg[rq->req_seg_count].ds_count = 2562 dm_segs->ds_len; 2563 } 2564 datalen -= dm_segs->ds_len; 2565 rq->req_seg_count++; 2566 dm_segs++; 2567 } 2568 2569 while (datalen > 0 && dm_segs != eseg) { 2570 uint32_t onxti; 2571 ispcontreq64_t local, *crq = &local, *cqe; 2572 2573 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2574 onxti = nxti; 2575 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2576 if (nxti == mp->optr) { 2577 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2578 mp->error = MUSHERR_NOQENTRIES; 2579 return; 2580 } 2581 rq->req_header.rqs_entry_count++; 2582 MEMZERO((void *)crq, sizeof (*crq)); 2583 crq->req_header.rqs_entry_count = 1; 2584 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2585 2586 seglim = 0; 2587 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2588 crq->req_dataseg[seglim].ds_base = 2589 DMA_LO32(dm_segs->ds_addr); 2590 crq->req_dataseg[seglim].ds_basehi = 2591 DMA_HI32(dm_segs->ds_addr); 2592 crq->req_dataseg[seglim].ds_count = 2593 dm_segs->ds_len; 2594 rq->req_seg_count++; 2595 dm_segs++; 2596 seglim++; 2597 datalen -= dm_segs->ds_len; 2598 } 2599 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2600 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2601 } 2602 isp_put_cont64_req(isp, crq, cqe); 2603 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2604 } 2605 *mp->nxtip = nxti; 2606 } 2607 2608 static void 2609 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2610 { 2611 mush_t *mp; 2612 ispsoftc_t *isp; 2613 struct ccb_scsiio *csio; 2614 struct isp_pcisoftc *pcs; 2615 bus_dmamap_t *dp; 2616 bus_dma_segment_t *eseg; 2617 ispreq_t *rq; 2618 int seglim, datalen; 2619 uint32_t nxti; 2620 2621 mp = (mush_t *) arg; 2622 if (error) { 2623 mp->error = error; 2624 return; 2625 } 2626 2627 if (nseg < 1) { 2628 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2629 mp->error = EFAULT; 2630 return; 2631 } 2632 csio = mp->cmd_token; 2633 isp = mp->isp; 2634 rq = mp->rq; 2635 pcs = (struct isp_pcisoftc *)mp->isp; 2636 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2637 nxti = *mp->nxtip; 2638 2639 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2640 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2641 } else { 2642 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2643 } 2644 2645 datalen = XS_XFRLEN(csio); 2646 2647 /* 2648 * We're passed an initial partially filled in entry that 2649 * has most fields filled in except for data transfer 2650 * related values. 2651 * 2652 * Our job is to fill in the initial request queue entry and 2653 * then to start allocating and filling in continuation entries 2654 * until we've covered the entire transfer. 2655 */ 2656 2657 if (IS_FC(isp)) { 2658 seglim = ISP_RQDSEG_T2; 2659 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 2660 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2661 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2662 } else { 2663 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2664 } 2665 } else { 2666 if (csio->cdb_len > 12) { 2667 seglim = 0; 2668 } else { 2669 seglim = ISP_RQDSEG; 2670 } 2671 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2672 rq->req_flags |= REQFLAG_DATA_IN; 2673 } else { 2674 rq->req_flags |= REQFLAG_DATA_OUT; 2675 } 2676 } 2677 2678 eseg = dm_segs + nseg; 2679 2680 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2681 if (IS_FC(isp)) { 2682 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 2683 rq2->req_dataseg[rq2->req_seg_count].ds_base = 2684 DMA_LO32(dm_segs->ds_addr); 2685 rq2->req_dataseg[rq2->req_seg_count].ds_count = 2686 dm_segs->ds_len; 2687 } else { 2688 rq->req_dataseg[rq->req_seg_count].ds_base = 2689 DMA_LO32(dm_segs->ds_addr); 2690 rq->req_dataseg[rq->req_seg_count].ds_count = 2691 dm_segs->ds_len; 2692 } 2693 datalen -= dm_segs->ds_len; 2694 rq->req_seg_count++; 2695 dm_segs++; 2696 } 2697 2698 while (datalen > 0 && dm_segs != eseg) { 2699 uint32_t onxti; 2700 ispcontreq_t local, *crq = &local, *cqe; 2701 2702 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2703 onxti = nxti; 2704 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2705 if (nxti == mp->optr) { 2706 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2707 mp->error = MUSHERR_NOQENTRIES; 2708 return; 2709 } 2710 rq->req_header.rqs_entry_count++; 2711 MEMZERO((void *)crq, sizeof (*crq)); 2712 crq->req_header.rqs_entry_count = 1; 2713 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2714 2715 seglim = 0; 2716 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 2717 crq->req_dataseg[seglim].ds_base = 2718 DMA_LO32(dm_segs->ds_addr); 2719 crq->req_dataseg[seglim].ds_count = 2720 dm_segs->ds_len; 2721 rq->req_seg_count++; 2722 dm_segs++; 2723 seglim++; 2724 datalen -= dm_segs->ds_len; 2725 } 2726 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2727 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2728 } 2729 isp_put_cont_req(isp, crq, cqe); 2730 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2731 } 2732 *mp->nxtip = nxti; 2733 } 2734 2735 /* 2736 * We enter with ISP_LOCK held 2737 */ 2738 static int 2739 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq, 2740 uint32_t *nxtip, uint32_t optr) 2741 { 2742 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2743 ispreq_t *qep; 2744 bus_dmamap_t *dp = NULL; 2745 mush_t mush, *mp; 2746 void (*eptr)(void *, bus_dma_segment_t *, int, int); 2747 2748 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 2749 #ifdef ISP_TARGET_MODE 2750 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 2751 if (IS_FC(isp)) { 2752 eptr = tdma_mkfc; 2753 } else { 2754 eptr = tdma_mk; 2755 } 2756 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2757 (csio->dxfer_len == 0)) { 2758 mp = &mush; 2759 mp->isp = isp; 2760 mp->cmd_token = csio; 2761 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 2762 mp->nxtip = nxtip; 2763 mp->optr = optr; 2764 mp->error = 0; 2765 ISPLOCK_2_CAMLOCK(isp); 2766 (*eptr)(mp, NULL, 0, 0); 2767 CAMLOCK_2_ISPLOCK(isp); 2768 goto mbxsync; 2769 } 2770 } else 2771 #endif 2772 if (IS_24XX(isp)) { 2773 eptr = dma_2400; 2774 } else if (sizeof (bus_addr_t) > 4) { 2775 eptr = dma2_a64; 2776 } else { 2777 eptr = dma2; 2778 } 2779 2780 2781 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2782 (csio->dxfer_len == 0)) { 2783 rq->req_seg_count = 1; 2784 goto mbxsync; 2785 } 2786 2787 /* 2788 * Do a virtual grapevine step to collect info for 2789 * the callback dma allocation that we have to use... 2790 */ 2791 mp = &mush; 2792 mp->isp = isp; 2793 mp->cmd_token = csio; 2794 mp->rq = rq; 2795 mp->nxtip = nxtip; 2796 mp->optr = optr; 2797 mp->error = 0; 2798 2799 ISPLOCK_2_CAMLOCK(isp); 2800 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 2801 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 2802 int error, s; 2803 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2804 s = splsoftvm(); 2805 error = bus_dmamap_load(pcs->dmat, *dp, 2806 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 2807 if (error == EINPROGRESS) { 2808 bus_dmamap_unload(pcs->dmat, *dp); 2809 mp->error = EINVAL; 2810 isp_prt(isp, ISP_LOGERR, 2811 "deferred dma allocation not supported"); 2812 } else if (error && mp->error == 0) { 2813 #ifdef DIAGNOSTIC 2814 isp_prt(isp, ISP_LOGERR, 2815 "error %d in dma mapping code", error); 2816 #endif 2817 mp->error = error; 2818 } 2819 splx(s); 2820 } else { 2821 /* Pointer to physical buffer */ 2822 struct bus_dma_segment seg; 2823 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; 2824 seg.ds_len = csio->dxfer_len; 2825 (*eptr)(mp, &seg, 1, 0); 2826 } 2827 } else { 2828 struct bus_dma_segment *segs; 2829 2830 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 2831 isp_prt(isp, ISP_LOGERR, 2832 "Physical segment pointers unsupported"); 2833 mp->error = EINVAL; 2834 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2835 isp_prt(isp, ISP_LOGERR, 2836 "Virtual segment addresses unsupported"); 2837 mp->error = EINVAL; 2838 } else { 2839 /* Just use the segments provided */ 2840 segs = (struct bus_dma_segment *) csio->data_ptr; 2841 (*eptr)(mp, segs, csio->sglist_cnt, 0); 2842 } 2843 } 2844 CAMLOCK_2_ISPLOCK(isp); 2845 if (mp->error) { 2846 int retval = CMD_COMPLETE; 2847 if (mp->error == MUSHERR_NOQENTRIES) { 2848 retval = CMD_EAGAIN; 2849 } else if (mp->error == EFBIG) { 2850 XS_SETERR(csio, CAM_REQ_TOO_BIG); 2851 } else if (mp->error == EINVAL) { 2852 XS_SETERR(csio, CAM_REQ_INVALID); 2853 } else { 2854 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 2855 } 2856 return (retval); 2857 } 2858 mbxsync: 2859 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2860 isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq); 2861 } 2862 switch (rq->req_header.rqs_entry_type) { 2863 case RQSTYPE_REQUEST: 2864 isp_put_request(isp, rq, qep); 2865 break; 2866 case RQSTYPE_CMDONLY: 2867 isp_put_extended_request(isp, (ispextreq_t *)rq, 2868 (ispextreq_t *)qep); 2869 break; 2870 case RQSTYPE_T2RQS: 2871 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 2872 break; 2873 case RQSTYPE_A64: 2874 case RQSTYPE_T3RQS: 2875 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep); 2876 break; 2877 case RQSTYPE_T7RQS: 2878 isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep); 2879 break; 2880 } 2881 return (CMD_QUEUED); 2882 } 2883 2884 static void 2885 isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle) 2886 { 2887 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2888 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)]; 2889 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2890 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 2891 } else { 2892 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 2893 } 2894 bus_dmamap_unload(pcs->dmat, *dp); 2895 } 2896 2897 2898 static void 2899 isp_pci_reset0(ispsoftc_t *isp) 2900 { 2901 ISP_DISABLE_INTS(isp); 2902 } 2903 2904 static void 2905 isp_pci_reset1(ispsoftc_t *isp) 2906 { 2907 if (!IS_24XX(isp)) { 2908 /* Make sure the BIOS is disabled */ 2909 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 2910 } 2911 /* and enable interrupts */ 2912 ISP_ENABLE_INTS(isp); 2913 } 2914 2915 static void 2916 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) 2917 { 2918 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2919 if (msg) 2920 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2921 else 2922 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2923 if (IS_SCSI(isp)) 2924 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2925 else 2926 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2927 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2928 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2929 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2930 2931 2932 if (IS_SCSI(isp)) { 2933 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2934 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2935 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2936 ISP_READ(isp, CDMA_FIFO_STS)); 2937 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2938 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2939 ISP_READ(isp, DDMA_FIFO_STS)); 2940 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2941 ISP_READ(isp, SXP_INTERRUPT), 2942 ISP_READ(isp, SXP_GROSS_ERR), 2943 ISP_READ(isp, SXP_PINS_CTRL)); 2944 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2945 } 2946 printf(" mbox regs: %x %x %x %x %x\n", 2947 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2948 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2949 ISP_READ(isp, OUTMAILBOX4)); 2950 printf(" PCI Status Command/Status=%x\n", 2951 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2952 } 2953