1 /*- 2 * Copyright (c) 1997-2006 by Matthew Jacob 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. The name of the author may not be used to endorse or promote products 12 * derived from this software without specific prior written permission. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 18 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 /* 27 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 28 * FreeBSD Version. 29 */ 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/kernel.h> 36 #include <sys/module.h> 37 #if __FreeBSD_version >= 700000 38 #include <sys/linker.h> 39 #include <sys/firmware.h> 40 #endif 41 #include <sys/bus.h> 42 #if __FreeBSD_version < 500000 43 #include <pci/pcireg.h> 44 #include <pci/pcivar.h> 45 #include <machine/bus_memio.h> 46 #include <machine/bus_pio.h> 47 #else 48 #include <sys/stdint.h> 49 #include <dev/pci/pcireg.h> 50 #include <dev/pci/pcivar.h> 51 #endif 52 #include <machine/bus.h> 53 #include <machine/resource.h> 54 #include <sys/rman.h> 55 #include <sys/malloc.h> 56 57 #include <dev/isp/isp_freebsd.h> 58 59 #if __FreeBSD_version < 500000 60 #define BUS_PROBE_DEFAULT 0 61 #endif 62 63 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int); 64 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t); 65 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int); 66 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t); 67 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int); 68 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t); 69 static int 70 isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 71 static int 72 isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 73 static int 74 isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 75 static int isp_pci_mbxdma(ispsoftc_t *); 76 static int 77 isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t); 78 static void 79 isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint32_t); 80 81 82 static void isp_pci_reset0(ispsoftc_t *); 83 static void isp_pci_reset1(ispsoftc_t *); 84 static void isp_pci_dumpregs(ispsoftc_t *, const char *); 85 86 static struct ispmdvec mdvec = { 87 isp_pci_rd_isr, 88 isp_pci_rd_reg, 89 isp_pci_wr_reg, 90 isp_pci_mbxdma, 91 isp_pci_dmasetup, 92 isp_pci_dmateardown, 93 isp_pci_reset0, 94 isp_pci_reset1, 95 isp_pci_dumpregs, 96 NULL, 97 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 98 }; 99 100 static struct ispmdvec mdvec_1080 = { 101 isp_pci_rd_isr, 102 isp_pci_rd_reg_1080, 103 isp_pci_wr_reg_1080, 104 isp_pci_mbxdma, 105 isp_pci_dmasetup, 106 isp_pci_dmateardown, 107 isp_pci_reset0, 108 isp_pci_reset1, 109 isp_pci_dumpregs, 110 NULL, 111 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 112 }; 113 114 static struct ispmdvec mdvec_12160 = { 115 isp_pci_rd_isr, 116 isp_pci_rd_reg_1080, 117 isp_pci_wr_reg_1080, 118 isp_pci_mbxdma, 119 isp_pci_dmasetup, 120 isp_pci_dmateardown, 121 isp_pci_reset0, 122 isp_pci_reset1, 123 isp_pci_dumpregs, 124 NULL, 125 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 126 }; 127 128 static struct ispmdvec mdvec_2100 = { 129 isp_pci_rd_isr, 130 isp_pci_rd_reg, 131 isp_pci_wr_reg, 132 isp_pci_mbxdma, 133 isp_pci_dmasetup, 134 isp_pci_dmateardown, 135 isp_pci_reset0, 136 isp_pci_reset1, 137 isp_pci_dumpregs 138 }; 139 140 static struct ispmdvec mdvec_2200 = { 141 isp_pci_rd_isr, 142 isp_pci_rd_reg, 143 isp_pci_wr_reg, 144 isp_pci_mbxdma, 145 isp_pci_dmasetup, 146 isp_pci_dmateardown, 147 isp_pci_reset0, 148 isp_pci_reset1, 149 isp_pci_dumpregs 150 }; 151 152 static struct ispmdvec mdvec_2300 = { 153 isp_pci_rd_isr_2300, 154 isp_pci_rd_reg, 155 isp_pci_wr_reg, 156 isp_pci_mbxdma, 157 isp_pci_dmasetup, 158 isp_pci_dmateardown, 159 isp_pci_reset0, 160 isp_pci_reset1, 161 isp_pci_dumpregs 162 }; 163 164 static struct ispmdvec mdvec_2400 = { 165 isp_pci_rd_isr_2400, 166 isp_pci_rd_reg_2400, 167 isp_pci_wr_reg_2400, 168 isp_pci_mbxdma, 169 isp_pci_dmasetup, 170 isp_pci_dmateardown, 171 isp_pci_reset0, 172 isp_pci_reset1, 173 NULL 174 }; 175 176 #ifndef PCIM_CMD_INVEN 177 #define PCIM_CMD_INVEN 0x10 178 #endif 179 #ifndef PCIM_CMD_BUSMASTEREN 180 #define PCIM_CMD_BUSMASTEREN 0x0004 181 #endif 182 #ifndef PCIM_CMD_PERRESPEN 183 #define PCIM_CMD_PERRESPEN 0x0040 184 #endif 185 #ifndef PCIM_CMD_SEREN 186 #define PCIM_CMD_SEREN 0x0100 187 #endif 188 #ifndef PCIM_CMD_INTX_DISABLE 189 #define PCIM_CMD_INTX_DISABLE 0x0400 190 #endif 191 192 #ifndef PCIR_COMMAND 193 #define PCIR_COMMAND 0x04 194 #endif 195 196 #ifndef PCIR_CACHELNSZ 197 #define PCIR_CACHELNSZ 0x0c 198 #endif 199 200 #ifndef PCIR_LATTIMER 201 #define PCIR_LATTIMER 0x0d 202 #endif 203 204 #ifndef PCIR_ROMADDR 205 #define PCIR_ROMADDR 0x30 206 #endif 207 208 #ifndef PCI_VENDOR_QLOGIC 209 #define PCI_VENDOR_QLOGIC 0x1077 210 #endif 211 212 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 213 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 214 #endif 215 216 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 217 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 218 #endif 219 220 #ifndef PCI_PRODUCT_QLOGIC_ISP10160 221 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 222 #endif 223 224 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 225 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 226 #endif 227 228 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 229 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 230 #endif 231 232 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 233 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 234 #endif 235 236 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 237 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 238 #endif 239 240 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 241 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 242 #endif 243 244 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 245 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 246 #endif 247 248 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 249 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 250 #endif 251 252 #ifndef PCI_PRODUCT_QLOGIC_ISP2322 253 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 254 #endif 255 256 #ifndef PCI_PRODUCT_QLOGIC_ISP2422 257 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 258 #endif 259 260 #ifndef PCI_PRODUCT_QLOGIC_ISP2432 261 #define PCI_PRODUCT_QLOGIC_ISP2432 0x2432 262 #endif 263 264 #ifndef PCI_PRODUCT_QLOGIC_ISP6312 265 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 266 #endif 267 268 #ifndef PCI_PRODUCT_QLOGIC_ISP6322 269 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322 270 #endif 271 272 273 #define PCI_QLOGIC_ISP1020 \ 274 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 275 276 #define PCI_QLOGIC_ISP1080 \ 277 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 278 279 #define PCI_QLOGIC_ISP10160 \ 280 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 281 282 #define PCI_QLOGIC_ISP12160 \ 283 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 284 285 #define PCI_QLOGIC_ISP1240 \ 286 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 287 288 #define PCI_QLOGIC_ISP1280 \ 289 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 290 291 #define PCI_QLOGIC_ISP2100 \ 292 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 293 294 #define PCI_QLOGIC_ISP2200 \ 295 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 296 297 #define PCI_QLOGIC_ISP2300 \ 298 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 299 300 #define PCI_QLOGIC_ISP2312 \ 301 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 302 303 #define PCI_QLOGIC_ISP2322 \ 304 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) 305 306 #define PCI_QLOGIC_ISP2422 \ 307 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) 308 309 #define PCI_QLOGIC_ISP2432 \ 310 ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC) 311 312 #define PCI_QLOGIC_ISP6312 \ 313 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) 314 315 #define PCI_QLOGIC_ISP6322 \ 316 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC) 317 318 /* 319 * Odd case for some AMI raid cards... We need to *not* attach to this. 320 */ 321 #define AMI_RAID_SUBVENDOR_ID 0x101e 322 323 #define IO_MAP_REG 0x10 324 #define MEM_MAP_REG 0x14 325 326 #define PCI_DFLT_LTNCY 0x40 327 #define PCI_DFLT_LNSZ 0x10 328 329 static int isp_pci_probe (device_t); 330 static int isp_pci_attach (device_t); 331 static int isp_pci_detach (device_t); 332 333 334 struct isp_pcisoftc { 335 ispsoftc_t pci_isp; 336 device_t pci_dev; 337 struct resource * pci_reg; 338 void * ih; 339 int16_t pci_poff[_NREG_BLKS]; 340 bus_dma_tag_t dmat; 341 bus_dmamap_t *dmaps; 342 }; 343 344 345 static device_method_t isp_pci_methods[] = { 346 /* Device interface */ 347 DEVMETHOD(device_probe, isp_pci_probe), 348 DEVMETHOD(device_attach, isp_pci_attach), 349 DEVMETHOD(device_detach, isp_pci_detach), 350 { 0, 0 } 351 }; 352 static void isp_pci_intr(void *); 353 354 static driver_t isp_pci_driver = { 355 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 356 }; 357 static devclass_t isp_devclass; 358 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 359 #if __FreeBSD_version < 700000 360 extern ispfwfunc *isp_get_firmware_p; 361 #endif 362 363 static int 364 isp_pci_probe(device_t dev) 365 { 366 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 367 case PCI_QLOGIC_ISP1020: 368 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 369 break; 370 case PCI_QLOGIC_ISP1080: 371 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 372 break; 373 case PCI_QLOGIC_ISP1240: 374 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 375 break; 376 case PCI_QLOGIC_ISP1280: 377 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 378 break; 379 case PCI_QLOGIC_ISP10160: 380 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 381 break; 382 case PCI_QLOGIC_ISP12160: 383 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 384 return (ENXIO); 385 } 386 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 387 break; 388 case PCI_QLOGIC_ISP2100: 389 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 390 break; 391 case PCI_QLOGIC_ISP2200: 392 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 393 break; 394 case PCI_QLOGIC_ISP2300: 395 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 396 break; 397 case PCI_QLOGIC_ISP2312: 398 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 399 break; 400 case PCI_QLOGIC_ISP2322: 401 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); 402 break; 403 case PCI_QLOGIC_ISP2422: 404 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); 405 break; 406 case PCI_QLOGIC_ISP2432: 407 device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter"); 408 break; 409 case PCI_QLOGIC_ISP6312: 410 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); 411 break; 412 case PCI_QLOGIC_ISP6322: 413 device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter"); 414 break; 415 default: 416 return (ENXIO); 417 } 418 if (isp_announced == 0 && bootverbose) { 419 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 420 "Core Version %d.%d\n", 421 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 422 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 423 isp_announced++; 424 } 425 /* 426 * XXXX: Here is where we might load the f/w module 427 * XXXX: (or increase a reference count to it). 428 */ 429 return (BUS_PROBE_DEFAULT); 430 } 431 432 #if __FreeBSD_version < 500000 433 static void 434 isp_get_generic_options(device_t dev, ispsoftc_t *isp) 435 { 436 int bitmap, unit; 437 438 unit = device_get_unit(dev); 439 if (getenv_int("isp_disable", &bitmap)) { 440 if (bitmap & (1 << unit)) { 441 isp->isp_osinfo.disabled = 1; 442 return; 443 } 444 } 445 if (getenv_int("isp_no_fwload", &bitmap)) { 446 if (bitmap & (1 << unit)) 447 isp->isp_confopts |= ISP_CFG_NORELOAD; 448 } 449 if (getenv_int("isp_fwload", &bitmap)) { 450 if (bitmap & (1 << unit)) 451 isp->isp_confopts &= ~ISP_CFG_NORELOAD; 452 } 453 if (getenv_int("isp_no_nvram", &bitmap)) { 454 if (bitmap & (1 << unit)) 455 isp->isp_confopts |= ISP_CFG_NONVRAM; 456 } 457 if (getenv_int("isp_nvram", &bitmap)) { 458 if (bitmap & (1 << unit)) 459 isp->isp_confopts &= ~ISP_CFG_NONVRAM; 460 } 461 462 bitmap = 0; 463 (void) getenv_int("isp_debug", &bitmap); 464 if (bitmap) { 465 isp->isp_dblev = bitmap; 466 } else { 467 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 468 } 469 if (bootverbose) { 470 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 471 } 472 473 bitmap = 0; 474 if (getenv_int("role", &bitmap)) { 475 isp->isp_role = bitmap; 476 } else { 477 isp->isp_role = ISP_DEFAULT_ROLES; 478 } 479 480 } 481 482 static void 483 isp_get_pci_options(device_t dev, int *m1, int *m2) 484 { 485 int bitmap; 486 int unit = device_get_unit(dev); 487 488 *m1 = PCIM_CMD_MEMEN; 489 *m2 = PCIM_CMD_PORTEN; 490 if (getenv_int("isp_mem_map", &bitmap)) { 491 if (bitmap & (1 << unit)) { 492 *m1 = PCIM_CMD_MEMEN; 493 *m2 = PCIM_CMD_PORTEN; 494 } 495 } 496 bitmap = 0; 497 if (getenv_int("isp_io_map", &bitmap)) { 498 if (bitmap & (1 << unit)) { 499 *m1 = PCIM_CMD_PORTEN; 500 *m2 = PCIM_CMD_MEMEN; 501 } 502 } 503 } 504 505 static void 506 isp_get_specific_options(device_t dev, ispsoftc_t *isp) 507 { 508 uint64_t wwn; 509 int bitmap; 510 int unit = device_get_unit(dev); 511 512 callout_handle_init(&isp->isp_osinfo.ldt); 513 callout_handle_init(&isp->isp_osinfo.gdt); 514 515 if (IS_SCSI(isp)) { 516 return; 517 } 518 519 if (getenv_int("isp_fcduplex", &bitmap)) { 520 if (bitmap & (1 << unit)) 521 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 522 } 523 if (getenv_int("isp_no_fcduplex", &bitmap)) { 524 if (bitmap & (1 << unit)) 525 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX; 526 } 527 if (getenv_int("isp_nport", &bitmap)) { 528 if (bitmap & (1 << unit)) 529 isp->isp_confopts |= ISP_CFG_NPORT; 530 } 531 532 /* 533 * Because the resource_*_value functions can neither return 534 * 64 bit integer values, nor can they be directly coerced 535 * to interpret the right hand side of the assignment as 536 * you want them to interpret it, we have to force WWN 537 * hint replacement to specify WWN strings with a leading 538 * 'w' (e..g w50000000aaaa0001). Sigh. 539 */ 540 if (getenv_quad("isp_portwwn", &wwn)) { 541 isp->isp_osinfo.default_port_wwn = wwn; 542 isp->isp_confopts |= ISP_CFG_OWNWWPN; 543 } 544 if (isp->isp_osinfo.default_port_wwn == 0) { 545 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 546 } 547 548 if (getenv_quad("isp_nodewwn", &wwn)) { 549 isp->isp_osinfo.default_node_wwn = wwn; 550 isp->isp_confopts |= ISP_CFG_OWNWWNN; 551 } 552 if (isp->isp_osinfo.default_node_wwn == 0) { 553 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 554 } 555 556 bitmap = 0; 557 (void) getenv_int("isp_fabric_hysteresis", &bitmap); 558 if (bitmap >= 0 && bitmap < 256) { 559 isp->isp_osinfo.hysteresis = bitmap; 560 } else { 561 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 562 } 563 564 bitmap = 0; 565 (void) getenv_int("isp_loop_down_limit", &bitmap); 566 if (bitmap >= 0 && bitmap < 0xffff) { 567 isp->isp_osinfo.loop_down_limit = bitmap; 568 } else { 569 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 570 } 571 572 bitmap = 0; 573 (void) getenv_int("isp_gone_device_time", &bitmap); 574 if (bitmap >= 0 && bitmap < 0xffff) { 575 isp->isp_osinfo.gone_device_time = bitmap; 576 } else { 577 isp->isp_osinfo.gone_device_time = isp_gone_device_time; 578 } 579 #ifdef ISP_FW_CRASH_DUMP 580 bitmap = 0; 581 if (getenv_int("isp_fw_dump_enable", &bitmap)) { 582 if (bitmap & (1 << unit) { 583 size_t amt = 0; 584 if (IS_2200(isp)) { 585 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 586 } else if (IS_23XX(isp)) { 587 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 588 } 589 if (amt) { 590 FCPARAM(isp)->isp_dump_data = 591 malloc(amt, M_DEVBUF, M_WAITOK); 592 memset(FCPARAM(isp)->isp_dump_data, 0, amt); 593 } else { 594 device_printf(dev, 595 "f/w crash dumps not supported for card\n"); 596 } 597 } 598 } 599 #endif 600 } 601 #else 602 static void 603 isp_get_generic_options(device_t dev, ispsoftc_t *isp) 604 { 605 int tval; 606 607 /* 608 * Figure out if we're supposed to skip this one. 609 */ 610 tval = 0; 611 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 612 "disable", &tval) == 0 && tval) { 613 device_printf(dev, "disabled at user request\n"); 614 isp->isp_osinfo.disabled = 1; 615 return; 616 } 617 618 tval = -1; 619 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 620 "role", &tval) == 0 && tval != -1) { 621 tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 622 isp->isp_role = tval; 623 device_printf(dev, "setting role to 0x%x\n", isp->isp_role); 624 } else { 625 #ifdef ISP_TARGET_MODE 626 isp->isp_role = ISP_ROLE_TARGET; 627 #else 628 isp->isp_role = ISP_DEFAULT_ROLES; 629 #endif 630 } 631 632 tval = 0; 633 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 634 "fwload_disable", &tval) == 0 && tval != 0) { 635 isp->isp_confopts |= ISP_CFG_NORELOAD; 636 } 637 tval = 0; 638 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 639 "ignore_nvram", &tval) == 0 && tval != 0) { 640 isp->isp_confopts |= ISP_CFG_NONVRAM; 641 } 642 643 tval = 0; 644 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 645 "debug", &tval); 646 if (tval) { 647 isp->isp_dblev = tval; 648 } else { 649 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 650 } 651 if (bootverbose) { 652 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 653 } 654 655 } 656 657 static void 658 isp_get_pci_options(device_t dev, int *m1, int *m2) 659 { 660 int tval; 661 /* 662 * Which we should try first - memory mapping or i/o mapping? 663 * 664 * We used to try memory first followed by i/o on alpha, otherwise 665 * the reverse, but we should just try memory first all the time now. 666 */ 667 *m1 = PCIM_CMD_MEMEN; 668 *m2 = PCIM_CMD_PORTEN; 669 670 tval = 0; 671 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 672 "prefer_iomap", &tval) == 0 && tval != 0) { 673 *m1 = PCIM_CMD_PORTEN; 674 *m2 = PCIM_CMD_MEMEN; 675 } 676 tval = 0; 677 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 678 "prefer_memmap", &tval) == 0 && tval != 0) { 679 *m1 = PCIM_CMD_MEMEN; 680 *m2 = PCIM_CMD_PORTEN; 681 } 682 } 683 684 static void 685 isp_get_specific_options(device_t dev, ispsoftc_t *isp) 686 { 687 const char *sptr; 688 int tval; 689 690 isp->isp_osinfo.default_id = -1; 691 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 692 "iid", &tval) == 0) { 693 isp->isp_osinfo.default_id = tval; 694 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 695 } 696 if (isp->isp_osinfo.default_id == -1) { 697 if (IS_FC(isp)) { 698 isp->isp_osinfo.default_id = 109; 699 } else { 700 isp->isp_osinfo.default_id = 7; 701 } 702 } 703 704 callout_handle_init(&isp->isp_osinfo.ldt); 705 callout_handle_init(&isp->isp_osinfo.gdt); 706 707 if (IS_SCSI(isp)) { 708 return; 709 } 710 711 tval = 0; 712 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 713 "fullduplex", &tval) == 0 && tval != 0) { 714 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 715 } 716 #ifdef ISP_FW_CRASH_DUMP 717 tval = 0; 718 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 719 "fw_dump_enable", &tval) == 0 && tval != 0) { 720 size_t amt = 0; 721 if (IS_2200(isp)) { 722 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 723 } else if (IS_23XX(isp)) { 724 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 725 } 726 if (amt) { 727 FCPARAM(isp)->isp_dump_data = 728 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 729 } else { 730 device_printf(dev, 731 "f/w crash dumps not supported for this model\n"); 732 } 733 } 734 #endif 735 sptr = 0; 736 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 737 "topology", (const char **) &sptr) == 0 && sptr != 0) { 738 if (strcmp(sptr, "lport") == 0) { 739 isp->isp_confopts |= ISP_CFG_LPORT; 740 } else if (strcmp(sptr, "nport") == 0) { 741 isp->isp_confopts |= ISP_CFG_NPORT; 742 } else if (strcmp(sptr, "lport-only") == 0) { 743 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 744 } else if (strcmp(sptr, "nport-only") == 0) { 745 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 746 } 747 } 748 749 /* 750 * Because the resource_*_value functions can neither return 751 * 64 bit integer values, nor can they be directly coerced 752 * to interpret the right hand side of the assignment as 753 * you want them to interpret it, we have to force WWN 754 * hint replacement to specify WWN strings with a leading 755 * 'w' (e..g w50000000aaaa0001). Sigh. 756 */ 757 sptr = 0; 758 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 759 "portwwn", (const char **) &sptr); 760 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 761 char *eptr = 0; 762 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 763 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 764 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 765 isp->isp_osinfo.default_port_wwn = 0; 766 } else { 767 isp->isp_confopts |= ISP_CFG_OWNWWPN; 768 } 769 } 770 if (isp->isp_osinfo.default_port_wwn == 0) { 771 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 772 } 773 774 sptr = 0; 775 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 776 "nodewwn", (const char **) &sptr); 777 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 778 char *eptr = 0; 779 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 780 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 781 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 782 isp->isp_osinfo.default_node_wwn = 0; 783 } else { 784 isp->isp_confopts |= ISP_CFG_OWNWWNN; 785 } 786 } 787 if (isp->isp_osinfo.default_node_wwn == 0) { 788 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 789 } 790 791 792 tval = 0; 793 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 794 "hysteresis", &tval); 795 if (tval >= 0 && tval < 256) { 796 isp->isp_osinfo.hysteresis = tval; 797 } else { 798 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 799 } 800 801 tval = -1; 802 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 803 "loop_down_limit", &tval); 804 if (tval >= 0 && tval < 0xffff) { 805 isp->isp_osinfo.loop_down_limit = tval; 806 } else { 807 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 808 } 809 810 tval = -1; 811 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 812 "gone_device_time", &tval); 813 if (tval >= 0 && tval < 0xffff) { 814 isp->isp_osinfo.gone_device_time = tval; 815 } else { 816 isp->isp_osinfo.gone_device_time = isp_gone_device_time; 817 } 818 } 819 #endif 820 821 static int 822 isp_pci_attach(device_t dev) 823 { 824 struct resource *regs, *irq; 825 int rtp, rgd, iqd, m1, m2; 826 uint32_t data, cmd, linesz, psize, basetype; 827 struct isp_pcisoftc *pcs; 828 ispsoftc_t *isp = NULL; 829 struct ispmdvec *mdvp; 830 #if __FreeBSD_version >= 500000 831 int locksetup = 0; 832 #endif 833 834 pcs = device_get_softc(dev); 835 if (pcs == NULL) { 836 device_printf(dev, "cannot get softc\n"); 837 return (ENOMEM); 838 } 839 memset(pcs, 0, sizeof (*pcs)); 840 pcs->pci_dev = dev; 841 isp = &pcs->pci_isp; 842 843 /* 844 * Get Generic Options 845 */ 846 isp_get_generic_options(dev, isp); 847 848 /* 849 * Check to see if options have us disabled 850 */ 851 if (isp->isp_osinfo.disabled) { 852 /* 853 * But return zero to preserve unit numbering 854 */ 855 return (0); 856 } 857 858 /* 859 * Get PCI options- which in this case are just mapping preferences. 860 */ 861 isp_get_pci_options(dev, &m1, &m2); 862 863 linesz = PCI_DFLT_LNSZ; 864 irq = regs = NULL; 865 rgd = rtp = iqd = 0; 866 867 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 868 if (cmd & m1) { 869 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 870 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 871 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 872 } 873 if (regs == NULL && (cmd & m2)) { 874 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 875 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 876 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 877 } 878 if (regs == NULL) { 879 device_printf(dev, "unable to map any ports\n"); 880 goto bad; 881 } 882 if (bootverbose) { 883 device_printf(dev, "using %s space register mapping\n", 884 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 885 } 886 pcs->pci_dev = dev; 887 pcs->pci_reg = regs; 888 isp->isp_bus_tag = rman_get_bustag(regs); 889 isp->isp_bus_handle = rman_get_bushandle(regs); 890 891 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 892 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 893 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 894 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 895 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 896 mdvp = &mdvec; 897 basetype = ISP_HA_SCSI_UNKNOWN; 898 psize = sizeof (sdparam); 899 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 900 mdvp = &mdvec; 901 basetype = ISP_HA_SCSI_UNKNOWN; 902 psize = sizeof (sdparam); 903 } 904 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 905 mdvp = &mdvec_1080; 906 basetype = ISP_HA_SCSI_1080; 907 psize = sizeof (sdparam); 908 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 909 ISP1080_DMA_REGS_OFF; 910 } 911 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 912 mdvp = &mdvec_1080; 913 basetype = ISP_HA_SCSI_1240; 914 psize = 2 * sizeof (sdparam); 915 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 916 ISP1080_DMA_REGS_OFF; 917 } 918 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 919 mdvp = &mdvec_1080; 920 basetype = ISP_HA_SCSI_1280; 921 psize = 2 * sizeof (sdparam); 922 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 923 ISP1080_DMA_REGS_OFF; 924 } 925 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 926 mdvp = &mdvec_12160; 927 basetype = ISP_HA_SCSI_10160; 928 psize = sizeof (sdparam); 929 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 930 ISP1080_DMA_REGS_OFF; 931 } 932 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 933 mdvp = &mdvec_12160; 934 basetype = ISP_HA_SCSI_12160; 935 psize = 2 * sizeof (sdparam); 936 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 937 ISP1080_DMA_REGS_OFF; 938 } 939 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 940 mdvp = &mdvec_2100; 941 basetype = ISP_HA_FC_2100; 942 psize = sizeof (fcparam); 943 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 944 PCI_MBOX_REGS2100_OFF; 945 if (pci_get_revid(dev) < 3) { 946 /* 947 * XXX: Need to get the actual revision 948 * XXX: number of the 2100 FB. At any rate, 949 * XXX: lower cache line size for early revision 950 * XXX; boards. 951 */ 952 linesz = 1; 953 } 954 } 955 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 956 mdvp = &mdvec_2200; 957 basetype = ISP_HA_FC_2200; 958 psize = sizeof (fcparam); 959 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 960 PCI_MBOX_REGS2100_OFF; 961 } 962 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 963 mdvp = &mdvec_2300; 964 basetype = ISP_HA_FC_2300; 965 psize = sizeof (fcparam); 966 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 967 PCI_MBOX_REGS2300_OFF; 968 } 969 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 || 970 pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 971 mdvp = &mdvec_2300; 972 basetype = ISP_HA_FC_2312; 973 psize = sizeof (fcparam); 974 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 975 PCI_MBOX_REGS2300_OFF; 976 } 977 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 || 978 pci_get_devid(dev) == PCI_QLOGIC_ISP6322) { 979 mdvp = &mdvec_2300; 980 basetype = ISP_HA_FC_2322; 981 psize = sizeof (fcparam); 982 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 983 PCI_MBOX_REGS2300_OFF; 984 } 985 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422 || 986 pci_get_devid(dev) == PCI_QLOGIC_ISP2432) { 987 mdvp = &mdvec_2400; 988 basetype = ISP_HA_FC_2400; 989 psize = sizeof (fcparam); 990 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 991 PCI_MBOX_REGS2400_OFF; 992 } 993 isp = &pcs->pci_isp; 994 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 995 if (isp->isp_param == NULL) { 996 device_printf(dev, "cannot allocate parameter data\n"); 997 goto bad; 998 } 999 isp->isp_mdvec = mdvp; 1000 isp->isp_type = basetype; 1001 isp->isp_revision = pci_get_revid(dev); 1002 isp->isp_dev = dev; 1003 1004 /* 1005 * Now that we know who we are (roughly) get/set specific options 1006 */ 1007 isp_get_specific_options(dev, isp); 1008 1009 #if __FreeBSD_version >= 700000 1010 /* 1011 * Try and find firmware for this device. 1012 */ 1013 { 1014 char fwname[32]; 1015 unsigned int did = pci_get_device(dev); 1016 1017 /* 1018 * Map a few pci ids to fw names 1019 */ 1020 switch (did) { 1021 case PCI_PRODUCT_QLOGIC_ISP1020: 1022 did = 0x1040; 1023 break; 1024 case PCI_PRODUCT_QLOGIC_ISP1240: 1025 did = 0x1080; 1026 break; 1027 case PCI_PRODUCT_QLOGIC_ISP10160: 1028 case PCI_PRODUCT_QLOGIC_ISP12160: 1029 did = 0x12160; 1030 break; 1031 case PCI_PRODUCT_QLOGIC_ISP6312: 1032 case PCI_PRODUCT_QLOGIC_ISP2312: 1033 did = 0x2300; 1034 break; 1035 case PCI_PRODUCT_QLOGIC_ISP6322: 1036 did = 0x2322; 1037 break; 1038 case PCI_PRODUCT_QLOGIC_ISP2422: 1039 case PCI_PRODUCT_QLOGIC_ISP2432: 1040 did = 0x2400; 1041 break; 1042 default: 1043 break; 1044 } 1045 1046 isp->isp_osinfo.fw = NULL; 1047 if (isp->isp_role & ISP_ROLE_TARGET) { 1048 snprintf(fwname, sizeof (fwname), "isp_%04x_it", did); 1049 isp->isp_osinfo.fw = firmware_get(fwname); 1050 } 1051 if (isp->isp_osinfo.fw == NULL) { 1052 snprintf(fwname, sizeof (fwname), "isp_%04x", did); 1053 isp->isp_osinfo.fw = firmware_get(fwname); 1054 } 1055 if (isp->isp_osinfo.fw != NULL) { 1056 isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data; 1057 } 1058 } 1059 #else 1060 if (isp_get_firmware_p) { 1061 int device = (int) pci_get_device(dev); 1062 #ifdef ISP_TARGET_MODE 1063 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 1064 #else 1065 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 1066 #endif 1067 } 1068 #endif 1069 1070 /* 1071 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 1072 * are set. 1073 */ 1074 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 1075 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 1076 1077 if (IS_2300(isp)) { /* per QLogic errata */ 1078 cmd &= ~PCIM_CMD_INVEN; 1079 } 1080 1081 if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 1082 cmd &= ~PCIM_CMD_INTX_DISABLE; 1083 } 1084 1085 #ifdef WE_KNEW_WHAT_WE_WERE_DOING 1086 if (IS_24XX(isp)) { 1087 int reg; 1088 1089 cmd &= ~PCIM_CMD_INTX_DISABLE; 1090 1091 /* 1092 * Is this a PCI-X card? If so, set max read byte count. 1093 */ 1094 if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) { 1095 uint16_t pxcmd; 1096 reg += 2; 1097 1098 pxcmd = pci_read_config(dev, reg, 2); 1099 pxcmd &= ~0xc; 1100 pxcmd |= 0x8; 1101 pci_write_config(dev, reg, 2, pxcmd); 1102 } 1103 1104 /* 1105 * Is this a PCI Express card? If so, set max read byte count. 1106 */ 1107 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 1108 uint16_t pectl; 1109 1110 reg += 0x8; 1111 pectl = pci_read_config(dev, reg, 2); 1112 pectl &= ~0x7000; 1113 pectl |= 0x4000; 1114 pci_write_config(dev, reg, 2, pectl); 1115 } 1116 } 1117 #else 1118 if (IS_24XX(isp)) { 1119 cmd &= ~PCIM_CMD_INTX_DISABLE; 1120 } 1121 #endif 1122 1123 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 1124 1125 /* 1126 * Make sure the Cache Line Size register is set sensibly. 1127 */ 1128 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 1129 if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) { 1130 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d from %d", 1131 linesz, data); 1132 data = linesz; 1133 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 1134 } 1135 1136 /* 1137 * Make sure the Latency Timer is sane. 1138 */ 1139 data = pci_read_config(dev, PCIR_LATTIMER, 1); 1140 if (data < PCI_DFLT_LTNCY) { 1141 data = PCI_DFLT_LTNCY; 1142 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 1143 pci_write_config(dev, PCIR_LATTIMER, data, 1); 1144 } 1145 1146 /* 1147 * Make sure we've disabled the ROM. 1148 */ 1149 data = pci_read_config(dev, PCIR_ROMADDR, 4); 1150 data &= ~1; 1151 pci_write_config(dev, PCIR_ROMADDR, data, 4); 1152 1153 iqd = 0; 1154 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, 1155 RF_ACTIVE | RF_SHAREABLE); 1156 if (irq == NULL) { 1157 device_printf(dev, "could not allocate interrupt\n"); 1158 goto bad; 1159 } 1160 1161 #if __FreeBSD_version >= 500000 1162 /* Make sure the lock is set up. */ 1163 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 1164 locksetup++; 1165 #endif 1166 1167 if (isp_setup_intr(dev, irq, ISP_IFLAGS, NULL, isp_pci_intr, isp, 1168 &pcs->ih)) { 1169 device_printf(dev, "could not setup interrupt\n"); 1170 goto bad; 1171 } 1172 1173 /* 1174 * Last minute checks... 1175 */ 1176 if (IS_23XX(isp) || IS_24XX(isp)) { 1177 isp->isp_port = pci_get_function(dev); 1178 } 1179 1180 if (IS_23XX(isp)) { 1181 /* 1182 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 1183 */ 1184 isp->isp_touched = 1; 1185 } 1186 1187 /* 1188 * Make sure we're in reset state. 1189 */ 1190 ISP_LOCK(isp); 1191 isp_reset(isp); 1192 if (isp->isp_state != ISP_RESETSTATE) { 1193 ISP_UNLOCK(isp); 1194 goto bad; 1195 } 1196 isp_init(isp); 1197 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 1198 isp_uninit(isp); 1199 ISP_UNLOCK(isp); 1200 goto bad; 1201 } 1202 isp_attach(isp); 1203 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 1204 isp_uninit(isp); 1205 ISP_UNLOCK(isp); 1206 goto bad; 1207 } 1208 /* 1209 * XXXX: Here is where we might unload the f/w module 1210 * XXXX: (or decrease the reference count to it). 1211 */ 1212 ISP_UNLOCK(isp); 1213 1214 return (0); 1215 1216 bad: 1217 1218 if (pcs && pcs->ih) { 1219 (void) bus_teardown_intr(dev, irq, pcs->ih); 1220 } 1221 1222 #if __FreeBSD_version >= 500000 1223 if (locksetup && isp) { 1224 mtx_destroy(&isp->isp_osinfo.lock); 1225 } 1226 #endif 1227 1228 if (irq) { 1229 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 1230 } 1231 1232 1233 if (regs) { 1234 (void) bus_release_resource(dev, rtp, rgd, regs); 1235 } 1236 1237 if (pcs) { 1238 if (pcs->pci_isp.isp_param) { 1239 #ifdef ISP_FW_CRASH_DUMP 1240 if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) { 1241 free(FCPARAM(isp)->isp_dump_data, M_DEVBUF); 1242 } 1243 #endif 1244 free(pcs->pci_isp.isp_param, M_DEVBUF); 1245 } 1246 } 1247 1248 /* 1249 * XXXX: Here is where we might unload the f/w module 1250 * XXXX: (or decrease the reference count to it). 1251 */ 1252 return (ENXIO); 1253 } 1254 1255 static int 1256 isp_pci_detach(device_t dev) 1257 { 1258 struct isp_pcisoftc *pcs; 1259 ispsoftc_t *isp; 1260 1261 pcs = device_get_softc(dev); 1262 if (pcs == NULL) { 1263 return (ENXIO); 1264 } 1265 isp = (ispsoftc_t *) pcs; 1266 ISP_DISABLE_INTS(isp); 1267 return (0); 1268 } 1269 1270 static void 1271 isp_pci_intr(void *arg) 1272 { 1273 ispsoftc_t *isp = arg; 1274 uint32_t isr; 1275 uint16_t sema, mbox; 1276 1277 ISP_LOCK(isp); 1278 isp->isp_intcnt++; 1279 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 1280 isp->isp_intbogus++; 1281 } else { 1282 isp_intr(isp, isr, sema, mbox); 1283 } 1284 ISP_UNLOCK(isp); 1285 } 1286 1287 1288 #define IspVirt2Off(a, x) \ 1289 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 1290 _BLK_REG_SHFT] + ((x) & 0xfff)) 1291 1292 #define BXR2(isp, off) \ 1293 bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off) 1294 #define BXW2(isp, off, v) \ 1295 bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v) 1296 #define BXR4(isp, off) \ 1297 bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off) 1298 #define BXW4(isp, off, v) \ 1299 bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v) 1300 1301 1302 static __inline int 1303 isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp) 1304 { 1305 uint32_t val0, val1; 1306 int i = 0; 1307 1308 do { 1309 val0 = BXR2(isp, IspVirt2Off(isp, off)); 1310 val1 = BXR2(isp, IspVirt2Off(isp, off)); 1311 } while (val0 != val1 && ++i < 1000); 1312 if (val0 != val1) { 1313 return (1); 1314 } 1315 *rp = val0; 1316 return (0); 1317 } 1318 1319 static int 1320 isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, 1321 uint16_t *semap, uint16_t *mbp) 1322 { 1323 uint16_t isr, sema; 1324 1325 if (IS_2100(isp)) { 1326 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 1327 return (0); 1328 } 1329 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 1330 return (0); 1331 } 1332 } else { 1333 isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR)); 1334 sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA)); 1335 } 1336 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 1337 isr &= INT_PENDING_MASK(isp); 1338 sema &= BIU_SEMA_LOCK; 1339 if (isr == 0 && sema == 0) { 1340 return (0); 1341 } 1342 *isrp = isr; 1343 if ((*semap = sema) != 0) { 1344 if (IS_2100(isp)) { 1345 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 1346 return (0); 1347 } 1348 } else { 1349 *mbp = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0)); 1350 } 1351 } 1352 return (1); 1353 } 1354 1355 static int 1356 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, 1357 uint16_t *semap, uint16_t *mbox0p) 1358 { 1359 uint32_t hccr; 1360 uint32_t r2hisr; 1361 1362 if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 1363 *isrp = 0; 1364 return (0); 1365 } 1366 r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO)); 1367 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1368 if ((r2hisr & BIU_R2HST_INTR) == 0) { 1369 *isrp = 0; 1370 return (0); 1371 } 1372 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 1373 case ISPR2HST_ROM_MBX_OK: 1374 case ISPR2HST_ROM_MBX_FAIL: 1375 case ISPR2HST_MBX_OK: 1376 case ISPR2HST_MBX_FAIL: 1377 case ISPR2HST_ASYNC_EVENT: 1378 *isrp = r2hisr & 0xffff; 1379 *mbox0p = (r2hisr >> 16); 1380 *semap = 1; 1381 return (1); 1382 case ISPR2HST_RIO_16: 1383 *isrp = r2hisr & 0xffff; 1384 *mbox0p = ASYNC_RIO1; 1385 *semap = 1; 1386 return (1); 1387 case ISPR2HST_FPOST: 1388 *isrp = r2hisr & 0xffff; 1389 *mbox0p = ASYNC_CMD_CMPLT; 1390 *semap = 1; 1391 return (1); 1392 case ISPR2HST_FPOST_CTIO: 1393 *isrp = r2hisr & 0xffff; 1394 *mbox0p = ASYNC_CTIO_DONE; 1395 *semap = 1; 1396 return (1); 1397 case ISPR2HST_RSPQ_UPDATE: 1398 *isrp = r2hisr & 0xffff; 1399 *mbox0p = 0; 1400 *semap = 0; 1401 return (1); 1402 default: 1403 hccr = ISP_READ(isp, HCCR); 1404 if (hccr & HCCR_PAUSE) { 1405 ISP_WRITE(isp, HCCR, HCCR_RESET); 1406 isp_prt(isp, ISP_LOGERR, 1407 "RISC paused at interrupt (%x->%x)", hccr, 1408 ISP_READ(isp, HCCR)); 1409 ISP_WRITE(isp, BIU_ICR, 0); 1410 } else { 1411 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", 1412 r2hisr); 1413 } 1414 return (0); 1415 } 1416 } 1417 1418 static int 1419 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp, 1420 uint16_t *semap, uint16_t *mbox0p) 1421 { 1422 uint32_t r2hisr; 1423 1424 r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO)); 1425 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1426 if ((r2hisr & BIU2400_R2HST_INTR) == 0) { 1427 *isrp = 0; 1428 return (0); 1429 } 1430 switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) { 1431 case ISP2400R2HST_ROM_MBX_OK: 1432 case ISP2400R2HST_ROM_MBX_FAIL: 1433 case ISP2400R2HST_MBX_OK: 1434 case ISP2400R2HST_MBX_FAIL: 1435 case ISP2400R2HST_ASYNC_EVENT: 1436 *isrp = r2hisr & 0xffff; 1437 *mbox0p = (r2hisr >> 16); 1438 *semap = 1; 1439 return (1); 1440 case ISP2400R2HST_RSPQ_UPDATE: 1441 case ISP2400R2HST_ATIO_RSPQ_UPDATE: 1442 case ISP2400R2HST_ATIO_RQST_UPDATE: 1443 *isrp = r2hisr & 0xffff; 1444 *mbox0p = 0; 1445 *semap = 0; 1446 return (1); 1447 default: 1448 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); 1449 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); 1450 return (0); 1451 } 1452 } 1453 1454 static uint32_t 1455 isp_pci_rd_reg(ispsoftc_t *isp, int regoff) 1456 { 1457 uint16_t rv; 1458 int oldconf = 0; 1459 1460 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1461 /* 1462 * We will assume that someone has paused the RISC processor. 1463 */ 1464 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1465 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1466 oldconf | BIU_PCI_CONF1_SXP); 1467 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1468 } 1469 rv = BXR2(isp, IspVirt2Off(isp, regoff)); 1470 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1471 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); 1472 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1473 } 1474 return (rv); 1475 } 1476 1477 static void 1478 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) 1479 { 1480 int oldconf = 0; 1481 1482 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1483 /* 1484 * We will assume that someone has paused the RISC processor. 1485 */ 1486 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1487 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1488 oldconf | BIU_PCI_CONF1_SXP); 1489 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1490 } 1491 BXW2(isp, IspVirt2Off(isp, regoff), val); 1492 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2); 1493 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1494 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); 1495 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1496 } 1497 1498 } 1499 1500 static uint32_t 1501 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff) 1502 { 1503 uint32_t rv, oc = 0; 1504 1505 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1506 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1507 uint32_t tc; 1508 /* 1509 * We will assume that someone has paused the RISC processor. 1510 */ 1511 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1512 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1513 if (regoff & SXP_BANK1_SELECT) 1514 tc |= BIU_PCI1080_CONF1_SXP1; 1515 else 1516 tc |= BIU_PCI1080_CONF1_SXP0; 1517 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); 1518 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1519 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1520 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1521 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1522 oc | BIU_PCI1080_CONF1_DMA); 1523 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1524 } 1525 rv = BXR2(isp, IspVirt2Off(isp, regoff)); 1526 if (oc) { 1527 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); 1528 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1529 } 1530 return (rv); 1531 } 1532 1533 static void 1534 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val) 1535 { 1536 int oc = 0; 1537 1538 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1539 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1540 uint32_t tc; 1541 /* 1542 * We will assume that someone has paused the RISC processor. 1543 */ 1544 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1545 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1546 if (regoff & SXP_BANK1_SELECT) 1547 tc |= BIU_PCI1080_CONF1_SXP1; 1548 else 1549 tc |= BIU_PCI1080_CONF1_SXP0; 1550 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); 1551 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1552 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1553 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1554 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1555 oc | BIU_PCI1080_CONF1_DMA); 1556 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1557 } 1558 BXW2(isp, IspVirt2Off(isp, regoff), val); 1559 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2); 1560 if (oc) { 1561 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); 1562 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2); 1563 } 1564 } 1565 1566 static uint32_t 1567 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff) 1568 { 1569 uint32_t rv; 1570 int block = regoff & _BLK_REG_MASK; 1571 1572 switch (block) { 1573 case BIU_BLOCK: 1574 break; 1575 case MBOX_BLOCK: 1576 return (BXR2(isp, IspVirt2Off(isp, regoff))); 1577 case SXP_BLOCK: 1578 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff); 1579 return (0xffffffff); 1580 case RISC_BLOCK: 1581 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff); 1582 return (0xffffffff); 1583 case DMA_BLOCK: 1584 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff); 1585 return (0xffffffff); 1586 default: 1587 isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff); 1588 return (0xffffffff); 1589 } 1590 1591 1592 switch (regoff) { 1593 case BIU2400_FLASH_ADDR: 1594 case BIU2400_FLASH_DATA: 1595 case BIU2400_ICR: 1596 case BIU2400_ISR: 1597 case BIU2400_CSR: 1598 case BIU2400_REQINP: 1599 case BIU2400_REQOUTP: 1600 case BIU2400_RSPINP: 1601 case BIU2400_RSPOUTP: 1602 case BIU2400_PRI_RQINP: 1603 case BIU2400_PRI_RSPINP: 1604 case BIU2400_ATIO_RSPINP: 1605 case BIU2400_ATIO_REQINP: 1606 case BIU2400_HCCR: 1607 case BIU2400_GPIOD: 1608 case BIU2400_GPIOE: 1609 case BIU2400_HSEMA: 1610 rv = BXR4(isp, IspVirt2Off(isp, regoff)); 1611 break; 1612 case BIU2400_R2HSTSLO: 1613 rv = BXR4(isp, IspVirt2Off(isp, regoff)); 1614 break; 1615 case BIU2400_R2HSTSHI: 1616 rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16; 1617 break; 1618 default: 1619 isp_prt(isp, ISP_LOGERR, 1620 "isp_pci_rd_reg_2400: unknown offset %x", regoff); 1621 rv = 0xffffffff; 1622 break; 1623 } 1624 return (rv); 1625 } 1626 1627 static void 1628 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val) 1629 { 1630 int block = regoff & _BLK_REG_MASK; 1631 1632 switch (block) { 1633 case BIU_BLOCK: 1634 break; 1635 case MBOX_BLOCK: 1636 BXW2(isp, IspVirt2Off(isp, regoff), val); 1637 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2); 1638 return; 1639 case SXP_BLOCK: 1640 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff); 1641 return; 1642 case RISC_BLOCK: 1643 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff); 1644 return; 1645 case DMA_BLOCK: 1646 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff); 1647 return; 1648 default: 1649 isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x", 1650 regoff); 1651 break; 1652 } 1653 1654 switch (regoff) { 1655 case BIU2400_FLASH_ADDR: 1656 case BIU2400_FLASH_DATA: 1657 case BIU2400_ICR: 1658 case BIU2400_ISR: 1659 case BIU2400_CSR: 1660 case BIU2400_REQINP: 1661 case BIU2400_REQOUTP: 1662 case BIU2400_RSPINP: 1663 case BIU2400_RSPOUTP: 1664 case BIU2400_PRI_RQINP: 1665 case BIU2400_PRI_RSPINP: 1666 case BIU2400_ATIO_RSPINP: 1667 case BIU2400_ATIO_REQINP: 1668 case BIU2400_HCCR: 1669 case BIU2400_GPIOD: 1670 case BIU2400_GPIOE: 1671 case BIU2400_HSEMA: 1672 BXW4(isp, IspVirt2Off(isp, regoff), val); 1673 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4); 1674 break; 1675 default: 1676 isp_prt(isp, ISP_LOGERR, 1677 "isp_pci_wr_reg_2400: bad offset 0x%x", regoff); 1678 break; 1679 } 1680 } 1681 1682 1683 struct imush { 1684 ispsoftc_t *isp; 1685 int error; 1686 }; 1687 1688 static void imc(void *, bus_dma_segment_t *, int, int); 1689 1690 static void 1691 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1692 { 1693 struct imush *imushp = (struct imush *) arg; 1694 if (error) { 1695 imushp->error = error; 1696 } else { 1697 ispsoftc_t *isp =imushp->isp; 1698 bus_addr_t addr = segs->ds_addr; 1699 1700 isp->isp_rquest_dma = addr; 1701 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1702 isp->isp_result_dma = addr; 1703 if (IS_FC(isp)) { 1704 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1705 FCPARAM(isp)->isp_scdma = addr; 1706 } 1707 } 1708 } 1709 1710 static int 1711 isp_pci_mbxdma(ispsoftc_t *isp) 1712 { 1713 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1714 caddr_t base; 1715 uint32_t len; 1716 int i, error, ns; 1717 bus_size_t slim; /* segment size */ 1718 bus_addr_t llim; /* low limit of unavailable dma */ 1719 bus_addr_t hlim; /* high limit of unavailable dma */ 1720 struct imush im; 1721 1722 /* 1723 * Already been here? If so, leave... 1724 */ 1725 if (isp->isp_rquest) { 1726 return (0); 1727 } 1728 1729 if (isp->isp_maxcmds == 0) { 1730 isp_prt(isp, ISP_LOGERR, "maxcmds not set"); 1731 return (1); 1732 } 1733 1734 hlim = BUS_SPACE_MAXADDR; 1735 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1736 slim = (bus_size_t) (1ULL << 32); 1737 llim = BUS_SPACE_MAXADDR; 1738 } else { 1739 llim = BUS_SPACE_MAXADDR_32BIT; 1740 slim = (1 << 24); 1741 } 1742 1743 /* 1744 * XXX: We don't really support 64 bit target mode for parallel scsi yet 1745 */ 1746 #ifdef ISP_TARGET_MODE 1747 if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) { 1748 isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet"); 1749 return (1); 1750 } 1751 #endif 1752 1753 ISP_UNLOCK(isp); 1754 if (isp_dma_tag_create(BUS_DMA_ROOTARG(pcs->pci_dev), 1, slim, llim, 1755 hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, 1756 &pcs->dmat)) { 1757 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1758 ISP_LOCK(isp); 1759 return (1); 1760 } 1761 1762 1763 len = sizeof (XS_T **) * isp->isp_maxcmds; 1764 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1765 if (isp->isp_xflist == NULL) { 1766 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1767 ISP_LOCK(isp); 1768 return (1); 1769 } 1770 #ifdef ISP_TARGET_MODE 1771 len = sizeof (void **) * isp->isp_maxcmds; 1772 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1773 if (isp->isp_tgtlist == NULL) { 1774 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1775 ISP_LOCK(isp); 1776 return (1); 1777 } 1778 #endif 1779 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1780 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1781 if (pcs->dmaps == NULL) { 1782 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1783 free(isp->isp_xflist, M_DEVBUF); 1784 #ifdef ISP_TARGET_MODE 1785 free(isp->isp_tgtlist, M_DEVBUF); 1786 #endif 1787 ISP_LOCK(isp); 1788 return (1); 1789 } 1790 1791 /* 1792 * Allocate and map the request, result queues, plus FC scratch area. 1793 */ 1794 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1795 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1796 if (IS_FC(isp)) { 1797 len += ISP2100_SCRLEN; 1798 } 1799 1800 ns = (len / PAGE_SIZE) + 1; 1801 /* 1802 * Create a tag for the control spaces- force it to within 32 bits. 1803 */ 1804 if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim, 1805 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1806 NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) { 1807 isp_prt(isp, ISP_LOGERR, 1808 "cannot create a dma tag for control spaces"); 1809 free(pcs->dmaps, M_DEVBUF); 1810 free(isp->isp_xflist, M_DEVBUF); 1811 #ifdef ISP_TARGET_MODE 1812 free(isp->isp_tgtlist, M_DEVBUF); 1813 #endif 1814 ISP_LOCK(isp); 1815 return (1); 1816 } 1817 1818 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1819 &isp->isp_cdmap) != 0) { 1820 isp_prt(isp, ISP_LOGERR, 1821 "cannot allocate %d bytes of CCB memory", len); 1822 bus_dma_tag_destroy(isp->isp_cdmat); 1823 free(isp->isp_xflist, M_DEVBUF); 1824 #ifdef ISP_TARGET_MODE 1825 free(isp->isp_tgtlist, M_DEVBUF); 1826 #endif 1827 free(pcs->dmaps, M_DEVBUF); 1828 ISP_LOCK(isp); 1829 return (1); 1830 } 1831 1832 for (i = 0; i < isp->isp_maxcmds; i++) { 1833 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1834 if (error) { 1835 isp_prt(isp, ISP_LOGERR, 1836 "error %d creating per-cmd DMA maps", error); 1837 while (--i >= 0) { 1838 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1839 } 1840 goto bad; 1841 } 1842 } 1843 1844 im.isp = isp; 1845 im.error = 0; 1846 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1847 if (im.error) { 1848 isp_prt(isp, ISP_LOGERR, 1849 "error %d loading dma map for control areas", im.error); 1850 goto bad; 1851 } 1852 1853 isp->isp_rquest = base; 1854 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1855 isp->isp_result = base; 1856 if (IS_FC(isp)) { 1857 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1858 FCPARAM(isp)->isp_scratch = base; 1859 } 1860 ISP_LOCK(isp); 1861 return (0); 1862 1863 bad: 1864 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1865 bus_dma_tag_destroy(isp->isp_cdmat); 1866 free(isp->isp_xflist, M_DEVBUF); 1867 #ifdef ISP_TARGET_MODE 1868 free(isp->isp_tgtlist, M_DEVBUF); 1869 #endif 1870 free(pcs->dmaps, M_DEVBUF); 1871 ISP_LOCK(isp); 1872 isp->isp_rquest = NULL; 1873 return (1); 1874 } 1875 1876 typedef struct { 1877 ispsoftc_t *isp; 1878 void *cmd_token; 1879 void *rq; 1880 uint32_t *nxtip; 1881 uint32_t optr; 1882 int error; 1883 } mush_t; 1884 1885 #define MUSHERR_NOQENTRIES -2 1886 1887 #ifdef ISP_TARGET_MODE 1888 /* 1889 * We need to handle DMA for target mode differently from initiator mode. 1890 * 1891 * DMA mapping and construction and submission of CTIO Request Entries 1892 * and rendevous for completion are very tightly coupled because we start 1893 * out by knowing (per platform) how much data we have to move, but we 1894 * don't know, up front, how many DMA mapping segments will have to be used 1895 * cover that data, so we don't know how many CTIO Request Entries we 1896 * will end up using. Further, for performance reasons we may want to 1897 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1898 * 1899 * The standard vector still goes through isp_pci_dmasetup, but the callback 1900 * for the DMA mapping routines comes here instead with the whole transfer 1901 * mapped and a pointer to a partially filled in already allocated request 1902 * queue entry. We finish the job. 1903 */ 1904 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1905 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1906 1907 #define STATUS_WITH_DATA 1 1908 1909 static void 1910 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1911 { 1912 mush_t *mp; 1913 struct ccb_scsiio *csio; 1914 ispsoftc_t *isp; 1915 struct isp_pcisoftc *pcs; 1916 bus_dmamap_t *dp; 1917 ct_entry_t *cto, *qe; 1918 uint8_t scsi_status; 1919 uint32_t curi, nxti, handle; 1920 uint32_t sflags; 1921 int32_t resid; 1922 int nth_ctio, nctios, send_status; 1923 1924 mp = (mush_t *) arg; 1925 if (error) { 1926 mp->error = error; 1927 return; 1928 } 1929 1930 isp = mp->isp; 1931 csio = mp->cmd_token; 1932 cto = mp->rq; 1933 curi = isp->isp_reqidx; 1934 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1935 1936 cto->ct_xfrlen = 0; 1937 cto->ct_seg_count = 0; 1938 cto->ct_header.rqs_entry_count = 1; 1939 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1940 1941 if (nseg == 0) { 1942 cto->ct_header.rqs_seqno = 1; 1943 isp_prt(isp, ISP_LOGTDEBUG1, 1944 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1945 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1946 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1947 cto->ct_scsi_status, cto->ct_resid); 1948 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1949 isp_put_ctio(isp, cto, qe); 1950 return; 1951 } 1952 1953 nctios = nseg / ISP_RQDSEG; 1954 if (nseg % ISP_RQDSEG) { 1955 nctios++; 1956 } 1957 1958 /* 1959 * Save syshandle, and potentially any SCSI status, which we'll 1960 * reinsert on the last CTIO we're going to send. 1961 */ 1962 1963 handle = cto->ct_syshandle; 1964 cto->ct_syshandle = 0; 1965 cto->ct_header.rqs_seqno = 0; 1966 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1967 1968 if (send_status) { 1969 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1970 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1971 /* 1972 * Preserve residual. 1973 */ 1974 resid = cto->ct_resid; 1975 1976 /* 1977 * Save actual SCSI status. 1978 */ 1979 scsi_status = cto->ct_scsi_status; 1980 1981 #ifndef STATUS_WITH_DATA 1982 sflags |= CT_NO_DATA; 1983 /* 1984 * We can't do a status at the same time as a data CTIO, so 1985 * we need to synthesize an extra CTIO at this level. 1986 */ 1987 nctios++; 1988 #endif 1989 } else { 1990 sflags = scsi_status = resid = 0; 1991 } 1992 1993 cto->ct_resid = 0; 1994 cto->ct_scsi_status = 0; 1995 1996 pcs = (struct isp_pcisoftc *)isp; 1997 dp = &pcs->dmaps[isp_handle_index(handle & ISP_HANDLE_MASK)]; 1998 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1999 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2000 } else { 2001 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2002 } 2003 2004 nxti = *mp->nxtip; 2005 2006 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 2007 int seglim; 2008 2009 seglim = nseg; 2010 if (seglim) { 2011 int seg; 2012 2013 if (seglim > ISP_RQDSEG) 2014 seglim = ISP_RQDSEG; 2015 2016 for (seg = 0; seg < seglim; seg++, nseg--) { 2017 /* 2018 * Unlike normal initiator commands, we don't 2019 * do any swizzling here. 2020 */ 2021 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 2022 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 2023 cto->ct_xfrlen += dm_segs->ds_len; 2024 dm_segs++; 2025 } 2026 cto->ct_seg_count = seg; 2027 } else { 2028 /* 2029 * This case should only happen when we're sending an 2030 * extra CTIO with final status. 2031 */ 2032 if (send_status == 0) { 2033 isp_prt(isp, ISP_LOGWARN, 2034 "tdma_mk ran out of segments"); 2035 mp->error = EINVAL; 2036 return; 2037 } 2038 } 2039 2040 /* 2041 * At this point, the fields ct_lun, ct_iid, ct_tagval, 2042 * ct_tagtype, and ct_timeout have been carried over 2043 * unchanged from what our caller had set. 2044 * 2045 * The dataseg fields and the seg_count fields we just got 2046 * through setting. The data direction we've preserved all 2047 * along and only clear it if we're now sending status. 2048 */ 2049 2050 if (nth_ctio == nctios - 1) { 2051 /* 2052 * We're the last in a sequence of CTIOs, so mark 2053 * this CTIO and save the handle to the CCB such that 2054 * when this CTIO completes we can free dma resources 2055 * and do whatever else we need to do to finish the 2056 * rest of the command. We *don't* give this to the 2057 * firmware to work on- the caller will do that. 2058 */ 2059 2060 cto->ct_syshandle = handle; 2061 cto->ct_header.rqs_seqno = 1; 2062 2063 if (send_status) { 2064 cto->ct_scsi_status = scsi_status; 2065 cto->ct_flags |= sflags; 2066 cto->ct_resid = resid; 2067 } 2068 if (send_status) { 2069 isp_prt(isp, ISP_LOGTDEBUG1, 2070 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 2071 "scsi status %x resid %d", 2072 cto->ct_fwhandle, csio->ccb_h.target_lun, 2073 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 2074 cto->ct_scsi_status, cto->ct_resid); 2075 } else { 2076 isp_prt(isp, ISP_LOGTDEBUG1, 2077 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 2078 cto->ct_fwhandle, csio->ccb_h.target_lun, 2079 cto->ct_iid, cto->ct_tag_val, 2080 cto->ct_flags); 2081 } 2082 isp_put_ctio(isp, cto, qe); 2083 ISP_TDQE(isp, "last tdma_mk", curi, cto); 2084 if (nctios > 1) { 2085 MEMORYBARRIER(isp, SYNC_REQUEST, 2086 curi, QENTRY_LEN); 2087 } 2088 } else { 2089 ct_entry_t *oqe = qe; 2090 2091 /* 2092 * Make sure syshandle fields are clean 2093 */ 2094 cto->ct_syshandle = 0; 2095 cto->ct_header.rqs_seqno = 0; 2096 2097 isp_prt(isp, ISP_LOGTDEBUG1, 2098 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 2099 cto->ct_fwhandle, csio->ccb_h.target_lun, 2100 cto->ct_iid, cto->ct_flags); 2101 2102 /* 2103 * Get a new CTIO 2104 */ 2105 qe = (ct_entry_t *) 2106 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2107 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 2108 if (nxti == mp->optr) { 2109 isp_prt(isp, ISP_LOGTDEBUG0, 2110 "Queue Overflow in tdma_mk"); 2111 mp->error = MUSHERR_NOQENTRIES; 2112 return; 2113 } 2114 2115 /* 2116 * Now that we're done with the old CTIO, 2117 * flush it out to the request queue. 2118 */ 2119 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 2120 isp_put_ctio(isp, cto, oqe); 2121 if (nth_ctio != 0) { 2122 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 2123 QENTRY_LEN); 2124 } 2125 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 2126 2127 /* 2128 * Reset some fields in the CTIO so we can reuse 2129 * for the next one we'll flush to the request 2130 * queue. 2131 */ 2132 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 2133 cto->ct_header.rqs_entry_count = 1; 2134 cto->ct_header.rqs_flags = 0; 2135 cto->ct_status = 0; 2136 cto->ct_scsi_status = 0; 2137 cto->ct_xfrlen = 0; 2138 cto->ct_resid = 0; 2139 cto->ct_seg_count = 0; 2140 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 2141 } 2142 } 2143 *mp->nxtip = nxti; 2144 } 2145 2146 /* 2147 * We don't have to do multiple CTIOs here. Instead, we can just do 2148 * continuation segments as needed. This greatly simplifies the code 2149 * improves performance. 2150 */ 2151 2152 static void 2153 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2154 { 2155 mush_t *mp; 2156 struct ccb_scsiio *csio; 2157 ispsoftc_t *isp; 2158 ct2_entry_t *cto, *qe; 2159 uint32_t curi, nxti; 2160 ispds_t *ds; 2161 ispds64_t *ds64; 2162 int segcnt, seglim; 2163 2164 mp = (mush_t *) arg; 2165 if (error) { 2166 mp->error = error; 2167 return; 2168 } 2169 2170 isp = mp->isp; 2171 csio = mp->cmd_token; 2172 cto = mp->rq; 2173 2174 curi = isp->isp_reqidx; 2175 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 2176 2177 if (nseg == 0) { 2178 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 2179 isp_prt(isp, ISP_LOGWARN, 2180 "dma2_tgt_fc, a status CTIO2 without MODE1 " 2181 "set (0x%x)", cto->ct_flags); 2182 mp->error = EINVAL; 2183 return; 2184 } 2185 /* 2186 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 2187 * flags to NO DATA and clear relative offset flags. 2188 * We preserve the ct_resid and the response area. 2189 */ 2190 cto->ct_header.rqs_seqno = 1; 2191 cto->ct_seg_count = 0; 2192 cto->ct_reloff = 0; 2193 isp_prt(isp, ISP_LOGTDEBUG1, 2194 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 2195 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 2196 cto->ct_iid, cto->ct_flags, cto->ct_status, 2197 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 2198 if (FCPARAM(isp)->isp_2klogin) { 2199 isp_put_ctio2e(isp, 2200 (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2201 } else { 2202 isp_put_ctio2(isp, cto, qe); 2203 } 2204 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 2205 return; 2206 } 2207 2208 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 2209 isp_prt(isp, ISP_LOGERR, 2210 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 2211 "(0x%x)", cto->ct_flags); 2212 mp->error = EINVAL; 2213 return; 2214 } 2215 2216 2217 nxti = *mp->nxtip; 2218 2219 /* 2220 * Check to see if we need to DAC addressing or not. 2221 * 2222 * Any address that's over the 4GB boundary causes this 2223 * to happen. 2224 */ 2225 segcnt = nseg; 2226 if (sizeof (bus_addr_t) > 4) { 2227 for (segcnt = 0; segcnt < nseg; segcnt++) { 2228 uint64_t addr = dm_segs[segcnt].ds_addr; 2229 if (addr >= 0x100000000LL) { 2230 break; 2231 } 2232 } 2233 } 2234 if (segcnt != nseg) { 2235 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3; 2236 seglim = ISP_RQDSEG_T3; 2237 ds64 = &cto->rsp.m0.u.ct_dataseg64[0]; 2238 ds = NULL; 2239 } else { 2240 seglim = ISP_RQDSEG_T2; 2241 ds64 = NULL; 2242 ds = &cto->rsp.m0.u.ct_dataseg[0]; 2243 } 2244 cto->ct_seg_count = 0; 2245 2246 /* 2247 * Set up the CTIO2 data segments. 2248 */ 2249 for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg; 2250 cto->ct_seg_count++, segcnt++) { 2251 if (ds64) { 2252 ds64->ds_basehi = 2253 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2254 ds64->ds_base = dm_segs[segcnt].ds_addr; 2255 ds64->ds_count = dm_segs[segcnt].ds_len; 2256 ds64++; 2257 } else { 2258 ds->ds_base = dm_segs[segcnt].ds_addr; 2259 ds->ds_count = dm_segs[segcnt].ds_len; 2260 ds++; 2261 } 2262 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2263 #if __FreeBSD_version < 500000 2264 isp_prt(isp, ISP_LOGTDEBUG1, 2265 "isp_send_ctio2: ent0[%d]0x%llx:%llu", 2266 cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr, 2267 (uint64_t)dm_segs[segcnt].ds_len); 2268 #else 2269 isp_prt(isp, ISP_LOGTDEBUG1, 2270 "isp_send_ctio2: ent0[%d]0x%jx:%ju", 2271 cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr, 2272 (uintmax_t)dm_segs[segcnt].ds_len); 2273 #endif 2274 } 2275 2276 while (segcnt < nseg) { 2277 uint32_t curip; 2278 int seg; 2279 ispcontreq_t local, *crq = &local, *qep; 2280 2281 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2282 curip = nxti; 2283 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 2284 if (nxti == mp->optr) { 2285 ISP_UNLOCK(isp); 2286 isp_prt(isp, ISP_LOGTDEBUG0, 2287 "tdma_mkfc: request queue overflow"); 2288 mp->error = MUSHERR_NOQENTRIES; 2289 return; 2290 } 2291 cto->ct_header.rqs_entry_count++; 2292 MEMZERO((void *)crq, sizeof (*crq)); 2293 crq->req_header.rqs_entry_count = 1; 2294 if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { 2295 seglim = ISP_CDSEG64; 2296 ds = NULL; 2297 ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0]; 2298 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2299 } else { 2300 seglim = ISP_CDSEG; 2301 ds = &crq->req_dataseg[0]; 2302 ds64 = NULL; 2303 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2304 } 2305 for (seg = 0; segcnt < nseg && seg < seglim; 2306 segcnt++, seg++) { 2307 if (ds64) { 2308 ds64->ds_basehi = 2309 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2310 ds64->ds_base = dm_segs[segcnt].ds_addr; 2311 ds64->ds_count = dm_segs[segcnt].ds_len; 2312 ds64++; 2313 } else { 2314 ds->ds_base = dm_segs[segcnt].ds_addr; 2315 ds->ds_count = dm_segs[segcnt].ds_len; 2316 ds++; 2317 } 2318 #if __FreeBSD_version < 500000 2319 isp_prt(isp, ISP_LOGTDEBUG1, 2320 "isp_send_ctio2: ent%d[%d]%llx:%llu", 2321 cto->ct_header.rqs_entry_count-1, seg, 2322 (uint64_t)dm_segs[segcnt].ds_addr, 2323 (uint64_t)dm_segs[segcnt].ds_len); 2324 #else 2325 isp_prt(isp, ISP_LOGTDEBUG1, 2326 "isp_send_ctio2: ent%d[%d]%jx:%ju", 2327 cto->ct_header.rqs_entry_count-1, seg, 2328 (uintmax_t)dm_segs[segcnt].ds_addr, 2329 (uintmax_t)dm_segs[segcnt].ds_len); 2330 #endif 2331 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2332 cto->ct_seg_count++; 2333 } 2334 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 2335 isp_put_cont_req(isp, crq, qep); 2336 ISP_TDQE(isp, "cont entry", curi, qep); 2337 } 2338 2339 /* 2340 * No do final twiddling for the CTIO itself. 2341 */ 2342 cto->ct_header.rqs_seqno = 1; 2343 isp_prt(isp, ISP_LOGTDEBUG1, 2344 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 2345 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 2346 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 2347 cto->ct_resid); 2348 if (FCPARAM(isp)->isp_2klogin) { 2349 isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2350 } else { 2351 isp_put_ctio2(isp, cto, qe); 2352 } 2353 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 2354 *mp->nxtip = nxti; 2355 } 2356 #endif 2357 2358 static void dma_2400(void *, bus_dma_segment_t *, int, int); 2359 static void dma2_a64(void *, bus_dma_segment_t *, int, int); 2360 static void dma2(void *, bus_dma_segment_t *, int, int); 2361 2362 static void 2363 dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2364 { 2365 mush_t *mp; 2366 ispsoftc_t *isp; 2367 struct ccb_scsiio *csio; 2368 struct isp_pcisoftc *pcs; 2369 bus_dmamap_t *dp; 2370 bus_dma_segment_t *eseg; 2371 ispreqt7_t *rq; 2372 int seglim, datalen; 2373 uint32_t nxti; 2374 2375 mp = (mush_t *) arg; 2376 if (error) { 2377 mp->error = error; 2378 return; 2379 } 2380 2381 if (nseg < 1) { 2382 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2383 mp->error = EFAULT; 2384 return; 2385 } 2386 2387 csio = mp->cmd_token; 2388 isp = mp->isp; 2389 rq = mp->rq; 2390 pcs = (struct isp_pcisoftc *)mp->isp; 2391 dp = &pcs->dmaps[isp_handle_index(rq->req_handle & ISP_HANDLE_MASK)]; 2392 nxti = *mp->nxtip; 2393 2394 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2395 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2396 } else { 2397 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2398 } 2399 datalen = XS_XFRLEN(csio); 2400 2401 /* 2402 * We're passed an initial partially filled in entry that 2403 * has most fields filled in except for data transfer 2404 * related values. 2405 * 2406 * Our job is to fill in the initial request queue entry and 2407 * then to start allocating and filling in continuation entries 2408 * until we've covered the entire transfer. 2409 */ 2410 2411 rq->req_header.rqs_entry_type = RQSTYPE_T7RQS; 2412 rq->req_dl = datalen; 2413 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2414 rq->req_alen_datadir = 0x2; 2415 } else { 2416 rq->req_alen_datadir = 0x1; 2417 } 2418 2419 eseg = dm_segs + nseg; 2420 2421 rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr); 2422 rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr); 2423 rq->req_dataseg.ds_count = dm_segs->ds_len; 2424 2425 datalen -= dm_segs->ds_len; 2426 2427 dm_segs++; 2428 rq->req_seg_count++; 2429 2430 while (datalen > 0 && dm_segs != eseg) { 2431 uint32_t onxti; 2432 ispcontreq64_t local, *crq = &local, *cqe; 2433 2434 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2435 onxti = nxti; 2436 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2437 if (nxti == mp->optr) { 2438 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2439 mp->error = MUSHERR_NOQENTRIES; 2440 return; 2441 } 2442 rq->req_header.rqs_entry_count++; 2443 MEMZERO((void *)crq, sizeof (*crq)); 2444 crq->req_header.rqs_entry_count = 1; 2445 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2446 2447 seglim = 0; 2448 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2449 crq->req_dataseg[seglim].ds_base = 2450 DMA_LO32(dm_segs->ds_addr); 2451 crq->req_dataseg[seglim].ds_basehi = 2452 DMA_HI32(dm_segs->ds_addr); 2453 crq->req_dataseg[seglim].ds_count = 2454 dm_segs->ds_len; 2455 rq->req_seg_count++; 2456 dm_segs++; 2457 seglim++; 2458 datalen -= dm_segs->ds_len; 2459 } 2460 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2461 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2462 } 2463 isp_put_cont64_req(isp, crq, cqe); 2464 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2465 } 2466 *mp->nxtip = nxti; 2467 } 2468 2469 static void 2470 dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2471 { 2472 mush_t *mp; 2473 ispsoftc_t *isp; 2474 struct ccb_scsiio *csio; 2475 struct isp_pcisoftc *pcs; 2476 bus_dmamap_t *dp; 2477 bus_dma_segment_t *eseg; 2478 ispreq64_t *rq; 2479 int seglim, datalen; 2480 uint32_t nxti; 2481 2482 mp = (mush_t *) arg; 2483 if (error) { 2484 mp->error = error; 2485 return; 2486 } 2487 2488 if (nseg < 1) { 2489 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2490 mp->error = EFAULT; 2491 return; 2492 } 2493 csio = mp->cmd_token; 2494 isp = mp->isp; 2495 rq = mp->rq; 2496 pcs = (struct isp_pcisoftc *)mp->isp; 2497 dp = &pcs->dmaps[isp_handle_index(rq->req_handle & ISP_HANDLE_MASK)]; 2498 nxti = *mp->nxtip; 2499 2500 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2501 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2502 } else { 2503 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2504 } 2505 datalen = XS_XFRLEN(csio); 2506 2507 /* 2508 * We're passed an initial partially filled in entry that 2509 * has most fields filled in except for data transfer 2510 * related values. 2511 * 2512 * Our job is to fill in the initial request queue entry and 2513 * then to start allocating and filling in continuation entries 2514 * until we've covered the entire transfer. 2515 */ 2516 2517 if (IS_FC(isp)) { 2518 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 2519 seglim = ISP_RQDSEG_T3; 2520 ((ispreqt3_t *)rq)->req_totalcnt = datalen; 2521 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2522 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2523 } else { 2524 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2525 } 2526 } else { 2527 rq->req_header.rqs_entry_type = RQSTYPE_A64; 2528 if (csio->cdb_len > 12) { 2529 seglim = 0; 2530 } else { 2531 seglim = ISP_RQDSEG_A64; 2532 } 2533 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2534 rq->req_flags |= REQFLAG_DATA_IN; 2535 } else { 2536 rq->req_flags |= REQFLAG_DATA_OUT; 2537 } 2538 } 2539 2540 eseg = dm_segs + nseg; 2541 2542 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2543 if (IS_FC(isp)) { 2544 ispreqt3_t *rq3 = (ispreqt3_t *)rq; 2545 rq3->req_dataseg[rq3->req_seg_count].ds_base = 2546 DMA_LO32(dm_segs->ds_addr); 2547 rq3->req_dataseg[rq3->req_seg_count].ds_basehi = 2548 DMA_HI32(dm_segs->ds_addr); 2549 rq3->req_dataseg[rq3->req_seg_count].ds_count = 2550 dm_segs->ds_len; 2551 } else { 2552 rq->req_dataseg[rq->req_seg_count].ds_base = 2553 DMA_LO32(dm_segs->ds_addr); 2554 rq->req_dataseg[rq->req_seg_count].ds_basehi = 2555 DMA_HI32(dm_segs->ds_addr); 2556 rq->req_dataseg[rq->req_seg_count].ds_count = 2557 dm_segs->ds_len; 2558 } 2559 datalen -= dm_segs->ds_len; 2560 rq->req_seg_count++; 2561 dm_segs++; 2562 } 2563 2564 while (datalen > 0 && dm_segs != eseg) { 2565 uint32_t onxti; 2566 ispcontreq64_t local, *crq = &local, *cqe; 2567 2568 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2569 onxti = nxti; 2570 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2571 if (nxti == mp->optr) { 2572 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2573 mp->error = MUSHERR_NOQENTRIES; 2574 return; 2575 } 2576 rq->req_header.rqs_entry_count++; 2577 MEMZERO((void *)crq, sizeof (*crq)); 2578 crq->req_header.rqs_entry_count = 1; 2579 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2580 2581 seglim = 0; 2582 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2583 crq->req_dataseg[seglim].ds_base = 2584 DMA_LO32(dm_segs->ds_addr); 2585 crq->req_dataseg[seglim].ds_basehi = 2586 DMA_HI32(dm_segs->ds_addr); 2587 crq->req_dataseg[seglim].ds_count = 2588 dm_segs->ds_len; 2589 rq->req_seg_count++; 2590 dm_segs++; 2591 seglim++; 2592 datalen -= dm_segs->ds_len; 2593 } 2594 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2595 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2596 } 2597 isp_put_cont64_req(isp, crq, cqe); 2598 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2599 } 2600 *mp->nxtip = nxti; 2601 } 2602 2603 static void 2604 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2605 { 2606 mush_t *mp; 2607 ispsoftc_t *isp; 2608 struct ccb_scsiio *csio; 2609 struct isp_pcisoftc *pcs; 2610 bus_dmamap_t *dp; 2611 bus_dma_segment_t *eseg; 2612 ispreq_t *rq; 2613 int seglim, datalen; 2614 uint32_t nxti; 2615 2616 mp = (mush_t *) arg; 2617 if (error) { 2618 mp->error = error; 2619 return; 2620 } 2621 2622 if (nseg < 1) { 2623 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2624 mp->error = EFAULT; 2625 return; 2626 } 2627 csio = mp->cmd_token; 2628 isp = mp->isp; 2629 rq = mp->rq; 2630 pcs = (struct isp_pcisoftc *)mp->isp; 2631 dp = &pcs->dmaps[isp_handle_index(rq->req_handle & ISP_HANDLE_MASK)]; 2632 nxti = *mp->nxtip; 2633 2634 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2635 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2636 } else { 2637 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2638 } 2639 2640 datalen = XS_XFRLEN(csio); 2641 2642 /* 2643 * We're passed an initial partially filled in entry that 2644 * has most fields filled in except for data transfer 2645 * related values. 2646 * 2647 * Our job is to fill in the initial request queue entry and 2648 * then to start allocating and filling in continuation entries 2649 * until we've covered the entire transfer. 2650 */ 2651 2652 if (IS_FC(isp)) { 2653 seglim = ISP_RQDSEG_T2; 2654 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 2655 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2656 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2657 } else { 2658 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2659 } 2660 } else { 2661 if (csio->cdb_len > 12) { 2662 seglim = 0; 2663 } else { 2664 seglim = ISP_RQDSEG; 2665 } 2666 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2667 rq->req_flags |= REQFLAG_DATA_IN; 2668 } else { 2669 rq->req_flags |= REQFLAG_DATA_OUT; 2670 } 2671 } 2672 2673 eseg = dm_segs + nseg; 2674 2675 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2676 if (IS_FC(isp)) { 2677 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 2678 rq2->req_dataseg[rq2->req_seg_count].ds_base = 2679 DMA_LO32(dm_segs->ds_addr); 2680 rq2->req_dataseg[rq2->req_seg_count].ds_count = 2681 dm_segs->ds_len; 2682 } else { 2683 rq->req_dataseg[rq->req_seg_count].ds_base = 2684 DMA_LO32(dm_segs->ds_addr); 2685 rq->req_dataseg[rq->req_seg_count].ds_count = 2686 dm_segs->ds_len; 2687 } 2688 datalen -= dm_segs->ds_len; 2689 rq->req_seg_count++; 2690 dm_segs++; 2691 } 2692 2693 while (datalen > 0 && dm_segs != eseg) { 2694 uint32_t onxti; 2695 ispcontreq_t local, *crq = &local, *cqe; 2696 2697 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2698 onxti = nxti; 2699 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2700 if (nxti == mp->optr) { 2701 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2702 mp->error = MUSHERR_NOQENTRIES; 2703 return; 2704 } 2705 rq->req_header.rqs_entry_count++; 2706 MEMZERO((void *)crq, sizeof (*crq)); 2707 crq->req_header.rqs_entry_count = 1; 2708 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2709 2710 seglim = 0; 2711 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 2712 crq->req_dataseg[seglim].ds_base = 2713 DMA_LO32(dm_segs->ds_addr); 2714 crq->req_dataseg[seglim].ds_count = 2715 dm_segs->ds_len; 2716 rq->req_seg_count++; 2717 dm_segs++; 2718 seglim++; 2719 datalen -= dm_segs->ds_len; 2720 } 2721 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2722 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2723 } 2724 isp_put_cont_req(isp, crq, cqe); 2725 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2726 } 2727 *mp->nxtip = nxti; 2728 } 2729 2730 /* 2731 * We enter with ISP_LOCK held 2732 */ 2733 static int 2734 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq, 2735 uint32_t *nxtip, uint32_t optr) 2736 { 2737 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2738 ispreq_t *qep; 2739 bus_dmamap_t *dp = NULL; 2740 mush_t mush, *mp; 2741 void (*eptr)(void *, bus_dma_segment_t *, int, int); 2742 2743 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 2744 #ifdef ISP_TARGET_MODE 2745 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 2746 if (IS_FC(isp)) { 2747 eptr = tdma_mkfc; 2748 } else { 2749 eptr = tdma_mk; 2750 } 2751 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2752 (csio->dxfer_len == 0)) { 2753 mp = &mush; 2754 mp->isp = isp; 2755 mp->cmd_token = csio; 2756 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 2757 mp->nxtip = nxtip; 2758 mp->optr = optr; 2759 mp->error = 0; 2760 ISPLOCK_2_CAMLOCK(isp); 2761 (*eptr)(mp, NULL, 0, 0); 2762 CAMLOCK_2_ISPLOCK(isp); 2763 goto mbxsync; 2764 } 2765 } else 2766 #endif 2767 if (IS_24XX(isp)) { 2768 eptr = dma_2400; 2769 } else if (sizeof (bus_addr_t) > 4) { 2770 eptr = dma2_a64; 2771 } else { 2772 eptr = dma2; 2773 } 2774 2775 2776 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2777 (csio->dxfer_len == 0)) { 2778 rq->req_seg_count = 1; 2779 goto mbxsync; 2780 } 2781 2782 /* 2783 * Do a virtual grapevine step to collect info for 2784 * the callback dma allocation that we have to use... 2785 */ 2786 mp = &mush; 2787 mp->isp = isp; 2788 mp->cmd_token = csio; 2789 mp->rq = rq; 2790 mp->nxtip = nxtip; 2791 mp->optr = optr; 2792 mp->error = 0; 2793 2794 ISPLOCK_2_CAMLOCK(isp); 2795 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 2796 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 2797 int error, s; 2798 dp = &pcs->dmaps[isp_handle_index( 2799 rq->req_handle & ISP_HANDLE_MASK)]; 2800 s = splsoftvm(); 2801 error = bus_dmamap_load(pcs->dmat, *dp, 2802 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 2803 if (error == EINPROGRESS) { 2804 bus_dmamap_unload(pcs->dmat, *dp); 2805 mp->error = EINVAL; 2806 isp_prt(isp, ISP_LOGERR, 2807 "deferred dma allocation not supported"); 2808 } else if (error && mp->error == 0) { 2809 #ifdef DIAGNOSTIC 2810 isp_prt(isp, ISP_LOGERR, 2811 "error %d in dma mapping code", error); 2812 #endif 2813 mp->error = error; 2814 } 2815 splx(s); 2816 } else { 2817 /* Pointer to physical buffer */ 2818 struct bus_dma_segment seg; 2819 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; 2820 seg.ds_len = csio->dxfer_len; 2821 (*eptr)(mp, &seg, 1, 0); 2822 } 2823 } else { 2824 struct bus_dma_segment *segs; 2825 2826 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 2827 isp_prt(isp, ISP_LOGERR, 2828 "Physical segment pointers unsupported"); 2829 mp->error = EINVAL; 2830 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2831 isp_prt(isp, ISP_LOGERR, 2832 "Virtual segment addresses unsupported"); 2833 mp->error = EINVAL; 2834 } else { 2835 /* Just use the segments provided */ 2836 segs = (struct bus_dma_segment *) csio->data_ptr; 2837 (*eptr)(mp, segs, csio->sglist_cnt, 0); 2838 } 2839 } 2840 CAMLOCK_2_ISPLOCK(isp); 2841 if (mp->error) { 2842 int retval = CMD_COMPLETE; 2843 if (mp->error == MUSHERR_NOQENTRIES) { 2844 retval = CMD_EAGAIN; 2845 } else if (mp->error == EFBIG) { 2846 XS_SETERR(csio, CAM_REQ_TOO_BIG); 2847 } else if (mp->error == EINVAL) { 2848 XS_SETERR(csio, CAM_REQ_INVALID); 2849 } else { 2850 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 2851 } 2852 return (retval); 2853 } 2854 mbxsync: 2855 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2856 isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq); 2857 } 2858 switch (rq->req_header.rqs_entry_type) { 2859 case RQSTYPE_REQUEST: 2860 isp_put_request(isp, rq, qep); 2861 break; 2862 case RQSTYPE_CMDONLY: 2863 isp_put_extended_request(isp, (ispextreq_t *)rq, 2864 (ispextreq_t *)qep); 2865 break; 2866 case RQSTYPE_T2RQS: 2867 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 2868 break; 2869 case RQSTYPE_A64: 2870 case RQSTYPE_T3RQS: 2871 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep); 2872 break; 2873 case RQSTYPE_T7RQS: 2874 isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep); 2875 break; 2876 } 2877 return (CMD_QUEUED); 2878 } 2879 2880 static void 2881 isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle) 2882 { 2883 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2884 bus_dmamap_t *dp; 2885 dp = &pcs->dmaps[isp_handle_index(handle & ISP_HANDLE_MASK)]; 2886 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2887 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 2888 } else { 2889 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 2890 } 2891 bus_dmamap_unload(pcs->dmat, *dp); 2892 } 2893 2894 2895 static void 2896 isp_pci_reset0(ispsoftc_t *isp) 2897 { 2898 ISP_DISABLE_INTS(isp); 2899 } 2900 2901 static void 2902 isp_pci_reset1(ispsoftc_t *isp) 2903 { 2904 if (!IS_24XX(isp)) { 2905 /* Make sure the BIOS is disabled */ 2906 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 2907 } 2908 /* and enable interrupts */ 2909 ISP_ENABLE_INTS(isp); 2910 } 2911 2912 static void 2913 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) 2914 { 2915 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2916 if (msg) 2917 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2918 else 2919 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2920 if (IS_SCSI(isp)) 2921 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2922 else 2923 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2924 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2925 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2926 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2927 2928 2929 if (IS_SCSI(isp)) { 2930 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2931 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2932 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2933 ISP_READ(isp, CDMA_FIFO_STS)); 2934 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2935 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2936 ISP_READ(isp, DDMA_FIFO_STS)); 2937 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2938 ISP_READ(isp, SXP_INTERRUPT), 2939 ISP_READ(isp, SXP_GROSS_ERR), 2940 ISP_READ(isp, SXP_PINS_CTRL)); 2941 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2942 } 2943 printf(" mbox regs: %x %x %x %x %x\n", 2944 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2945 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2946 ISP_READ(isp, OUTMAILBOX4)); 2947 printf(" PCI Status Command/Status=%x\n", 2948 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2949 } 2950