1 /*- 2 * 3 * Copyright (c) 1997-2006 by Matthew Jacob 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice immediately at the beginning of the file, without modification, 11 * this list of conditions, and the following disclaimer. 12 * 2. The name of the author may not be used to endorse or promote products 13 * derived from this software without specific prior written permission. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 19 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 */ 28 /* 29 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 30 * FreeBSD Version. 31 */ 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/kernel.h> 38 #include <sys/module.h> 39 #if __FreeBSD_version >= 700000 40 #include <sys/linker.h> 41 #include <sys/firmware.h> 42 #endif 43 #include <sys/bus.h> 44 #if __FreeBSD_version < 500000 45 #include <pci/pcireg.h> 46 #include <pci/pcivar.h> 47 #include <machine/bus_memio.h> 48 #include <machine/bus_pio.h> 49 #else 50 #include <sys/stdint.h> 51 #include <dev/pci/pcireg.h> 52 #include <dev/pci/pcivar.h> 53 #endif 54 #include <machine/bus.h> 55 #include <machine/resource.h> 56 #include <sys/rman.h> 57 #include <sys/malloc.h> 58 59 #include <dev/isp/isp_freebsd.h> 60 61 #if __FreeBSD_version < 500000 62 #define BUS_PROBE_DEFAULT 0 63 #endif 64 65 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int); 66 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t); 67 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int); 68 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t); 69 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int); 70 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t); 71 static int 72 isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 73 static int 74 isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 75 static int 76 isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 77 static int isp_pci_mbxdma(ispsoftc_t *); 78 static int 79 isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t); 80 static void 81 isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint32_t); 82 83 84 static void isp_pci_reset0(ispsoftc_t *); 85 static void isp_pci_reset1(ispsoftc_t *); 86 static void isp_pci_dumpregs(ispsoftc_t *, const char *); 87 88 static struct ispmdvec mdvec = { 89 isp_pci_rd_isr, 90 isp_pci_rd_reg, 91 isp_pci_wr_reg, 92 isp_pci_mbxdma, 93 isp_pci_dmasetup, 94 isp_pci_dmateardown, 95 isp_pci_reset0, 96 isp_pci_reset1, 97 isp_pci_dumpregs, 98 NULL, 99 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 100 }; 101 102 static struct ispmdvec mdvec_1080 = { 103 isp_pci_rd_isr, 104 isp_pci_rd_reg_1080, 105 isp_pci_wr_reg_1080, 106 isp_pci_mbxdma, 107 isp_pci_dmasetup, 108 isp_pci_dmateardown, 109 isp_pci_reset0, 110 isp_pci_reset1, 111 isp_pci_dumpregs, 112 NULL, 113 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 114 }; 115 116 static struct ispmdvec mdvec_12160 = { 117 isp_pci_rd_isr, 118 isp_pci_rd_reg_1080, 119 isp_pci_wr_reg_1080, 120 isp_pci_mbxdma, 121 isp_pci_dmasetup, 122 isp_pci_dmateardown, 123 isp_pci_reset0, 124 isp_pci_reset1, 125 isp_pci_dumpregs, 126 NULL, 127 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 128 }; 129 130 static struct ispmdvec mdvec_2100 = { 131 isp_pci_rd_isr, 132 isp_pci_rd_reg, 133 isp_pci_wr_reg, 134 isp_pci_mbxdma, 135 isp_pci_dmasetup, 136 isp_pci_dmateardown, 137 isp_pci_reset0, 138 isp_pci_reset1, 139 isp_pci_dumpregs 140 }; 141 142 static struct ispmdvec mdvec_2200 = { 143 isp_pci_rd_isr, 144 isp_pci_rd_reg, 145 isp_pci_wr_reg, 146 isp_pci_mbxdma, 147 isp_pci_dmasetup, 148 isp_pci_dmateardown, 149 isp_pci_reset0, 150 isp_pci_reset1, 151 isp_pci_dumpregs 152 }; 153 154 static struct ispmdvec mdvec_2300 = { 155 isp_pci_rd_isr_2300, 156 isp_pci_rd_reg, 157 isp_pci_wr_reg, 158 isp_pci_mbxdma, 159 isp_pci_dmasetup, 160 isp_pci_dmateardown, 161 isp_pci_reset0, 162 isp_pci_reset1, 163 isp_pci_dumpregs 164 }; 165 166 static struct ispmdvec mdvec_2400 = { 167 isp_pci_rd_isr_2400, 168 isp_pci_rd_reg_2400, 169 isp_pci_wr_reg_2400, 170 isp_pci_mbxdma, 171 isp_pci_dmasetup, 172 isp_pci_dmateardown, 173 isp_pci_reset0, 174 isp_pci_reset1, 175 NULL 176 }; 177 178 #ifndef PCIM_CMD_INVEN 179 #define PCIM_CMD_INVEN 0x10 180 #endif 181 #ifndef PCIM_CMD_BUSMASTEREN 182 #define PCIM_CMD_BUSMASTEREN 0x0004 183 #endif 184 #ifndef PCIM_CMD_PERRESPEN 185 #define PCIM_CMD_PERRESPEN 0x0040 186 #endif 187 #ifndef PCIM_CMD_SEREN 188 #define PCIM_CMD_SEREN 0x0100 189 #endif 190 #ifndef PCIM_CMD_INTX_DISABLE 191 #define PCIM_CMD_INTX_DISABLE 0x0400 192 #endif 193 194 #ifndef PCIR_COMMAND 195 #define PCIR_COMMAND 0x04 196 #endif 197 198 #ifndef PCIR_CACHELNSZ 199 #define PCIR_CACHELNSZ 0x0c 200 #endif 201 202 #ifndef PCIR_LATTIMER 203 #define PCIR_LATTIMER 0x0d 204 #endif 205 206 #ifndef PCIR_ROMADDR 207 #define PCIR_ROMADDR 0x30 208 #endif 209 210 #ifndef PCI_VENDOR_QLOGIC 211 #define PCI_VENDOR_QLOGIC 0x1077 212 #endif 213 214 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 215 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 216 #endif 217 218 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 219 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 220 #endif 221 222 #ifndef PCI_PRODUCT_QLOGIC_ISP10160 223 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 224 #endif 225 226 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 227 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 228 #endif 229 230 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 231 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 232 #endif 233 234 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 235 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 236 #endif 237 238 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 239 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 240 #endif 241 242 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 243 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 244 #endif 245 246 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 247 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 248 #endif 249 250 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 251 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 252 #endif 253 254 #ifndef PCI_PRODUCT_QLOGIC_ISP2322 255 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 256 #endif 257 258 #ifndef PCI_PRODUCT_QLOGIC_ISP2422 259 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 260 #endif 261 262 #ifndef PCI_PRODUCT_QLOGIC_ISP6312 263 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 264 #endif 265 266 #ifndef PCI_PRODUCT_QLOGIC_ISP6322 267 #define PCI_PRODUCT_QLOGIC_ISP6322 0x6322 268 #endif 269 270 271 #define PCI_QLOGIC_ISP1020 \ 272 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 273 274 #define PCI_QLOGIC_ISP1080 \ 275 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 276 277 #define PCI_QLOGIC_ISP10160 \ 278 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 279 280 #define PCI_QLOGIC_ISP12160 \ 281 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 282 283 #define PCI_QLOGIC_ISP1240 \ 284 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 285 286 #define PCI_QLOGIC_ISP1280 \ 287 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 288 289 #define PCI_QLOGIC_ISP2100 \ 290 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 291 292 #define PCI_QLOGIC_ISP2200 \ 293 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 294 295 #define PCI_QLOGIC_ISP2300 \ 296 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 297 298 #define PCI_QLOGIC_ISP2312 \ 299 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 300 301 #define PCI_QLOGIC_ISP2322 \ 302 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) 303 304 #define PCI_QLOGIC_ISP2422 \ 305 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) 306 307 #define PCI_QLOGIC_ISP6312 \ 308 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) 309 310 #define PCI_QLOGIC_ISP6322 \ 311 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC) 312 313 /* 314 * Odd case for some AMI raid cards... We need to *not* attach to this. 315 */ 316 #define AMI_RAID_SUBVENDOR_ID 0x101e 317 318 #define IO_MAP_REG 0x10 319 #define MEM_MAP_REG 0x14 320 321 #define PCI_DFLT_LTNCY 0x40 322 #define PCI_DFLT_LNSZ 0x10 323 324 static int isp_pci_probe (device_t); 325 static int isp_pci_attach (device_t); 326 static int isp_pci_detach (device_t); 327 328 329 struct isp_pcisoftc { 330 ispsoftc_t pci_isp; 331 device_t pci_dev; 332 struct resource * pci_reg; 333 bus_space_tag_t pci_st; 334 bus_space_handle_t pci_sh; 335 void * ih; 336 int16_t pci_poff[_NREG_BLKS]; 337 bus_dma_tag_t dmat; 338 bus_dmamap_t *dmaps; 339 }; 340 341 342 static device_method_t isp_pci_methods[] = { 343 /* Device interface */ 344 DEVMETHOD(device_probe, isp_pci_probe), 345 DEVMETHOD(device_attach, isp_pci_attach), 346 DEVMETHOD(device_detach, isp_pci_detach), 347 { 0, 0 } 348 }; 349 static void isp_pci_intr(void *); 350 351 static driver_t isp_pci_driver = { 352 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 353 }; 354 static devclass_t isp_devclass; 355 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 356 #if __FreeBSD_version < 700000 357 extern ispfwfunc *isp_get_firmware_p; 358 #endif 359 360 static int 361 isp_pci_probe(device_t dev) 362 { 363 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 364 case PCI_QLOGIC_ISP1020: 365 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 366 break; 367 case PCI_QLOGIC_ISP1080: 368 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 369 break; 370 case PCI_QLOGIC_ISP1240: 371 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 372 break; 373 case PCI_QLOGIC_ISP1280: 374 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 375 break; 376 case PCI_QLOGIC_ISP10160: 377 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 378 break; 379 case PCI_QLOGIC_ISP12160: 380 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 381 return (ENXIO); 382 } 383 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 384 break; 385 case PCI_QLOGIC_ISP2100: 386 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 387 break; 388 case PCI_QLOGIC_ISP2200: 389 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 390 break; 391 case PCI_QLOGIC_ISP2300: 392 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 393 break; 394 case PCI_QLOGIC_ISP2312: 395 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 396 break; 397 case PCI_QLOGIC_ISP2322: 398 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); 399 break; 400 case PCI_QLOGIC_ISP2422: 401 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); 402 break; 403 case PCI_QLOGIC_ISP6312: 404 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); 405 break; 406 case PCI_QLOGIC_ISP6322: 407 device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter"); 408 break; 409 default: 410 return (ENXIO); 411 } 412 if (isp_announced == 0 && bootverbose) { 413 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 414 "Core Version %d.%d\n", 415 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 416 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 417 isp_announced++; 418 } 419 /* 420 * XXXX: Here is where we might load the f/w module 421 * XXXX: (or increase a reference count to it). 422 */ 423 return (BUS_PROBE_DEFAULT); 424 } 425 426 #if __FreeBSD_version < 500000 427 static void 428 isp_get_options(device_t dev, ispsoftc_t *isp) 429 { 430 uint64_t wwn; 431 int bitmap, unit; 432 433 callout_handle_init(&isp->isp_osinfo.ldt); 434 callout_handle_init(&isp->isp_osinfo.gdt); 435 436 unit = device_get_unit(dev); 437 if (getenv_int("isp_disable", &bitmap)) { 438 if (bitmap & (1 << unit)) { 439 isp->isp_osinfo.disabled = 1; 440 return; 441 } 442 } 443 444 if (getenv_int("isp_no_fwload", &bitmap)) { 445 if (bitmap & (1 << unit)) 446 isp->isp_confopts |= ISP_CFG_NORELOAD; 447 } 448 if (getenv_int("isp_fwload", &bitmap)) { 449 if (bitmap & (1 << unit)) 450 isp->isp_confopts &= ~ISP_CFG_NORELOAD; 451 } 452 if (getenv_int("isp_no_nvram", &bitmap)) { 453 if (bitmap & (1 << unit)) 454 isp->isp_confopts |= ISP_CFG_NONVRAM; 455 } 456 if (getenv_int("isp_nvram", &bitmap)) { 457 if (bitmap & (1 << unit)) 458 isp->isp_confopts &= ~ISP_CFG_NONVRAM; 459 } 460 if (getenv_int("isp_fcduplex", &bitmap)) { 461 if (bitmap & (1 << unit)) 462 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 463 } 464 if (getenv_int("isp_no_fcduplex", &bitmap)) { 465 if (bitmap & (1 << unit)) 466 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX; 467 } 468 if (getenv_int("isp_nport", &bitmap)) { 469 if (bitmap & (1 << unit)) 470 isp->isp_confopts |= ISP_CFG_NPORT; 471 } 472 473 /* 474 * Because the resource_*_value functions can neither return 475 * 64 bit integer values, nor can they be directly coerced 476 * to interpret the right hand side of the assignment as 477 * you want them to interpret it, we have to force WWN 478 * hint replacement to specify WWN strings with a leading 479 * 'w' (e..g w50000000aaaa0001). Sigh. 480 */ 481 if (getenv_quad("isp_portwwn", &wwn)) { 482 isp->isp_osinfo.default_port_wwn = wwn; 483 isp->isp_confopts |= ISP_CFG_OWNWWPN; 484 } 485 if (isp->isp_osinfo.default_port_wwn == 0) { 486 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 487 } 488 489 if (getenv_quad("isp_nodewwn", &wwn)) { 490 isp->isp_osinfo.default_node_wwn = wwn; 491 isp->isp_confopts |= ISP_CFG_OWNWWNN; 492 } 493 if (isp->isp_osinfo.default_node_wwn == 0) { 494 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 495 } 496 497 bitmap = 0; 498 (void) getenv_int("isp_debug", &bitmap); 499 if (bitmap) { 500 isp->isp_dblev = bitmap; 501 } else { 502 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 503 } 504 if (bootverbose) { 505 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 506 } 507 508 bitmap = 0; 509 (void) getenv_int("isp_fabric_hysteresis", &bitmap); 510 if (bitmap >= 0 && bitmap < 256) { 511 isp->isp_osinfo.hysteresis = bitmap; 512 } else { 513 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 514 } 515 516 bitmap = 0; 517 (void) getenv_int("isp_loop_down_limit", &bitmap); 518 if (bitmap >= 0 && bitmap < 0xffff) { 519 isp->isp_osinfo.loop_down_limit = bitmap; 520 } else { 521 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 522 } 523 524 bitmap = 0; 525 (void) getenv_int("isp_gone_device_time", &bitmap); 526 if (bitmap >= 0 && bitmap < 0xffff) { 527 isp->isp_osinfo.gone_device_time = bitmap; 528 } else { 529 isp->isp_osinfo.gone_device_time = isp_gone_device_time; 530 } 531 532 533 #ifdef ISP_FW_CRASH_DUMP 534 bitmap = 0; 535 if (getenv_int("isp_fw_dump_enable", &bitmap)) { 536 if (bitmap & (1 << unit) { 537 size_t amt = 0; 538 if (IS_2200(isp)) { 539 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 540 } else if (IS_23XX(isp)) { 541 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 542 } 543 if (amt) { 544 FCPARAM(isp)->isp_dump_data = 545 malloc(amt, M_DEVBUF, M_WAITOK); 546 memset(FCPARAM(isp)->isp_dump_data, 0, amt); 547 } else { 548 device_printf(dev, 549 "f/w crash dumps not supported for card\n"); 550 } 551 } 552 } 553 #endif 554 bitmap = 0; 555 if (getenv_int("role", &bitmap)) { 556 isp->isp_role = bitmap; 557 } else { 558 isp->isp_role = ISP_DEFAULT_ROLES; 559 } 560 } 561 562 static void 563 isp_get_pci_options(device_t dev, int *m1, int *m2) 564 { 565 int bitmap; 566 int unit = device_get_unit(dev); 567 568 *m1 = PCIM_CMD_MEMEN; 569 *m2 = PCIM_CMD_PORTEN; 570 if (getenv_int("isp_mem_map", &bitmap)) { 571 if (bitmap & (1 << unit)) { 572 *m1 = PCIM_CMD_MEMEN; 573 *m2 = PCIM_CMD_PORTEN; 574 } 575 } 576 bitmap = 0; 577 if (getenv_int("isp_io_map", &bitmap)) { 578 if (bitmap & (1 << unit)) { 579 *m1 = PCIM_CMD_PORTEN; 580 *m2 = PCIM_CMD_MEMEN; 581 } 582 } 583 } 584 #else 585 static void 586 isp_get_options(device_t dev, ispsoftc_t *isp) 587 { 588 int tval; 589 const char *sptr; 590 591 callout_handle_init(&isp->isp_osinfo.ldt); 592 callout_handle_init(&isp->isp_osinfo.gdt); 593 594 /* 595 * Figure out if we're supposed to skip this one. 596 */ 597 598 tval = 0; 599 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 600 "disable", &tval) == 0 && tval) { 601 device_printf(dev, "disabled at user request\n"); 602 isp->isp_osinfo.disabled = 1; 603 return; 604 } 605 606 tval = -1; 607 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 608 "role", &tval) == 0 && tval != -1) { 609 tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 610 isp->isp_role = tval; 611 device_printf(dev, "setting role to 0x%x\n", isp->isp_role); 612 } else { 613 #ifdef ISP_TARGET_MODE 614 isp->isp_role = ISP_ROLE_TARGET; 615 #else 616 isp->isp_role = ISP_DEFAULT_ROLES; 617 #endif 618 } 619 620 tval = 0; 621 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 622 "fwload_disable", &tval) == 0 && tval != 0) { 623 isp->isp_confopts |= ISP_CFG_NORELOAD; 624 } 625 tval = 0; 626 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 627 "ignore_nvram", &tval) == 0 && tval != 0) { 628 isp->isp_confopts |= ISP_CFG_NONVRAM; 629 } 630 tval = 0; 631 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 632 "fullduplex", &tval) == 0 && tval != 0) { 633 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 634 } 635 #ifdef ISP_FW_CRASH_DUMP 636 tval = 0; 637 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 638 "fw_dump_enable", &tval) == 0 && tval != 0) { 639 size_t amt = 0; 640 if (IS_2200(isp)) { 641 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 642 } else if (IS_23XX(isp)) { 643 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 644 } 645 if (amt) { 646 FCPARAM(isp)->isp_dump_data = 647 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 648 } else { 649 device_printf(dev, 650 "f/w crash dumps not supported for this model\n"); 651 } 652 } 653 #endif 654 655 sptr = 0; 656 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 657 "topology", (const char **) &sptr) == 0 && sptr != 0) { 658 if (strcmp(sptr, "lport") == 0) { 659 isp->isp_confopts |= ISP_CFG_LPORT; 660 } else if (strcmp(sptr, "nport") == 0) { 661 isp->isp_confopts |= ISP_CFG_NPORT; 662 } else if (strcmp(sptr, "lport-only") == 0) { 663 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 664 } else if (strcmp(sptr, "nport-only") == 0) { 665 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 666 } 667 } 668 669 /* 670 * Because the resource_*_value functions can neither return 671 * 64 bit integer values, nor can they be directly coerced 672 * to interpret the right hand side of the assignment as 673 * you want them to interpret it, we have to force WWN 674 * hint replacement to specify WWN strings with a leading 675 * 'w' (e..g w50000000aaaa0001). Sigh. 676 */ 677 sptr = 0; 678 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 679 "portwwn", (const char **) &sptr); 680 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 681 char *eptr = 0; 682 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 683 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 684 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 685 isp->isp_osinfo.default_port_wwn = 0; 686 } else { 687 isp->isp_confopts |= ISP_CFG_OWNWWPN; 688 } 689 } 690 if (isp->isp_osinfo.default_port_wwn == 0) { 691 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 692 } 693 694 sptr = 0; 695 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 696 "nodewwn", (const char **) &sptr); 697 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 698 char *eptr = 0; 699 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 700 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 701 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 702 isp->isp_osinfo.default_node_wwn = 0; 703 } else { 704 isp->isp_confopts |= ISP_CFG_OWNWWNN; 705 } 706 } 707 if (isp->isp_osinfo.default_node_wwn == 0) { 708 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 709 } 710 711 isp->isp_osinfo.default_id = -1; 712 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 713 "iid", &tval) == 0) { 714 isp->isp_osinfo.default_id = tval; 715 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 716 } 717 if (isp->isp_osinfo.default_id == -1) { 718 if (IS_FC(isp)) { 719 isp->isp_osinfo.default_id = 109; 720 } else { 721 isp->isp_osinfo.default_id = 7; 722 } 723 } 724 725 /* 726 * Set up logging levels. 727 */ 728 tval = 0; 729 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 730 "debug", &tval); 731 if (tval) { 732 isp->isp_dblev = tval; 733 } else { 734 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 735 } 736 if (bootverbose) { 737 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 738 } 739 740 tval = 0; 741 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 742 "hysteresis", &tval); 743 if (tval >= 0 && tval < 256) { 744 isp->isp_osinfo.hysteresis = tval; 745 } else { 746 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 747 } 748 749 tval = -1; 750 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 751 "loop_down_limit", &tval); 752 if (tval >= 0 && tval < 0xffff) { 753 isp->isp_osinfo.loop_down_limit = tval; 754 } else { 755 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 756 } 757 758 tval = -1; 759 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 760 "gone_device_time", &tval); 761 if (tval >= 0 && tval < 0xffff) { 762 isp->isp_osinfo.gone_device_time = tval; 763 } else { 764 isp->isp_osinfo.gone_device_time = isp_gone_device_time; 765 } 766 } 767 768 static void 769 isp_get_pci_options(device_t dev, int *m1, int *m2) 770 { 771 int tval; 772 /* 773 * Which we should try first - memory mapping or i/o mapping? 774 * 775 * We used to try memory first followed by i/o on alpha, otherwise 776 * the reverse, but we should just try memory first all the time now. 777 */ 778 *m1 = PCIM_CMD_MEMEN; 779 *m2 = PCIM_CMD_PORTEN; 780 781 tval = 0; 782 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 783 "prefer_iomap", &tval) == 0 && tval != 0) { 784 *m1 = PCIM_CMD_PORTEN; 785 *m2 = PCIM_CMD_MEMEN; 786 } 787 tval = 0; 788 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 789 "prefer_memmap", &tval) == 0 && tval != 0) { 790 *m1 = PCIM_CMD_MEMEN; 791 *m2 = PCIM_CMD_PORTEN; 792 } 793 } 794 #endif 795 796 static int 797 isp_pci_attach(device_t dev) 798 { 799 struct resource *regs, *irq; 800 int rtp, rgd, iqd, m1, m2; 801 uint32_t data, cmd, linesz, psize, basetype; 802 struct isp_pcisoftc *pcs; 803 ispsoftc_t *isp = NULL; 804 struct ispmdvec *mdvp; 805 #if __FreeBSD_version >= 500000 806 int locksetup = 0; 807 #endif 808 809 pcs = device_get_softc(dev); 810 if (pcs == NULL) { 811 device_printf(dev, "cannot get softc\n"); 812 return (ENOMEM); 813 } 814 memset(pcs, 0, sizeof (*pcs)); 815 pcs->pci_dev = dev; 816 isp = &pcs->pci_isp; 817 818 /* 819 * Set and Get Generic Options 820 */ 821 isp_get_options(dev, isp); 822 823 /* 824 * Check to see if options have us disabled 825 */ 826 if (isp->isp_osinfo.disabled) { 827 /* 828 * But return zero to preserve unit numbering 829 */ 830 return (0); 831 } 832 833 /* 834 * Get PCI options- which in this case are just mapping preferences. 835 */ 836 isp_get_pci_options(dev, &m1, &m2); 837 838 linesz = PCI_DFLT_LNSZ; 839 irq = regs = NULL; 840 rgd = rtp = iqd = 0; 841 842 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 843 if (cmd & m1) { 844 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 845 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 846 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 847 } 848 if (regs == NULL && (cmd & m2)) { 849 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 850 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 851 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 852 } 853 if (regs == NULL) { 854 device_printf(dev, "unable to map any ports\n"); 855 goto bad; 856 } 857 if (bootverbose) { 858 device_printf(dev, "using %s space register mapping\n", 859 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 860 } 861 pcs->pci_dev = dev; 862 pcs->pci_reg = regs; 863 pcs->pci_st = rman_get_bustag(regs); 864 pcs->pci_sh = rman_get_bushandle(regs); 865 866 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 867 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 868 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 869 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 870 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 871 mdvp = &mdvec; 872 basetype = ISP_HA_SCSI_UNKNOWN; 873 psize = sizeof (sdparam); 874 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 875 mdvp = &mdvec; 876 basetype = ISP_HA_SCSI_UNKNOWN; 877 psize = sizeof (sdparam); 878 } 879 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 880 mdvp = &mdvec_1080; 881 basetype = ISP_HA_SCSI_1080; 882 psize = sizeof (sdparam); 883 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 884 ISP1080_DMA_REGS_OFF; 885 } 886 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 887 mdvp = &mdvec_1080; 888 basetype = ISP_HA_SCSI_1240; 889 psize = 2 * sizeof (sdparam); 890 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 891 ISP1080_DMA_REGS_OFF; 892 } 893 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 894 mdvp = &mdvec_1080; 895 basetype = ISP_HA_SCSI_1280; 896 psize = 2 * sizeof (sdparam); 897 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 898 ISP1080_DMA_REGS_OFF; 899 } 900 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 901 mdvp = &mdvec_12160; 902 basetype = ISP_HA_SCSI_10160; 903 psize = sizeof (sdparam); 904 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 905 ISP1080_DMA_REGS_OFF; 906 } 907 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 908 mdvp = &mdvec_12160; 909 basetype = ISP_HA_SCSI_12160; 910 psize = 2 * sizeof (sdparam); 911 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 912 ISP1080_DMA_REGS_OFF; 913 } 914 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 915 mdvp = &mdvec_2100; 916 basetype = ISP_HA_FC_2100; 917 psize = sizeof (fcparam); 918 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 919 PCI_MBOX_REGS2100_OFF; 920 if (pci_get_revid(dev) < 3) { 921 /* 922 * XXX: Need to get the actual revision 923 * XXX: number of the 2100 FB. At any rate, 924 * XXX: lower cache line size for early revision 925 * XXX; boards. 926 */ 927 linesz = 1; 928 } 929 } 930 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 931 mdvp = &mdvec_2200; 932 basetype = ISP_HA_FC_2200; 933 psize = sizeof (fcparam); 934 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 935 PCI_MBOX_REGS2100_OFF; 936 } 937 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 938 mdvp = &mdvec_2300; 939 basetype = ISP_HA_FC_2300; 940 psize = sizeof (fcparam); 941 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 942 PCI_MBOX_REGS2300_OFF; 943 } 944 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 || 945 pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 946 mdvp = &mdvec_2300; 947 basetype = ISP_HA_FC_2312; 948 psize = sizeof (fcparam); 949 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 950 PCI_MBOX_REGS2300_OFF; 951 } 952 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 || 953 pci_get_devid(dev) == PCI_QLOGIC_ISP6322) { 954 mdvp = &mdvec_2300; 955 basetype = ISP_HA_FC_2322; 956 psize = sizeof (fcparam); 957 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 958 PCI_MBOX_REGS2300_OFF; 959 } 960 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) { 961 mdvp = &mdvec_2400; 962 basetype = ISP_HA_FC_2400; 963 psize = sizeof (fcparam); 964 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 965 PCI_MBOX_REGS2400_OFF; 966 } 967 isp = &pcs->pci_isp; 968 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 969 if (isp->isp_param == NULL) { 970 device_printf(dev, "cannot allocate parameter data\n"); 971 goto bad; 972 } 973 isp->isp_mdvec = mdvp; 974 isp->isp_type = basetype; 975 isp->isp_revision = pci_get_revid(dev); 976 isp->isp_dev = dev; 977 978 #if __FreeBSD_version >= 700000 979 /* 980 * Try and find firmware for this device. 981 */ 982 { 983 char fwname[32]; 984 unsigned int did = pci_get_device(dev); 985 986 /* 987 * Map a few pci ids to fw names 988 */ 989 switch (did) { 990 case PCI_PRODUCT_QLOGIC_ISP1020: 991 did = 0x1040; 992 break; 993 case PCI_PRODUCT_QLOGIC_ISP1240: 994 did = 0x1080; 995 break; 996 case PCI_PRODUCT_QLOGIC_ISP10160: 997 case PCI_PRODUCT_QLOGIC_ISP12160: 998 did = 0x12160; 999 break; 1000 case PCI_PRODUCT_QLOGIC_ISP6312: 1001 case PCI_PRODUCT_QLOGIC_ISP2312: 1002 did = 0x2300; 1003 break; 1004 case PCI_PRODUCT_QLOGIC_ISP6322: 1005 did = 0x2322; 1006 break; 1007 case PCI_PRODUCT_QLOGIC_ISP2422: 1008 did = 0x2400; 1009 break; 1010 default: 1011 break; 1012 } 1013 1014 isp->isp_osinfo.fw = NULL; 1015 if (isp->isp_role & ISP_ROLE_TARGET) { 1016 snprintf(fwname, sizeof (fwname), "isp_%04x_it", did); 1017 isp->isp_osinfo.fw = firmware_get(fwname); 1018 } 1019 if (isp->isp_osinfo.fw == NULL) { 1020 snprintf(fwname, sizeof (fwname), "isp_%04x", did); 1021 isp->isp_osinfo.fw = firmware_get(fwname); 1022 } 1023 if (isp->isp_osinfo.fw != NULL) { 1024 union { 1025 const void *fred; 1026 uint16_t *bob; 1027 } u; 1028 u.fred = isp->isp_osinfo.fw->data; 1029 isp->isp_mdvec->dv_ispfw = u.bob; 1030 } 1031 } 1032 #else 1033 if (isp_get_firmware_p) { 1034 int device = (int) pci_get_device(dev); 1035 #ifdef ISP_TARGET_MODE 1036 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 1037 #else 1038 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 1039 #endif 1040 } 1041 #endif 1042 1043 /* 1044 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 1045 * are set. 1046 */ 1047 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 1048 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 1049 1050 if (IS_2300(isp)) { /* per QLogic errata */ 1051 cmd &= ~PCIM_CMD_INVEN; 1052 } 1053 1054 if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 1055 cmd &= ~PCIM_CMD_INTX_DISABLE; 1056 } 1057 1058 #ifdef WE_KNEW_WHAT_WE_WERE_DOING 1059 if (IS_24XX(isp)) { 1060 int reg; 1061 1062 cmd &= ~PCIM_CMD_INTX_DISABLE; 1063 1064 /* 1065 * Is this a PCI-X card? If so, set max read byte count. 1066 */ 1067 if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) { 1068 uint16_t pxcmd; 1069 reg += 2; 1070 1071 pxcmd = pci_read_config(dev, reg, 2); 1072 pxcmd &= ~0xc; 1073 pxcmd |= 0x8; 1074 pci_write_config(dev, reg, 2, pxcmd); 1075 } 1076 1077 /* 1078 * Is this a PCI Express card? If so, set max read byte count. 1079 */ 1080 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 1081 uint16_t pectl; 1082 1083 reg += 0x8; 1084 pectl = pci_read_config(dev, reg, 2); 1085 pectl &= ~0x7000; 1086 pectl |= 0x4000; 1087 pci_write_config(dev, reg, 2, pectl); 1088 } 1089 } 1090 #else 1091 if (IS_24XX(isp)) { 1092 cmd &= ~PCIM_CMD_INTX_DISABLE; 1093 } 1094 #endif 1095 1096 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 1097 1098 /* 1099 * Make sure the Cache Line Size register is set sensibly. 1100 */ 1101 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 1102 if (data != linesz) { 1103 data = PCI_DFLT_LNSZ; 1104 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 1105 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 1106 } 1107 1108 /* 1109 * Make sure the Latency Timer is sane. 1110 */ 1111 data = pci_read_config(dev, PCIR_LATTIMER, 1); 1112 if (data < PCI_DFLT_LTNCY) { 1113 data = PCI_DFLT_LTNCY; 1114 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 1115 pci_write_config(dev, PCIR_LATTIMER, data, 1); 1116 } 1117 1118 /* 1119 * Make sure we've disabled the ROM. 1120 */ 1121 data = pci_read_config(dev, PCIR_ROMADDR, 4); 1122 data &= ~1; 1123 pci_write_config(dev, PCIR_ROMADDR, data, 4); 1124 1125 iqd = 0; 1126 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, 1127 RF_ACTIVE | RF_SHAREABLE); 1128 if (irq == NULL) { 1129 device_printf(dev, "could not allocate interrupt\n"); 1130 goto bad; 1131 } 1132 1133 #if __FreeBSD_version >= 500000 1134 /* Make sure the lock is set up. */ 1135 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 1136 locksetup++; 1137 #endif 1138 1139 if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) { 1140 device_printf(dev, "could not setup interrupt\n"); 1141 goto bad; 1142 } 1143 1144 /* 1145 * Last minute checks... 1146 */ 1147 if (IS_23XX(isp) || IS_24XX(isp)) { 1148 isp->isp_port = pci_get_function(dev); 1149 } 1150 1151 if (IS_23XX(isp)) { 1152 /* 1153 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 1154 */ 1155 isp->isp_touched = 1; 1156 } 1157 1158 /* 1159 * Make sure we're in reset state. 1160 */ 1161 ISP_LOCK(isp); 1162 isp_reset(isp); 1163 if (isp->isp_state != ISP_RESETSTATE) { 1164 ISP_UNLOCK(isp); 1165 goto bad; 1166 } 1167 isp_init(isp); 1168 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 1169 isp_uninit(isp); 1170 ISP_UNLOCK(isp); 1171 goto bad; 1172 } 1173 isp_attach(isp); 1174 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 1175 isp_uninit(isp); 1176 ISP_UNLOCK(isp); 1177 goto bad; 1178 } 1179 /* 1180 * XXXX: Here is where we might unload the f/w module 1181 * XXXX: (or decrease the reference count to it). 1182 */ 1183 ISP_UNLOCK(isp); 1184 1185 return (0); 1186 1187 bad: 1188 1189 if (pcs && pcs->ih) { 1190 (void) bus_teardown_intr(dev, irq, pcs->ih); 1191 } 1192 1193 #if __FreeBSD_version >= 500000 1194 if (locksetup && isp) { 1195 mtx_destroy(&isp->isp_osinfo.lock); 1196 } 1197 #endif 1198 1199 if (irq) { 1200 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 1201 } 1202 1203 1204 if (regs) { 1205 (void) bus_release_resource(dev, rtp, rgd, regs); 1206 } 1207 1208 if (pcs) { 1209 if (pcs->pci_isp.isp_param) { 1210 #ifdef ISP_FW_CRASH_DUMP 1211 if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) { 1212 free(FCPARAM(isp)->isp_dump_data, M_DEVBUF); 1213 } 1214 #endif 1215 free(pcs->pci_isp.isp_param, M_DEVBUF); 1216 } 1217 } 1218 1219 /* 1220 * XXXX: Here is where we might unload the f/w module 1221 * XXXX: (or decrease the reference count to it). 1222 */ 1223 return (ENXIO); 1224 } 1225 1226 static int 1227 isp_pci_detach(device_t dev) 1228 { 1229 struct isp_pcisoftc *pcs; 1230 ispsoftc_t *isp; 1231 1232 pcs = device_get_softc(dev); 1233 if (pcs == NULL) { 1234 return (ENXIO); 1235 } 1236 isp = (ispsoftc_t *) pcs; 1237 ISP_DISABLE_INTS(isp); 1238 return (0); 1239 } 1240 1241 static void 1242 isp_pci_intr(void *arg) 1243 { 1244 ispsoftc_t *isp = arg; 1245 uint32_t isr; 1246 uint16_t sema, mbox; 1247 1248 ISP_LOCK(isp); 1249 isp->isp_intcnt++; 1250 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 1251 isp->isp_intbogus++; 1252 } else { 1253 isp_intr(isp, isr, sema, mbox); 1254 } 1255 ISP_UNLOCK(isp); 1256 } 1257 1258 1259 #define IspVirt2Off(a, x) \ 1260 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 1261 _BLK_REG_SHFT] + ((x) & 0xfff)) 1262 1263 #define BXR2(pcs, off) \ 1264 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 1265 #define BXW2(pcs, off, v) \ 1266 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 1267 #define BXR4(pcs, off) \ 1268 bus_space_read_4(pcs->pci_st, pcs->pci_sh, off) 1269 #define BXW4(pcs, off, v) \ 1270 bus_space_write_4(pcs->pci_st, pcs->pci_sh, off, v) 1271 1272 1273 static __inline int 1274 isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp) 1275 { 1276 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1277 uint32_t val0, val1; 1278 int i = 0; 1279 1280 do { 1281 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 1282 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 1283 } while (val0 != val1 && ++i < 1000); 1284 if (val0 != val1) { 1285 return (1); 1286 } 1287 *rp = val0; 1288 return (0); 1289 } 1290 1291 static int 1292 isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, 1293 uint16_t *semap, uint16_t *mbp) 1294 { 1295 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1296 uint16_t isr, sema; 1297 1298 if (IS_2100(isp)) { 1299 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 1300 return (0); 1301 } 1302 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 1303 return (0); 1304 } 1305 } else { 1306 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 1307 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 1308 } 1309 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 1310 isr &= INT_PENDING_MASK(isp); 1311 sema &= BIU_SEMA_LOCK; 1312 if (isr == 0 && sema == 0) { 1313 return (0); 1314 } 1315 *isrp = isr; 1316 if ((*semap = sema) != 0) { 1317 if (IS_2100(isp)) { 1318 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 1319 return (0); 1320 } 1321 } else { 1322 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 1323 } 1324 } 1325 return (1); 1326 } 1327 1328 static int 1329 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, 1330 uint16_t *semap, uint16_t *mbox0p) 1331 { 1332 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1333 uint32_t hccr; 1334 uint32_t r2hisr; 1335 1336 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 1337 *isrp = 0; 1338 return (0); 1339 } 1340 r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU_R2HSTSLO)); 1341 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1342 if ((r2hisr & BIU_R2HST_INTR) == 0) { 1343 *isrp = 0; 1344 return (0); 1345 } 1346 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 1347 case ISPR2HST_ROM_MBX_OK: 1348 case ISPR2HST_ROM_MBX_FAIL: 1349 case ISPR2HST_MBX_OK: 1350 case ISPR2HST_MBX_FAIL: 1351 case ISPR2HST_ASYNC_EVENT: 1352 *isrp = r2hisr & 0xffff; 1353 *mbox0p = (r2hisr >> 16); 1354 *semap = 1; 1355 return (1); 1356 case ISPR2HST_RIO_16: 1357 *isrp = r2hisr & 0xffff; 1358 *mbox0p = ASYNC_RIO1; 1359 *semap = 1; 1360 return (1); 1361 case ISPR2HST_FPOST: 1362 *isrp = r2hisr & 0xffff; 1363 *mbox0p = ASYNC_CMD_CMPLT; 1364 *semap = 1; 1365 return (1); 1366 case ISPR2HST_FPOST_CTIO: 1367 *isrp = r2hisr & 0xffff; 1368 *mbox0p = ASYNC_CTIO_DONE; 1369 *semap = 1; 1370 return (1); 1371 case ISPR2HST_RSPQ_UPDATE: 1372 *isrp = r2hisr & 0xffff; 1373 *mbox0p = 0; 1374 *semap = 0; 1375 return (1); 1376 default: 1377 hccr = ISP_READ(isp, HCCR); 1378 if (hccr & HCCR_PAUSE) { 1379 ISP_WRITE(isp, HCCR, HCCR_RESET); 1380 isp_prt(isp, ISP_LOGERR, 1381 "RISC paused at interrupt (%x->%x)", hccr, 1382 ISP_READ(isp, HCCR)); 1383 ISP_WRITE(isp, BIU_ICR, 0); 1384 } else { 1385 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", 1386 r2hisr); 1387 } 1388 return (0); 1389 } 1390 } 1391 1392 static int 1393 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp, 1394 uint16_t *semap, uint16_t *mbox0p) 1395 { 1396 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1397 uint32_t r2hisr; 1398 1399 r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU2400_R2HSTSLO)); 1400 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1401 if ((r2hisr & BIU2400_R2HST_INTR) == 0) { 1402 *isrp = 0; 1403 return (0); 1404 } 1405 switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) { 1406 case ISP2400R2HST_ROM_MBX_OK: 1407 case ISP2400R2HST_ROM_MBX_FAIL: 1408 case ISP2400R2HST_MBX_OK: 1409 case ISP2400R2HST_MBX_FAIL: 1410 case ISP2400R2HST_ASYNC_EVENT: 1411 *isrp = r2hisr & 0xffff; 1412 *mbox0p = (r2hisr >> 16); 1413 *semap = 1; 1414 return (1); 1415 case ISP2400R2HST_RSPQ_UPDATE: 1416 case ISP2400R2HST_ATIO_RSPQ_UPDATE: 1417 case ISP2400R2HST_ATIO_RQST_UPDATE: 1418 *isrp = r2hisr & 0xffff; 1419 *mbox0p = 0; 1420 *semap = 0; 1421 return (1); 1422 default: 1423 ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT); 1424 isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr); 1425 return (0); 1426 } 1427 } 1428 1429 static uint32_t 1430 isp_pci_rd_reg(ispsoftc_t *isp, int regoff) 1431 { 1432 uint32_t rv; 1433 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1434 int oldconf = 0; 1435 1436 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1437 /* 1438 * We will assume that someone has paused the RISC processor. 1439 */ 1440 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1441 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1442 oldconf | BIU_PCI_CONF1_SXP); 1443 } 1444 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 1445 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1446 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 1447 } 1448 return (rv); 1449 } 1450 1451 static void 1452 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val) 1453 { 1454 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1455 int oldconf = 0; 1456 volatile int junk; 1457 1458 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1459 /* 1460 * We will assume that someone has paused the RISC processor. 1461 */ 1462 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1463 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1464 oldconf | BIU_PCI_CONF1_SXP); 1465 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1466 } 1467 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1468 junk = BXR2(pcs, IspVirt2Off(isp, regoff)); 1469 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1470 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 1471 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1472 } 1473 } 1474 1475 static uint32_t 1476 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff) 1477 { 1478 uint32_t rv, oc = 0; 1479 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1480 1481 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1482 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1483 uint32_t tc; 1484 /* 1485 * We will assume that someone has paused the RISC processor. 1486 */ 1487 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1488 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1489 if (regoff & SXP_BANK1_SELECT) 1490 tc |= BIU_PCI1080_CONF1_SXP1; 1491 else 1492 tc |= BIU_PCI1080_CONF1_SXP0; 1493 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1494 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1495 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1496 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1497 oc | BIU_PCI1080_CONF1_DMA); 1498 } 1499 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 1500 if (oc) { 1501 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1502 } 1503 return (rv); 1504 } 1505 1506 static void 1507 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val) 1508 { 1509 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1510 int oc = 0; 1511 volatile int junk; 1512 1513 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1514 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1515 uint32_t tc; 1516 /* 1517 * We will assume that someone has paused the RISC processor. 1518 */ 1519 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1520 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1521 if (regoff & SXP_BANK1_SELECT) 1522 tc |= BIU_PCI1080_CONF1_SXP1; 1523 else 1524 tc |= BIU_PCI1080_CONF1_SXP0; 1525 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1526 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1527 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1528 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1529 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1530 oc | BIU_PCI1080_CONF1_DMA); 1531 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1532 } 1533 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1534 junk = BXR2(pcs, IspVirt2Off(isp, regoff)); 1535 if (oc) { 1536 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1537 junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1538 } 1539 } 1540 1541 static uint32_t 1542 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff) 1543 { 1544 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1545 uint32_t rv; 1546 int block = regoff & _BLK_REG_MASK; 1547 1548 switch (block) { 1549 case BIU_BLOCK: 1550 break; 1551 case MBOX_BLOCK: 1552 return (BXR2(pcs, IspVirt2Off(pcs, regoff))); 1553 case SXP_BLOCK: 1554 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff); 1555 return (0xffffffff); 1556 case RISC_BLOCK: 1557 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff); 1558 return (0xffffffff); 1559 case DMA_BLOCK: 1560 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff); 1561 return (0xffffffff); 1562 default: 1563 isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff); 1564 return (0xffffffff); 1565 } 1566 1567 1568 switch (regoff) { 1569 case BIU2400_FLASH_ADDR: 1570 case BIU2400_FLASH_DATA: 1571 case BIU2400_ICR: 1572 case BIU2400_ISR: 1573 case BIU2400_CSR: 1574 case BIU2400_REQINP: 1575 case BIU2400_REQOUTP: 1576 case BIU2400_RSPINP: 1577 case BIU2400_RSPOUTP: 1578 case BIU2400_PRI_RQINP: 1579 case BIU2400_PRI_RSPINP: 1580 case BIU2400_ATIO_RSPINP: 1581 case BIU2400_ATIO_REQINP: 1582 case BIU2400_HCCR: 1583 case BIU2400_GPIOD: 1584 case BIU2400_GPIOE: 1585 case BIU2400_HSEMA: 1586 rv = BXR4(pcs, IspVirt2Off(pcs, regoff)); 1587 break; 1588 case BIU2400_R2HSTSLO: 1589 rv = BXR4(pcs, IspVirt2Off(pcs, regoff)); 1590 break; 1591 case BIU2400_R2HSTSHI: 1592 rv = BXR4(pcs, IspVirt2Off(pcs, regoff)) >> 16; 1593 break; 1594 default: 1595 isp_prt(isp, ISP_LOGERR, 1596 "isp_pci_rd_reg_2400: unknown offset %x", regoff); 1597 rv = 0xffffffff; 1598 break; 1599 } 1600 return (rv); 1601 } 1602 1603 static void 1604 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val) 1605 { 1606 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1607 int block = regoff & _BLK_REG_MASK; 1608 volatile int junk; 1609 1610 switch (block) { 1611 case BIU_BLOCK: 1612 break; 1613 case MBOX_BLOCK: 1614 BXW2(pcs, IspVirt2Off(pcs, regoff), val); 1615 junk = BXR2(pcs, IspVirt2Off(pcs, regoff)); 1616 return; 1617 case SXP_BLOCK: 1618 isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff); 1619 return; 1620 case RISC_BLOCK: 1621 isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff); 1622 return; 1623 case DMA_BLOCK: 1624 isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff); 1625 return; 1626 default: 1627 isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x", 1628 regoff); 1629 break; 1630 } 1631 1632 switch (regoff) { 1633 case BIU2400_FLASH_ADDR: 1634 case BIU2400_FLASH_DATA: 1635 case BIU2400_ICR: 1636 case BIU2400_ISR: 1637 case BIU2400_CSR: 1638 case BIU2400_REQINP: 1639 case BIU2400_REQOUTP: 1640 case BIU2400_RSPINP: 1641 case BIU2400_RSPOUTP: 1642 case BIU2400_PRI_RQINP: 1643 case BIU2400_PRI_RSPINP: 1644 case BIU2400_ATIO_RSPINP: 1645 case BIU2400_ATIO_REQINP: 1646 case BIU2400_HCCR: 1647 case BIU2400_GPIOD: 1648 case BIU2400_GPIOE: 1649 case BIU2400_HSEMA: 1650 BXW4(pcs, IspVirt2Off(pcs, regoff), val); 1651 junk = BXR4(pcs, IspVirt2Off(pcs, regoff)); 1652 break; 1653 default: 1654 isp_prt(isp, ISP_LOGERR, 1655 "isp_pci_wr_reg_2400: bad offset 0x%x", regoff); 1656 break; 1657 } 1658 } 1659 1660 1661 struct imush { 1662 ispsoftc_t *isp; 1663 int error; 1664 }; 1665 1666 static void imc(void *, bus_dma_segment_t *, int, int); 1667 1668 static void 1669 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1670 { 1671 struct imush *imushp = (struct imush *) arg; 1672 if (error) { 1673 imushp->error = error; 1674 } else { 1675 ispsoftc_t *isp =imushp->isp; 1676 bus_addr_t addr = segs->ds_addr; 1677 1678 isp->isp_rquest_dma = addr; 1679 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1680 isp->isp_result_dma = addr; 1681 if (IS_FC(isp)) { 1682 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1683 FCPARAM(isp)->isp_scdma = addr; 1684 } 1685 } 1686 } 1687 1688 /* 1689 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE 1690 */ 1691 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) 1692 1693 #if __FreeBSD_version < 500000 1694 #define BUS_DMA_ROOTARG NULL 1695 #define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \ 1696 bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) 1697 #elif __FreeBSD_version < 700020 1698 #define BUS_DMA_ROOTARG NULL 1699 #define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \ 1700 bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \ 1701 busdma_lock_mutex, &Giant, z) 1702 #else 1703 #define BUS_DMA_ROOTARG bus_get_dma_tag(pcs->pci_dev) 1704 #define isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z) \ 1705 bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \ 1706 busdma_lock_mutex, &Giant, z) 1707 #endif 1708 1709 static int 1710 isp_pci_mbxdma(ispsoftc_t *isp) 1711 { 1712 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1713 caddr_t base; 1714 uint32_t len; 1715 int i, error, ns; 1716 bus_size_t slim; /* segment size */ 1717 bus_addr_t llim; /* low limit of unavailable dma */ 1718 bus_addr_t hlim; /* high limit of unavailable dma */ 1719 struct imush im; 1720 1721 /* 1722 * Already been here? If so, leave... 1723 */ 1724 if (isp->isp_rquest) { 1725 return (0); 1726 } 1727 1728 if (isp->isp_maxcmds == 0) { 1729 isp_prt(isp, ISP_LOGERR, "maxcmds not set"); 1730 return (1); 1731 } 1732 1733 hlim = BUS_SPACE_MAXADDR; 1734 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1735 slim = (bus_size_t) (1ULL << 32); 1736 llim = BUS_SPACE_MAXADDR; 1737 } else { 1738 llim = BUS_SPACE_MAXADDR_32BIT; 1739 slim = (1 << 24); 1740 } 1741 1742 /* 1743 * XXX: We don't really support 64 bit target mode for parallel scsi yet 1744 */ 1745 #ifdef ISP_TARGET_MODE 1746 if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) { 1747 isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet"); 1748 return (1); 1749 } 1750 #endif 1751 1752 ISP_UNLOCK(isp); 1753 if (isp_dma_tag_create(BUS_DMA_ROOTARG, 1, slim, llim, 1754 hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, 1755 &pcs->dmat)) { 1756 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1757 ISP_LOCK(isp); 1758 return (1); 1759 } 1760 1761 1762 len = sizeof (XS_T **) * isp->isp_maxcmds; 1763 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1764 if (isp->isp_xflist == NULL) { 1765 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1766 ISP_LOCK(isp); 1767 return (1); 1768 } 1769 #ifdef ISP_TARGET_MODE 1770 len = sizeof (void **) * isp->isp_maxcmds; 1771 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1772 if (isp->isp_tgtlist == NULL) { 1773 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1774 ISP_LOCK(isp); 1775 return (1); 1776 } 1777 #endif 1778 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1779 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1780 if (pcs->dmaps == NULL) { 1781 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1782 free(isp->isp_xflist, M_DEVBUF); 1783 #ifdef ISP_TARGET_MODE 1784 free(isp->isp_tgtlist, M_DEVBUF); 1785 #endif 1786 ISP_LOCK(isp); 1787 return (1); 1788 } 1789 1790 /* 1791 * Allocate and map the request, result queues, plus FC scratch area. 1792 */ 1793 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1794 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1795 if (IS_FC(isp)) { 1796 len += ISP2100_SCRLEN; 1797 } 1798 1799 ns = (len / PAGE_SIZE) + 1; 1800 /* 1801 * Create a tag for the control spaces- force it to within 32 bits. 1802 */ 1803 if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim, 1804 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1805 NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) { 1806 isp_prt(isp, ISP_LOGERR, 1807 "cannot create a dma tag for control spaces"); 1808 free(pcs->dmaps, M_DEVBUF); 1809 free(isp->isp_xflist, M_DEVBUF); 1810 #ifdef ISP_TARGET_MODE 1811 free(isp->isp_tgtlist, M_DEVBUF); 1812 #endif 1813 ISP_LOCK(isp); 1814 return (1); 1815 } 1816 1817 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1818 &isp->isp_cdmap) != 0) { 1819 isp_prt(isp, ISP_LOGERR, 1820 "cannot allocate %d bytes of CCB memory", len); 1821 bus_dma_tag_destroy(isp->isp_cdmat); 1822 free(isp->isp_xflist, M_DEVBUF); 1823 #ifdef ISP_TARGET_MODE 1824 free(isp->isp_tgtlist, M_DEVBUF); 1825 #endif 1826 free(pcs->dmaps, M_DEVBUF); 1827 ISP_LOCK(isp); 1828 return (1); 1829 } 1830 1831 for (i = 0; i < isp->isp_maxcmds; i++) { 1832 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1833 if (error) { 1834 isp_prt(isp, ISP_LOGERR, 1835 "error %d creating per-cmd DMA maps", error); 1836 while (--i >= 0) { 1837 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1838 } 1839 goto bad; 1840 } 1841 } 1842 1843 im.isp = isp; 1844 im.error = 0; 1845 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1846 if (im.error) { 1847 isp_prt(isp, ISP_LOGERR, 1848 "error %d loading dma map for control areas", im.error); 1849 goto bad; 1850 } 1851 1852 isp->isp_rquest = base; 1853 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1854 isp->isp_result = base; 1855 if (IS_FC(isp)) { 1856 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1857 FCPARAM(isp)->isp_scratch = base; 1858 } 1859 ISP_LOCK(isp); 1860 return (0); 1861 1862 bad: 1863 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1864 bus_dma_tag_destroy(isp->isp_cdmat); 1865 free(isp->isp_xflist, M_DEVBUF); 1866 #ifdef ISP_TARGET_MODE 1867 free(isp->isp_tgtlist, M_DEVBUF); 1868 #endif 1869 free(pcs->dmaps, M_DEVBUF); 1870 ISP_LOCK(isp); 1871 isp->isp_rquest = NULL; 1872 return (1); 1873 } 1874 1875 typedef struct { 1876 ispsoftc_t *isp; 1877 void *cmd_token; 1878 void *rq; 1879 uint32_t *nxtip; 1880 uint32_t optr; 1881 int error; 1882 } mush_t; 1883 1884 #define MUSHERR_NOQENTRIES -2 1885 1886 #ifdef ISP_TARGET_MODE 1887 /* 1888 * We need to handle DMA for target mode differently from initiator mode. 1889 * 1890 * DMA mapping and construction and submission of CTIO Request Entries 1891 * and rendevous for completion are very tightly coupled because we start 1892 * out by knowing (per platform) how much data we have to move, but we 1893 * don't know, up front, how many DMA mapping segments will have to be used 1894 * cover that data, so we don't know how many CTIO Request Entries we 1895 * will end up using. Further, for performance reasons we may want to 1896 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1897 * 1898 * The standard vector still goes through isp_pci_dmasetup, but the callback 1899 * for the DMA mapping routines comes here instead with the whole transfer 1900 * mapped and a pointer to a partially filled in already allocated request 1901 * queue entry. We finish the job. 1902 */ 1903 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1904 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1905 1906 #define STATUS_WITH_DATA 1 1907 1908 static void 1909 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1910 { 1911 mush_t *mp; 1912 struct ccb_scsiio *csio; 1913 ispsoftc_t *isp; 1914 struct isp_pcisoftc *pcs; 1915 bus_dmamap_t *dp; 1916 ct_entry_t *cto, *qe; 1917 uint8_t scsi_status; 1918 uint32_t curi, nxti, handle; 1919 uint32_t sflags; 1920 int32_t resid; 1921 int nth_ctio, nctios, send_status; 1922 1923 mp = (mush_t *) arg; 1924 if (error) { 1925 mp->error = error; 1926 return; 1927 } 1928 1929 isp = mp->isp; 1930 csio = mp->cmd_token; 1931 cto = mp->rq; 1932 curi = isp->isp_reqidx; 1933 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1934 1935 cto->ct_xfrlen = 0; 1936 cto->ct_seg_count = 0; 1937 cto->ct_header.rqs_entry_count = 1; 1938 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1939 1940 if (nseg == 0) { 1941 cto->ct_header.rqs_seqno = 1; 1942 isp_prt(isp, ISP_LOGTDEBUG1, 1943 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1944 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1945 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1946 cto->ct_scsi_status, cto->ct_resid); 1947 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1948 isp_put_ctio(isp, cto, qe); 1949 return; 1950 } 1951 1952 nctios = nseg / ISP_RQDSEG; 1953 if (nseg % ISP_RQDSEG) { 1954 nctios++; 1955 } 1956 1957 /* 1958 * Save syshandle, and potentially any SCSI status, which we'll 1959 * reinsert on the last CTIO we're going to send. 1960 */ 1961 1962 handle = cto->ct_syshandle; 1963 cto->ct_syshandle = 0; 1964 cto->ct_header.rqs_seqno = 0; 1965 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1966 1967 if (send_status) { 1968 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1969 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1970 /* 1971 * Preserve residual. 1972 */ 1973 resid = cto->ct_resid; 1974 1975 /* 1976 * Save actual SCSI status. 1977 */ 1978 scsi_status = cto->ct_scsi_status; 1979 1980 #ifndef STATUS_WITH_DATA 1981 sflags |= CT_NO_DATA; 1982 /* 1983 * We can't do a status at the same time as a data CTIO, so 1984 * we need to synthesize an extra CTIO at this level. 1985 */ 1986 nctios++; 1987 #endif 1988 } else { 1989 sflags = scsi_status = resid = 0; 1990 } 1991 1992 cto->ct_resid = 0; 1993 cto->ct_scsi_status = 0; 1994 1995 pcs = (struct isp_pcisoftc *)isp; 1996 dp = &pcs->dmaps[isp_handle_index(handle)]; 1997 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1998 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1999 } else { 2000 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2001 } 2002 2003 nxti = *mp->nxtip; 2004 2005 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 2006 int seglim; 2007 2008 seglim = nseg; 2009 if (seglim) { 2010 int seg; 2011 2012 if (seglim > ISP_RQDSEG) 2013 seglim = ISP_RQDSEG; 2014 2015 for (seg = 0; seg < seglim; seg++, nseg--) { 2016 /* 2017 * Unlike normal initiator commands, we don't 2018 * do any swizzling here. 2019 */ 2020 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 2021 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 2022 cto->ct_xfrlen += dm_segs->ds_len; 2023 dm_segs++; 2024 } 2025 cto->ct_seg_count = seg; 2026 } else { 2027 /* 2028 * This case should only happen when we're sending an 2029 * extra CTIO with final status. 2030 */ 2031 if (send_status == 0) { 2032 isp_prt(isp, ISP_LOGWARN, 2033 "tdma_mk ran out of segments"); 2034 mp->error = EINVAL; 2035 return; 2036 } 2037 } 2038 2039 /* 2040 * At this point, the fields ct_lun, ct_iid, ct_tagval, 2041 * ct_tagtype, and ct_timeout have been carried over 2042 * unchanged from what our caller had set. 2043 * 2044 * The dataseg fields and the seg_count fields we just got 2045 * through setting. The data direction we've preserved all 2046 * along and only clear it if we're now sending status. 2047 */ 2048 2049 if (nth_ctio == nctios - 1) { 2050 /* 2051 * We're the last in a sequence of CTIOs, so mark 2052 * this CTIO and save the handle to the CCB such that 2053 * when this CTIO completes we can free dma resources 2054 * and do whatever else we need to do to finish the 2055 * rest of the command. We *don't* give this to the 2056 * firmware to work on- the caller will do that. 2057 */ 2058 2059 cto->ct_syshandle = handle; 2060 cto->ct_header.rqs_seqno = 1; 2061 2062 if (send_status) { 2063 cto->ct_scsi_status = scsi_status; 2064 cto->ct_flags |= sflags; 2065 cto->ct_resid = resid; 2066 } 2067 if (send_status) { 2068 isp_prt(isp, ISP_LOGTDEBUG1, 2069 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 2070 "scsi status %x resid %d", 2071 cto->ct_fwhandle, csio->ccb_h.target_lun, 2072 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 2073 cto->ct_scsi_status, cto->ct_resid); 2074 } else { 2075 isp_prt(isp, ISP_LOGTDEBUG1, 2076 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 2077 cto->ct_fwhandle, csio->ccb_h.target_lun, 2078 cto->ct_iid, cto->ct_tag_val, 2079 cto->ct_flags); 2080 } 2081 isp_put_ctio(isp, cto, qe); 2082 ISP_TDQE(isp, "last tdma_mk", curi, cto); 2083 if (nctios > 1) { 2084 MEMORYBARRIER(isp, SYNC_REQUEST, 2085 curi, QENTRY_LEN); 2086 } 2087 } else { 2088 ct_entry_t *oqe = qe; 2089 2090 /* 2091 * Make sure syshandle fields are clean 2092 */ 2093 cto->ct_syshandle = 0; 2094 cto->ct_header.rqs_seqno = 0; 2095 2096 isp_prt(isp, ISP_LOGTDEBUG1, 2097 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 2098 cto->ct_fwhandle, csio->ccb_h.target_lun, 2099 cto->ct_iid, cto->ct_flags); 2100 2101 /* 2102 * Get a new CTIO 2103 */ 2104 qe = (ct_entry_t *) 2105 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2106 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 2107 if (nxti == mp->optr) { 2108 isp_prt(isp, ISP_LOGTDEBUG0, 2109 "Queue Overflow in tdma_mk"); 2110 mp->error = MUSHERR_NOQENTRIES; 2111 return; 2112 } 2113 2114 /* 2115 * Now that we're done with the old CTIO, 2116 * flush it out to the request queue. 2117 */ 2118 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 2119 isp_put_ctio(isp, cto, oqe); 2120 if (nth_ctio != 0) { 2121 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 2122 QENTRY_LEN); 2123 } 2124 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 2125 2126 /* 2127 * Reset some fields in the CTIO so we can reuse 2128 * for the next one we'll flush to the request 2129 * queue. 2130 */ 2131 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 2132 cto->ct_header.rqs_entry_count = 1; 2133 cto->ct_header.rqs_flags = 0; 2134 cto->ct_status = 0; 2135 cto->ct_scsi_status = 0; 2136 cto->ct_xfrlen = 0; 2137 cto->ct_resid = 0; 2138 cto->ct_seg_count = 0; 2139 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 2140 } 2141 } 2142 *mp->nxtip = nxti; 2143 } 2144 2145 /* 2146 * We don't have to do multiple CTIOs here. Instead, we can just do 2147 * continuation segments as needed. This greatly simplifies the code 2148 * improves performance. 2149 */ 2150 2151 static void 2152 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2153 { 2154 mush_t *mp; 2155 struct ccb_scsiio *csio; 2156 ispsoftc_t *isp; 2157 ct2_entry_t *cto, *qe; 2158 uint32_t curi, nxti; 2159 ispds_t *ds; 2160 ispds64_t *ds64; 2161 int segcnt, seglim; 2162 2163 mp = (mush_t *) arg; 2164 if (error) { 2165 mp->error = error; 2166 return; 2167 } 2168 2169 isp = mp->isp; 2170 csio = mp->cmd_token; 2171 cto = mp->rq; 2172 2173 curi = isp->isp_reqidx; 2174 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 2175 2176 if (nseg == 0) { 2177 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 2178 isp_prt(isp, ISP_LOGWARN, 2179 "dma2_tgt_fc, a status CTIO2 without MODE1 " 2180 "set (0x%x)", cto->ct_flags); 2181 mp->error = EINVAL; 2182 return; 2183 } 2184 /* 2185 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 2186 * flags to NO DATA and clear relative offset flags. 2187 * We preserve the ct_resid and the response area. 2188 */ 2189 cto->ct_header.rqs_seqno = 1; 2190 cto->ct_seg_count = 0; 2191 cto->ct_reloff = 0; 2192 isp_prt(isp, ISP_LOGTDEBUG1, 2193 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 2194 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 2195 cto->ct_iid, cto->ct_flags, cto->ct_status, 2196 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 2197 if (FCPARAM(isp)->isp_2klogin) { 2198 isp_put_ctio2e(isp, 2199 (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2200 } else { 2201 isp_put_ctio2(isp, cto, qe); 2202 } 2203 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 2204 return; 2205 } 2206 2207 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 2208 isp_prt(isp, ISP_LOGERR, 2209 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 2210 "(0x%x)", cto->ct_flags); 2211 mp->error = EINVAL; 2212 return; 2213 } 2214 2215 2216 nxti = *mp->nxtip; 2217 2218 /* 2219 * Check to see if we need to DAC addressing or not. 2220 * 2221 * Any address that's over the 4GB boundary causes this 2222 * to happen. 2223 */ 2224 segcnt = nseg; 2225 if (sizeof (bus_addr_t) > 4) { 2226 for (segcnt = 0; segcnt < nseg; segcnt++) { 2227 uint64_t addr = dm_segs[segcnt].ds_addr; 2228 if (addr >= 0x100000000LL) { 2229 break; 2230 } 2231 } 2232 } 2233 if (segcnt != nseg) { 2234 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3; 2235 seglim = ISP_RQDSEG_T3; 2236 ds64 = &cto->rsp.m0.u.ct_dataseg64[0]; 2237 ds = NULL; 2238 } else { 2239 seglim = ISP_RQDSEG_T2; 2240 ds64 = NULL; 2241 ds = &cto->rsp.m0.u.ct_dataseg[0]; 2242 } 2243 cto->ct_seg_count = 0; 2244 2245 /* 2246 * Set up the CTIO2 data segments. 2247 */ 2248 for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg; 2249 cto->ct_seg_count++, segcnt++) { 2250 if (ds64) { 2251 ds64->ds_basehi = 2252 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2253 ds64->ds_base = dm_segs[segcnt].ds_addr; 2254 ds64->ds_count = dm_segs[segcnt].ds_len; 2255 ds64++; 2256 } else { 2257 ds->ds_base = dm_segs[segcnt].ds_addr; 2258 ds->ds_count = dm_segs[segcnt].ds_len; 2259 ds++; 2260 } 2261 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2262 #if __FreeBSD_version < 500000 2263 isp_prt(isp, ISP_LOGTDEBUG1, 2264 "isp_send_ctio2: ent0[%d]0x%llx:%llu", 2265 cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr, 2266 (uint64_t)dm_segs[segcnt].ds_len); 2267 #else 2268 isp_prt(isp, ISP_LOGTDEBUG1, 2269 "isp_send_ctio2: ent0[%d]0x%jx:%ju", 2270 cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr, 2271 (uintmax_t)dm_segs[segcnt].ds_len); 2272 #endif 2273 } 2274 2275 while (segcnt < nseg) { 2276 uint32_t curip; 2277 int seg; 2278 ispcontreq_t local, *crq = &local, *qep; 2279 2280 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2281 curip = nxti; 2282 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 2283 if (nxti == mp->optr) { 2284 ISP_UNLOCK(isp); 2285 isp_prt(isp, ISP_LOGTDEBUG0, 2286 "tdma_mkfc: request queue overflow"); 2287 mp->error = MUSHERR_NOQENTRIES; 2288 return; 2289 } 2290 cto->ct_header.rqs_entry_count++; 2291 MEMZERO((void *)crq, sizeof (*crq)); 2292 crq->req_header.rqs_entry_count = 1; 2293 if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { 2294 seglim = ISP_CDSEG64; 2295 ds = NULL; 2296 ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0]; 2297 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2298 } else { 2299 seglim = ISP_CDSEG; 2300 ds = &crq->req_dataseg[0]; 2301 ds64 = NULL; 2302 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2303 } 2304 for (seg = 0; segcnt < nseg && seg < seglim; 2305 segcnt++, seg++) { 2306 if (ds64) { 2307 ds64->ds_basehi = 2308 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2309 ds64->ds_base = dm_segs[segcnt].ds_addr; 2310 ds64->ds_count = dm_segs[segcnt].ds_len; 2311 ds64++; 2312 } else { 2313 ds->ds_base = dm_segs[segcnt].ds_addr; 2314 ds->ds_count = dm_segs[segcnt].ds_len; 2315 ds++; 2316 } 2317 #if __FreeBSD_version < 500000 2318 isp_prt(isp, ISP_LOGTDEBUG1, 2319 "isp_send_ctio2: ent%d[%d]%llx:%llu", 2320 cto->ct_header.rqs_entry_count-1, seg, 2321 (uint64_t)dm_segs[segcnt].ds_addr, 2322 (uint64_t)dm_segs[segcnt].ds_len); 2323 #else 2324 isp_prt(isp, ISP_LOGTDEBUG1, 2325 "isp_send_ctio2: ent%d[%d]%jx:%ju", 2326 cto->ct_header.rqs_entry_count-1, seg, 2327 (uintmax_t)dm_segs[segcnt].ds_addr, 2328 (uintmax_t)dm_segs[segcnt].ds_len); 2329 #endif 2330 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2331 cto->ct_seg_count++; 2332 } 2333 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 2334 isp_put_cont_req(isp, crq, qep); 2335 ISP_TDQE(isp, "cont entry", curi, qep); 2336 } 2337 2338 /* 2339 * No do final twiddling for the CTIO itself. 2340 */ 2341 cto->ct_header.rqs_seqno = 1; 2342 isp_prt(isp, ISP_LOGTDEBUG1, 2343 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 2344 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 2345 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 2346 cto->ct_resid); 2347 if (FCPARAM(isp)->isp_2klogin) { 2348 isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2349 } else { 2350 isp_put_ctio2(isp, cto, qe); 2351 } 2352 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 2353 *mp->nxtip = nxti; 2354 } 2355 #endif 2356 2357 static void dma_2400(void *, bus_dma_segment_t *, int, int); 2358 static void dma2_a64(void *, bus_dma_segment_t *, int, int); 2359 static void dma2(void *, bus_dma_segment_t *, int, int); 2360 2361 static void 2362 dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2363 { 2364 mush_t *mp; 2365 ispsoftc_t *isp; 2366 struct ccb_scsiio *csio; 2367 struct isp_pcisoftc *pcs; 2368 bus_dmamap_t *dp; 2369 bus_dma_segment_t *eseg; 2370 ispreqt7_t *rq; 2371 int seglim, datalen; 2372 uint32_t nxti; 2373 2374 mp = (mush_t *) arg; 2375 if (error) { 2376 mp->error = error; 2377 return; 2378 } 2379 2380 if (nseg < 1) { 2381 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2382 mp->error = EFAULT; 2383 return; 2384 } 2385 2386 csio = mp->cmd_token; 2387 isp = mp->isp; 2388 rq = mp->rq; 2389 pcs = (struct isp_pcisoftc *)mp->isp; 2390 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2391 nxti = *mp->nxtip; 2392 2393 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2394 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2395 } else { 2396 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2397 } 2398 datalen = XS_XFRLEN(csio); 2399 2400 /* 2401 * We're passed an initial partially filled in entry that 2402 * has most fields filled in except for data transfer 2403 * related values. 2404 * 2405 * Our job is to fill in the initial request queue entry and 2406 * then to start allocating and filling in continuation entries 2407 * until we've covered the entire transfer. 2408 */ 2409 2410 rq->req_header.rqs_entry_type = RQSTYPE_T7RQS; 2411 rq->req_dl = datalen; 2412 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2413 rq->req_alen_datadir = 0x2; 2414 } else { 2415 rq->req_alen_datadir = 0x1; 2416 } 2417 2418 eseg = dm_segs + nseg; 2419 2420 rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr); 2421 rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr); 2422 rq->req_dataseg.ds_count = dm_segs->ds_len; 2423 2424 datalen -= dm_segs->ds_len; 2425 2426 dm_segs++; 2427 rq->req_seg_count++; 2428 2429 while (datalen > 0 && dm_segs != eseg) { 2430 uint32_t onxti; 2431 ispcontreq64_t local, *crq = &local, *cqe; 2432 2433 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2434 onxti = nxti; 2435 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2436 if (nxti == mp->optr) { 2437 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2438 mp->error = MUSHERR_NOQENTRIES; 2439 return; 2440 } 2441 rq->req_header.rqs_entry_count++; 2442 MEMZERO((void *)crq, sizeof (*crq)); 2443 crq->req_header.rqs_entry_count = 1; 2444 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2445 2446 seglim = 0; 2447 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2448 crq->req_dataseg[seglim].ds_base = 2449 DMA_LO32(dm_segs->ds_addr); 2450 crq->req_dataseg[seglim].ds_basehi = 2451 DMA_HI32(dm_segs->ds_addr); 2452 crq->req_dataseg[seglim].ds_count = 2453 dm_segs->ds_len; 2454 rq->req_seg_count++; 2455 dm_segs++; 2456 seglim++; 2457 datalen -= dm_segs->ds_len; 2458 } 2459 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2460 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2461 } 2462 isp_put_cont64_req(isp, crq, cqe); 2463 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2464 } 2465 *mp->nxtip = nxti; 2466 } 2467 2468 static void 2469 dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2470 { 2471 mush_t *mp; 2472 ispsoftc_t *isp; 2473 struct ccb_scsiio *csio; 2474 struct isp_pcisoftc *pcs; 2475 bus_dmamap_t *dp; 2476 bus_dma_segment_t *eseg; 2477 ispreq64_t *rq; 2478 int seglim, datalen; 2479 uint32_t nxti; 2480 2481 mp = (mush_t *) arg; 2482 if (error) { 2483 mp->error = error; 2484 return; 2485 } 2486 2487 if (nseg < 1) { 2488 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2489 mp->error = EFAULT; 2490 return; 2491 } 2492 csio = mp->cmd_token; 2493 isp = mp->isp; 2494 rq = mp->rq; 2495 pcs = (struct isp_pcisoftc *)mp->isp; 2496 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2497 nxti = *mp->nxtip; 2498 2499 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2500 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2501 } else { 2502 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2503 } 2504 datalen = XS_XFRLEN(csio); 2505 2506 /* 2507 * We're passed an initial partially filled in entry that 2508 * has most fields filled in except for data transfer 2509 * related values. 2510 * 2511 * Our job is to fill in the initial request queue entry and 2512 * then to start allocating and filling in continuation entries 2513 * until we've covered the entire transfer. 2514 */ 2515 2516 if (IS_FC(isp)) { 2517 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 2518 seglim = ISP_RQDSEG_T3; 2519 ((ispreqt3_t *)rq)->req_totalcnt = datalen; 2520 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2521 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2522 } else { 2523 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2524 } 2525 } else { 2526 rq->req_header.rqs_entry_type = RQSTYPE_A64; 2527 if (csio->cdb_len > 12) { 2528 seglim = 0; 2529 } else { 2530 seglim = ISP_RQDSEG_A64; 2531 } 2532 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2533 rq->req_flags |= REQFLAG_DATA_IN; 2534 } else { 2535 rq->req_flags |= REQFLAG_DATA_OUT; 2536 } 2537 } 2538 2539 eseg = dm_segs + nseg; 2540 2541 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2542 if (IS_FC(isp)) { 2543 ispreqt3_t *rq3 = (ispreqt3_t *)rq; 2544 rq3->req_dataseg[rq3->req_seg_count].ds_base = 2545 DMA_LO32(dm_segs->ds_addr); 2546 rq3->req_dataseg[rq3->req_seg_count].ds_basehi = 2547 DMA_HI32(dm_segs->ds_addr); 2548 rq3->req_dataseg[rq3->req_seg_count].ds_count = 2549 dm_segs->ds_len; 2550 } else { 2551 rq->req_dataseg[rq->req_seg_count].ds_base = 2552 DMA_LO32(dm_segs->ds_addr); 2553 rq->req_dataseg[rq->req_seg_count].ds_basehi = 2554 DMA_HI32(dm_segs->ds_addr); 2555 rq->req_dataseg[rq->req_seg_count].ds_count = 2556 dm_segs->ds_len; 2557 } 2558 datalen -= dm_segs->ds_len; 2559 rq->req_seg_count++; 2560 dm_segs++; 2561 } 2562 2563 while (datalen > 0 && dm_segs != eseg) { 2564 uint32_t onxti; 2565 ispcontreq64_t local, *crq = &local, *cqe; 2566 2567 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2568 onxti = nxti; 2569 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2570 if (nxti == mp->optr) { 2571 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2572 mp->error = MUSHERR_NOQENTRIES; 2573 return; 2574 } 2575 rq->req_header.rqs_entry_count++; 2576 MEMZERO((void *)crq, sizeof (*crq)); 2577 crq->req_header.rqs_entry_count = 1; 2578 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2579 2580 seglim = 0; 2581 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2582 crq->req_dataseg[seglim].ds_base = 2583 DMA_LO32(dm_segs->ds_addr); 2584 crq->req_dataseg[seglim].ds_basehi = 2585 DMA_HI32(dm_segs->ds_addr); 2586 crq->req_dataseg[seglim].ds_count = 2587 dm_segs->ds_len; 2588 rq->req_seg_count++; 2589 dm_segs++; 2590 seglim++; 2591 datalen -= dm_segs->ds_len; 2592 } 2593 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2594 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2595 } 2596 isp_put_cont64_req(isp, crq, cqe); 2597 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2598 } 2599 *mp->nxtip = nxti; 2600 } 2601 2602 static void 2603 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2604 { 2605 mush_t *mp; 2606 ispsoftc_t *isp; 2607 struct ccb_scsiio *csio; 2608 struct isp_pcisoftc *pcs; 2609 bus_dmamap_t *dp; 2610 bus_dma_segment_t *eseg; 2611 ispreq_t *rq; 2612 int seglim, datalen; 2613 uint32_t nxti; 2614 2615 mp = (mush_t *) arg; 2616 if (error) { 2617 mp->error = error; 2618 return; 2619 } 2620 2621 if (nseg < 1) { 2622 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2623 mp->error = EFAULT; 2624 return; 2625 } 2626 csio = mp->cmd_token; 2627 isp = mp->isp; 2628 rq = mp->rq; 2629 pcs = (struct isp_pcisoftc *)mp->isp; 2630 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2631 nxti = *mp->nxtip; 2632 2633 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2634 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2635 } else { 2636 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2637 } 2638 2639 datalen = XS_XFRLEN(csio); 2640 2641 /* 2642 * We're passed an initial partially filled in entry that 2643 * has most fields filled in except for data transfer 2644 * related values. 2645 * 2646 * Our job is to fill in the initial request queue entry and 2647 * then to start allocating and filling in continuation entries 2648 * until we've covered the entire transfer. 2649 */ 2650 2651 if (IS_FC(isp)) { 2652 seglim = ISP_RQDSEG_T2; 2653 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 2654 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2655 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2656 } else { 2657 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2658 } 2659 } else { 2660 if (csio->cdb_len > 12) { 2661 seglim = 0; 2662 } else { 2663 seglim = ISP_RQDSEG; 2664 } 2665 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2666 rq->req_flags |= REQFLAG_DATA_IN; 2667 } else { 2668 rq->req_flags |= REQFLAG_DATA_OUT; 2669 } 2670 } 2671 2672 eseg = dm_segs + nseg; 2673 2674 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2675 if (IS_FC(isp)) { 2676 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 2677 rq2->req_dataseg[rq2->req_seg_count].ds_base = 2678 DMA_LO32(dm_segs->ds_addr); 2679 rq2->req_dataseg[rq2->req_seg_count].ds_count = 2680 dm_segs->ds_len; 2681 } else { 2682 rq->req_dataseg[rq->req_seg_count].ds_base = 2683 DMA_LO32(dm_segs->ds_addr); 2684 rq->req_dataseg[rq->req_seg_count].ds_count = 2685 dm_segs->ds_len; 2686 } 2687 datalen -= dm_segs->ds_len; 2688 rq->req_seg_count++; 2689 dm_segs++; 2690 } 2691 2692 while (datalen > 0 && dm_segs != eseg) { 2693 uint32_t onxti; 2694 ispcontreq_t local, *crq = &local, *cqe; 2695 2696 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2697 onxti = nxti; 2698 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2699 if (nxti == mp->optr) { 2700 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2701 mp->error = MUSHERR_NOQENTRIES; 2702 return; 2703 } 2704 rq->req_header.rqs_entry_count++; 2705 MEMZERO((void *)crq, sizeof (*crq)); 2706 crq->req_header.rqs_entry_count = 1; 2707 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2708 2709 seglim = 0; 2710 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 2711 crq->req_dataseg[seglim].ds_base = 2712 DMA_LO32(dm_segs->ds_addr); 2713 crq->req_dataseg[seglim].ds_count = 2714 dm_segs->ds_len; 2715 rq->req_seg_count++; 2716 dm_segs++; 2717 seglim++; 2718 datalen -= dm_segs->ds_len; 2719 } 2720 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2721 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2722 } 2723 isp_put_cont_req(isp, crq, cqe); 2724 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2725 } 2726 *mp->nxtip = nxti; 2727 } 2728 2729 /* 2730 * We enter with ISP_LOCK held 2731 */ 2732 static int 2733 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq, 2734 uint32_t *nxtip, uint32_t optr) 2735 { 2736 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2737 ispreq_t *qep; 2738 bus_dmamap_t *dp = NULL; 2739 mush_t mush, *mp; 2740 void (*eptr)(void *, bus_dma_segment_t *, int, int); 2741 2742 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 2743 #ifdef ISP_TARGET_MODE 2744 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 2745 if (IS_FC(isp)) { 2746 eptr = tdma_mkfc; 2747 } else { 2748 eptr = tdma_mk; 2749 } 2750 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2751 (csio->dxfer_len == 0)) { 2752 mp = &mush; 2753 mp->isp = isp; 2754 mp->cmd_token = csio; 2755 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 2756 mp->nxtip = nxtip; 2757 mp->optr = optr; 2758 mp->error = 0; 2759 ISPLOCK_2_CAMLOCK(isp); 2760 (*eptr)(mp, NULL, 0, 0); 2761 CAMLOCK_2_ISPLOCK(isp); 2762 goto mbxsync; 2763 } 2764 } else 2765 #endif 2766 if (IS_24XX(isp)) { 2767 eptr = dma_2400; 2768 } else if (sizeof (bus_addr_t) > 4) { 2769 eptr = dma2_a64; 2770 } else { 2771 eptr = dma2; 2772 } 2773 2774 2775 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2776 (csio->dxfer_len == 0)) { 2777 rq->req_seg_count = 1; 2778 goto mbxsync; 2779 } 2780 2781 /* 2782 * Do a virtual grapevine step to collect info for 2783 * the callback dma allocation that we have to use... 2784 */ 2785 mp = &mush; 2786 mp->isp = isp; 2787 mp->cmd_token = csio; 2788 mp->rq = rq; 2789 mp->nxtip = nxtip; 2790 mp->optr = optr; 2791 mp->error = 0; 2792 2793 ISPLOCK_2_CAMLOCK(isp); 2794 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 2795 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 2796 int error, s; 2797 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2798 s = splsoftvm(); 2799 error = bus_dmamap_load(pcs->dmat, *dp, 2800 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 2801 if (error == EINPROGRESS) { 2802 bus_dmamap_unload(pcs->dmat, *dp); 2803 mp->error = EINVAL; 2804 isp_prt(isp, ISP_LOGERR, 2805 "deferred dma allocation not supported"); 2806 } else if (error && mp->error == 0) { 2807 #ifdef DIAGNOSTIC 2808 isp_prt(isp, ISP_LOGERR, 2809 "error %d in dma mapping code", error); 2810 #endif 2811 mp->error = error; 2812 } 2813 splx(s); 2814 } else { 2815 /* Pointer to physical buffer */ 2816 struct bus_dma_segment seg; 2817 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; 2818 seg.ds_len = csio->dxfer_len; 2819 (*eptr)(mp, &seg, 1, 0); 2820 } 2821 } else { 2822 struct bus_dma_segment *segs; 2823 2824 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 2825 isp_prt(isp, ISP_LOGERR, 2826 "Physical segment pointers unsupported"); 2827 mp->error = EINVAL; 2828 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2829 isp_prt(isp, ISP_LOGERR, 2830 "Virtual segment addresses unsupported"); 2831 mp->error = EINVAL; 2832 } else { 2833 /* Just use the segments provided */ 2834 segs = (struct bus_dma_segment *) csio->data_ptr; 2835 (*eptr)(mp, segs, csio->sglist_cnt, 0); 2836 } 2837 } 2838 CAMLOCK_2_ISPLOCK(isp); 2839 if (mp->error) { 2840 int retval = CMD_COMPLETE; 2841 if (mp->error == MUSHERR_NOQENTRIES) { 2842 retval = CMD_EAGAIN; 2843 } else if (mp->error == EFBIG) { 2844 XS_SETERR(csio, CAM_REQ_TOO_BIG); 2845 } else if (mp->error == EINVAL) { 2846 XS_SETERR(csio, CAM_REQ_INVALID); 2847 } else { 2848 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 2849 } 2850 return (retval); 2851 } 2852 mbxsync: 2853 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2854 isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq); 2855 } 2856 switch (rq->req_header.rqs_entry_type) { 2857 case RQSTYPE_REQUEST: 2858 isp_put_request(isp, rq, qep); 2859 break; 2860 case RQSTYPE_CMDONLY: 2861 isp_put_extended_request(isp, (ispextreq_t *)rq, 2862 (ispextreq_t *)qep); 2863 break; 2864 case RQSTYPE_T2RQS: 2865 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 2866 break; 2867 case RQSTYPE_A64: 2868 case RQSTYPE_T3RQS: 2869 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep); 2870 break; 2871 case RQSTYPE_T7RQS: 2872 isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep); 2873 break; 2874 } 2875 return (CMD_QUEUED); 2876 } 2877 2878 static void 2879 isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle) 2880 { 2881 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2882 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)]; 2883 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2884 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 2885 } else { 2886 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 2887 } 2888 bus_dmamap_unload(pcs->dmat, *dp); 2889 } 2890 2891 2892 static void 2893 isp_pci_reset0(ispsoftc_t *isp) 2894 { 2895 ISP_DISABLE_INTS(isp); 2896 } 2897 2898 static void 2899 isp_pci_reset1(ispsoftc_t *isp) 2900 { 2901 if (!IS_24XX(isp)) { 2902 /* Make sure the BIOS is disabled */ 2903 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 2904 } 2905 /* and enable interrupts */ 2906 ISP_ENABLE_INTS(isp); 2907 } 2908 2909 static void 2910 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) 2911 { 2912 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2913 if (msg) 2914 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2915 else 2916 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2917 if (IS_SCSI(isp)) 2918 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2919 else 2920 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2921 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2922 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2923 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2924 2925 2926 if (IS_SCSI(isp)) { 2927 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2928 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2929 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2930 ISP_READ(isp, CDMA_FIFO_STS)); 2931 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2932 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2933 ISP_READ(isp, DDMA_FIFO_STS)); 2934 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2935 ISP_READ(isp, SXP_INTERRUPT), 2936 ISP_READ(isp, SXP_GROSS_ERR), 2937 ISP_READ(isp, SXP_PINS_CTRL)); 2938 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2939 } 2940 printf(" mbox regs: %x %x %x %x %x\n", 2941 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2942 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2943 ISP_READ(isp, OUTMAILBOX4)); 2944 printf(" PCI Status Command/Status=%x\n", 2945 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2946 } 2947