/*-
 * Copyright (c) 1997-2008 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/bus.h>
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

#include <dev/isp/isp_freebsd.h>

static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2600(ispsoftc_t *, int);
static void isp_pci_wr_reg_2600(ispsoftc_t *, int, uint32_t);
static void isp_pci_run_isr(ispsoftc_t *);
static void isp_pci_run_isr_2300(ispsoftc_t *);
static void isp_pci_run_isr_2400(ispsoftc_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static void isp_pci_mbxdmafree(ispsoftc_t *);
static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);
static int isp_pci_irqsetup(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_run_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_run_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_run_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_run_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_run_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_run_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2400 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

static struct ispmdvec mdvec_2500 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

static struct ispmdvec mdvec_2600 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2600,
	isp_pci_wr_reg_2600,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN		0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN	0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN	0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN		0x0100
#endif
#ifndef	PCIM_CMD_INTX_DISABLE
#define	PCIM_CMD_INTX_DISABLE	0x0400
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND	0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ	0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER	0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR	0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2322
#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2422
#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2432
#define	PCI_PRODUCT_QLOGIC_ISP2432	0x2432
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2532
#define	PCI_PRODUCT_QLOGIC_ISP2532	0x2532
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6322
#define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP5432
#define	PCI_PRODUCT_QLOGIC_ISP5432	0x5432
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2031
#define	PCI_PRODUCT_QLOGIC_ISP2031	0x2031
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP8031
#define	PCI_PRODUCT_QLOGIC_ISP8031	0x8031
#endif

#define	PCI_QLOGIC_ISP5432	\
	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2432	\
	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2532	\
	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6322	\
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2031	\
	((PCI_PRODUCT_QLOGIC_ISP2031 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP8031	\
	((PCI_PRODUCT_QLOGIC_ISP8031 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);

#define	ISP_PCD(isp)	((struct isp_pcisoftc *)isp)->pci_dev
struct isp_pcisoftc {
	ispsoftc_t		pci_isp;
	device_t		pci_dev;
	struct resource *	regs;
	struct resource *	regs1;
	struct resource *	regs2;
	struct {
		int		iqd;
		struct resource *	irq;
		void *		ih;
	} irq[ISP_MAX_IRQS];
	int			rtp;
	int			rgd;
	int			rtp1;
	int			rgd1;
	int			rtp2;
	int			rgd2;
	int16_t			pci_poff[_NREG_BLKS];
	bus_dma_tag_t		dmat;
	int			msicount;
};

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, isp_pci_probe),
	DEVMETHOD(device_attach, isp_pci_attach),
	DEVMETHOD(device_detach, isp_pci_detach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_DEPEND(isp, cam, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
static int isp_nvports = 0;

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2432:
		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2532:
		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP5432:
		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2031:
		device_set_desc(dev, "Qlogic ISP 2031 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP8031:
		device_set_desc(dev, "Qlogic ISP 8031 PCI FCoE Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

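/*
 * Parse hint (device.hints / loader tunable) options that apply to the
 * adapter as a whole: firmware load and NVRAM overrides, debug level,
 * virtual port count and quick boot time.
 */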
static void
isp_get_generic_options(device_t dev, ispsoftc_t *isp)
{
	int tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
	if (tval > 0 && tval <= 254) {
		isp_nvports = tval;
	}
	tval = 7;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
	isp_quickboot_time = tval;
}

static void
isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
{
	const char *sptr;
	int tval = 0;
	char prefix[12], name[16];

	if (chan == 0)
		prefix[0] = 0;
	else
		snprintf(prefix, sizeof(prefix), "chan%d.", chan);
	snprintf(name, sizeof(name), "%siid", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval)) {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
		} else {
#ifdef __sparc64__
			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
#else
			ISP_SPI_PC(isp, chan)->iid = 7;
#endif
		}
	} else {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = tval - chan;
		} else {
			ISP_SPI_PC(isp, chan)->iid = tval;
		}
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}

	if (IS_SCSI(isp))
		return;

	tval = -1;
	snprintf(name, sizeof(name), "%srole", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0) {
		switch (tval) {
		case ISP_ROLE_NONE:
		case ISP_ROLE_INITIATOR:
		case ISP_ROLE_TARGET:
		case ISP_ROLE_BOTH:
			device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
			break;
		default:
			tval = -1;
			break;
		}
	}
	if (tval == -1) {
		tval = ISP_DEFAULT_ROLES;
	}
	ISP_FC_PC(isp, chan)->def_role = tval;

	tval = 0;
	snprintf(name, sizeof(name), "%sfullduplex", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	sptr = NULL;
	snprintf(name, sizeof(name), "%stopology", prefix);
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr) == 0 && sptr != NULL) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

#ifdef ISP_FCTAPE_OFF
	isp->isp_confopts |= ISP_CFG_NOFCTAPE;
#else
	isp->isp_confopts |= ISP_CFG_FCTAPE;
#endif

	tval = 0;
	snprintf(name, sizeof(name), "%snofctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_FCTAPE;
		isp->isp_confopts |= ISP_CFG_NOFCTAPE;
	}

	tval = 0;
	snprintf(name, sizeof(name), "%sfctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
		isp->isp_confopts |= ISP_CFG_FCTAPE;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = NULL;
	snprintf(name, sizeof(name), "%sportwwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
		char *eptr = NULL;
		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwpn = 0;
		}
	}

	sptr = NULL;
	snprintf(name, sizeof(name), "%snodewwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
		char *eptr = NULL;
		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwnn = 0;
		}
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
	} else {
		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sgone_device_time", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->gone_device_time = tval;
	} else {
		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
	}
}

static int
isp_pci_attach(device_t dev)
{
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	ispsoftc_t *isp = &pcs->pci_isp;
	int i;
	uint32_t data, cmd, linesz, did;
	size_t psize, xsize;
	char fwname[32];

	pcs->pci_dev = dev;
	isp->isp_dev = dev;
	isp->isp_nchan = 1;
	mtx_init(&isp->isp_lock, "isp", NULL, MTX_DEF);

	/*
	 * Get Generic Options
	 */
	isp_nvports = 0;
	isp_get_generic_options(dev, isp);

	linesz = PCI_DFLT_LNSZ;
	pcs->regs = pcs->regs2 = NULL;
	pcs->rgd = pcs->rtp = 0;

	pcs->pci_dev = dev;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	switch (pci_get_devid(dev)) {
	case PCI_QLOGIC_ISP1020:
		did = 0x1040;
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		break;
	case PCI_QLOGIC_ISP1080:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1240:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_nchan = 2;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1280:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP10160:
		did = 0x12160;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_10160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP12160:
		did = 0x12160;
		isp->isp_nchan = 2;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP2100:
		did = 0x2100;
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
		break;
	case PCI_QLOGIC_ISP2200:
		did = 0x2200;
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		break;
	case PCI_QLOGIC_ISP2300:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2300;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2312:
	case PCI_QLOGIC_ISP6312:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2312;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2322:
	case PCI_QLOGIC_ISP6322:
		did = 0x2322;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2322;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2422:
	case PCI_QLOGIC_ISP2432:
		did = 0x2400;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2400;
		isp->isp_type = ISP_HA_FC_2400;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2532:
		did = 0x2500;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP5432:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2031:
	case PCI_QLOGIC_ISP8031:
		did = 0x2600;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2600;
		isp->isp_type = ISP_HA_FC_2600;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	default:
		device_printf(dev, "unknown device type\n");
		goto bad;
		break;
	}
	isp->isp_revision = pci_get_revid(dev);

	if (IS_26XX(isp)) {
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(0);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		pcs->rtp1 = SYS_RES_MEMORY;
		pcs->rgd1 = PCIR_BAR(2);
		pcs->regs1 = bus_alloc_resource_any(dev, pcs->rtp1, &pcs->rgd1,
		    RF_ACTIVE);
		pcs->rtp2 = SYS_RES_MEMORY;
		pcs->rgd2 = PCIR_BAR(4);
		pcs->regs2 = bus_alloc_resource_any(dev, pcs->rtp2, &pcs->rgd2,
		    RF_ACTIVE);
	} else {
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(1);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		if (pcs->regs == NULL) {
			pcs->rtp = SYS_RES_IOPORT;
			pcs->rgd = PCIR_BAR(0);
			pcs->regs = bus_alloc_resource_any(dev, pcs->rtp,
			    &pcs->rgd, RF_ACTIVE);
		}
	}
	if (pcs->regs == NULL) {
		device_printf(dev, "Unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "Using %s space register mapping\n",
		    (pcs->rtp == SYS_RES_IOPORT)? "I/O" : "Memory");
	}
	isp->isp_regs = pcs->regs;
	isp->isp_regs2 = pcs->regs2;

	if (IS_FC(isp)) {
		psize = sizeof (fcparam);
		xsize = sizeof (struct isp_fc);
	} else {
		psize = sizeof (sdparam);
		xsize = sizeof (struct isp_spi);
	}
	psize *= isp->isp_nchan;
	xsize *= isp->isp_nchan;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_osinfo.pc.ptr == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}

	/*
	 * Now that we know who we are (roughly) get/set specific options
	 */
	for (i = 0; i < isp->isp_nchan; i++) {
		isp_get_specific_options(dev, i, isp);
	}

	isp->isp_osinfo.fw = NULL;
	if (isp->isp_osinfo.fw == NULL) {
		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw != NULL) {
		isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	if (IS_24XX(isp)) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
		data = linesz;
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	if (isp_reinit(isp, 1) != 0) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	ISP_UNLOCK(isp);
	if (isp_attach(isp)) {
		ISP_LOCK(isp);
		isp_shutdown(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	return (0);

bad:
	for (i = 0; i < isp->isp_nirq; i++) {
		(void) bus_teardown_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih);
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->irq[i].iqd,
		    pcs->irq[i].irq);
	}
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	if (pcs->regs)
		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs1)
		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	mtx_destroy(&isp->isp_lock);
	return (ENXIO);
}

static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	ispsoftc_t *isp = &pcs->pci_isp;
	int i, status;

	status = isp_detach(isp);
	if (status)
		return (status);
	ISP_LOCK(isp);
	isp_shutdown(isp);
	ISP_UNLOCK(isp);
	for (i = 0; i < isp->isp_nirq; i++) {
		(void) bus_teardown_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih);
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->irq[i].iqd,
		    pcs->irq[i].irq);
	}
	if (pcs->msicount)
		pci_release_msi(dev);
	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs1)
		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	isp_pci_mbxdmafree(isp);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	mtx_destroy(&isp->isp_lock);
	return (0);
}

#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

#define	BXR2(isp, off)		bus_read_2((isp)->isp_regs, (off))
#define	BXW2(isp, off, v)	bus_write_2((isp)->isp_regs, (off), (v))
#define	BXR4(isp, off)		bus_read_4((isp)->isp_regs, (off))
#define	BXW4(isp, off, v)	bus_write_4((isp)->isp_regs, (off), (v))
#define	B2R4(isp, off)		bus_read_4((isp)->isp_regs2, (off))
#define	B2W4(isp, off, v)	bus_write_4((isp)->isp_regs2, (off), (v))

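/*
 * Read a 16-bit register until two successive reads return the same
 * value; used on the ISP2100, whose registers may otherwise be caught
 * mid-update.
 */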
static ISP_INLINE uint16_t
isp_pci_rd_debounced(ispsoftc_t *isp, int off)
{
	uint16_t val, prev;

	val = BXR2(isp, IspVirt2Off(isp, off));
	do {
		prev = val;
		val = BXR2(isp, IspVirt2Off(isp, off));
	} while (val != prev);
	return (val);
}

static void
isp_pci_run_isr(ispsoftc_t *isp)
{
	uint16_t isr, sema, info;

	if (IS_2100(isp)) {
		isr = isp_pci_rd_debounced(isp, BIU_ISR);
		sema = isp_pci_rd_debounced(isp, BIU_SEMA);
	} else {
		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0)
		return;
	if (sema != 0) {
		if (IS_2100(isp))
			info = isp_pci_rd_debounced(isp, OUTMAILBOX0);
		else
			info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
		if (info & MBOX_COMMAND_COMPLETE)
			isp_intr_mbox(isp, info);
		else
			isp_intr_async(isp, info);
		if (!IS_FC(isp) && isp->isp_state == ISP_RUNSTATE)
			isp_intr_respq(isp);
	} else
		isp_intr_respq(isp);
	ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
	if (sema)
		ISP_WRITE(isp, BIU_SEMA, 0);
}

static void
isp_pci_run_isr_2300(ispsoftc_t *isp)
{
	uint32_t hccr, r2hisr;
	uint16_t isr, info;

	if ((BXR2(isp, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0)
		return;
	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0)
		return;
	isr = r2hisr & BIU_R2HST_ISTAT_MASK;
	info = r2hisr >> 16;
	switch (isr) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
		isp_intr_mbox(isp, info);
		break;
	case ISPR2HST_ASYNC_EVENT:
		isp_intr_async(isp, info);
		break;
	case ISPR2HST_RIO_16:
		isp_intr_async(isp, ASYNC_RIO16_1);
		break;
	case ISPR2HST_FPOST:
		isp_intr_async(isp, ASYNC_CMD_CMPLT);
		break;
	case ISPR2HST_FPOST_CTIO:
		isp_intr_async(isp, ASYNC_CTIO_DONE);
		break;
	case ISPR2HST_RSPQ_UPDATE:
		isp_intr_respq(isp);
		break;
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		}
	}
	ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
	ISP_WRITE(isp, BIU_SEMA, 0);
}

static void
isp_pci_run_isr_2400(ispsoftc_t *isp)
{
	uint32_t r2hisr;
	uint16_t isr, info;

	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0)
		return;
	isr = r2hisr & BIU_R2HST_ISTAT_MASK;
	info = (r2hisr >> 16);
	switch (isr) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
		isp_intr_mbox(isp, info);
		break;
	case ISPR2HST_ASYNC_EVENT:
		isp_intr_async(isp, info);
		break;
	case ISPR2HST_RSPQ_UPDATE:
		isp_intr_respq(isp);
		break;
	case ISPR2HST_RSPQ_UPDATE2:
#ifdef	ISP_TARGET_MODE
	case ISPR2HST_ATIO_RSPQ_UPDATE:
#endif
		isp_intr_respq(isp);
		/* FALLTHROUGH */
#ifdef	ISP_TARGET_MODE
	case ISPR2HST_ATIO_UPDATE:
	case ISPR2HST_ATIO_UPDATE2:
		isp_intr_atioq(isp);
#endif
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
	}
	ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
}

static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}

}

static uint32_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint32_t rv, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
}

static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		return (BXR2(isp, IspVirt2Off(isp, regoff)));
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGERR, "SXP_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGERR, "RISC_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGERR, "DMA_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	default:
		isp_prt(isp, ISP_LOGERR, "unknown block read at 0x%x", regoff);
		return (0xffffffff);
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSLO:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSHI:
		rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
		    regoff);
		rv = 0xffffffff;
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		BXW2(isp, IspVirt2Off(isp, regoff), val);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
		return;
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGERR, "SXP_BLOCK write at 0x%x", regoff);
		return;
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGERR, "RISC_BLOCK write at 0x%x", regoff);
		return;
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGERR, "DMA_BLOCK write at 0x%x", regoff);
		return;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown block write at 0x%x", regoff);
		break;
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		BXW4(isp, IspVirt2Off(isp, regoff), val);
#ifdef MEMORYBARRIERW
		if (regoff == BIU2400_REQINP ||
		    regoff == BIU2400_RSPOUTP ||
		    regoff == BIU2400_PRI_REQINP ||
		    regoff == BIU2400_ATIO_RSPOUTP)
			MEMORYBARRIERW(isp, SYNC_REG,
			    IspVirt2Off(isp, regoff), 4, -1)
		else
#endif
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
		    regoff);
		break;
	}
}

static uint32_t
isp_pci_rd_reg_2600(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;

	switch (regoff) {
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
		    regoff);
		rv = 0xffffffff;
		break;
	case BIU2400_REQINP:
		rv = B2R4(isp, 0x00);
		break;
	case BIU2400_REQOUTP:
		rv = B2R4(isp, 0x04);
		break;
	case BIU2400_RSPINP:
		rv = B2R4(isp, 0x08);
		break;
	case BIU2400_RSPOUTP:
		rv = B2R4(isp, 0x0c);
		break;
	case BIU2400_ATIO_RSPINP:
		rv = B2R4(isp, 0x10);
		break;
	case BIU2400_ATIO_RSPOUTP:
		rv = B2R4(isp, 0x14);
		break;
	default:
		rv = isp_pci_rd_reg_2400(isp, regoff);
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2600(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int off;

	switch (regoff) {
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
		    regoff);
		return;
	case BIU2400_REQINP:
		off = 0x00;
		break;
	case BIU2400_REQOUTP:
		off = 0x04;
		break;
	case BIU2400_RSPINP:
		off = 0x08;
		break;
	case BIU2400_RSPOUTP:
		off = 0x0c;
		break;
	case BIU2400_ATIO_RSPINP:
		off = 0x10;
		break;
	case BIU2400_ATIO_RSPOUTP:
		off = 0x14;
		break;
	default:
		isp_pci_wr_reg_2400(isp, regoff, val);
		return;
	}
	B2W4(isp, off, val);
}

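/*
 * Glue for bus_dmamap_load() of the single-segment control areas below:
 * imc() records the bus address of the one segment (or the error) in a
 * struct imush supplied by the caller.
 */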
struct imush {
	bus_addr_t maddr;
	int error;
};

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;

	if (!(imushp->error = error))
		imushp->maddr = segs[0].ds_addr;
}

static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	caddr_t base;
	uint32_t len, nsegs;
	int i, error, cmap = 0;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;
	isp_ecmd_t *ecmd;

	/* Already been here? If so, leave... */
	if (isp->isp_xflist != NULL)
		return (0);
	if (isp->isp_rquest != NULL && isp->isp_maxcmds == 0)
		return (0);
	ISP_UNLOCK(isp);
	if (isp->isp_rquest != NULL)
		goto gotmaxcmds;

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		if (sizeof (bus_size_t) > 4)
			slim = (bus_size_t) (1ULL << 32);
		else
			slim = (bus_size_t) (1UL << 31);
		llim = BUS_SPACE_MAXADDR;
	} else {
		slim = (1UL << 24);
		llim = BUS_SPACE_MAXADDR_32BIT;
	}
	if (sizeof (bus_size_t) > 4)
		nsegs = ISP_NSEG64_MAX;
	else
		nsegs = ISP_NSEG_MAX;

	if (bus_dma_tag_create(bus_get_dma_tag(ISP_PCD(isp)), 1,
	    slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0,
	    busdma_lock_mutex, &isp->isp_lock, &isp->isp_osinfo.dmat)) {
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}

	/*
	 * Allocate and map the request queue and a region for external
	 * DMA addressable command/status structures (22XX and later).
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (isp->isp_type >= ISP_HA_FC_2200)
		len += (N_XCMDS * XCMD_SIZE);
	if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, busdma_lock_mutex, &isp->isp_lock,
	    &isp->isp_osinfo.reqdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create request DMA tag");
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.reqdmat, (void **)&base,
	    BUS_DMA_COHERENT, &isp->isp_osinfo.reqmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate request DMA memory");
		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
		goto bad;
	}
	isp->isp_rquest = base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap,
	    base, len, imc, &im, 0) || im.error) {
		isp_prt(isp, ISP_LOGERR, "error loading request DMA map %d", im.error);
		goto bad;
	}
	isp_prt(isp, ISP_LOGDEBUG0, "request area @ 0x%jx/0x%jx",
	    (uintmax_t)im.maddr, (uintmax_t)len);
	isp->isp_rquest_dma = im.maddr;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	im.maddr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (isp->isp_type >= ISP_HA_FC_2200) {
		isp->isp_osinfo.ecmd_dma = im.maddr;
		isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)base;
		isp->isp_osinfo.ecmd_base = isp->isp_osinfo.ecmd_free;
		for (ecmd = isp->isp_osinfo.ecmd_free;
		    ecmd < &isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
			if (ecmd == &isp->isp_osinfo.ecmd_free[N_XCMDS - 1])
				ecmd->next = NULL;
			else
				ecmd->next = ecmd + 1;
		}
	}

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, busdma_lock_mutex, &isp->isp_lock,
	    &isp->isp_osinfo.respdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create response DMA tag");
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.respdmat, (void **)&base,
	    BUS_DMA_COHERENT, &isp->isp_osinfo.respmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate response DMA memory");
		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
		goto bad;
	}
	isp->isp_result = base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap,
	    base, len, imc, &im, 0) || im.error) {
		isp_prt(isp, ISP_LOGERR, "error loading response DMA map %d", im.error);
		goto bad;
	}
	isp_prt(isp, ISP_LOGDEBUG0, "response area @ 0x%jx/0x%jx",
	    (uintmax_t)im.maddr, (uintmax_t)len);
	isp->isp_result_dma = im.maddr;

#ifdef	ISP_TARGET_MODE
	/*
	 * Allocate and map ATIO queue on 24xx with target mode.
	 */
	if (IS_24XX(isp)) {
		len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		    len, 1, len, 0, busdma_lock_mutex, &isp->isp_lock,
		    &isp->isp_osinfo.atiodmat)) {
			isp_prt(isp, ISP_LOGERR, "cannot create ATIO DMA tag");
			goto bad;
		}
		if (bus_dmamem_alloc(isp->isp_osinfo.atiodmat, (void **)&base,
		    BUS_DMA_COHERENT, &isp->isp_osinfo.atiomap) != 0) {
			isp_prt(isp, ISP_LOGERR, "cannot allocate ATIO DMA memory");
			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
			goto bad;
		}
		isp->isp_atioq = base;
		im.error = 0;
		if (bus_dmamap_load(isp->isp_osinfo.atiodmat, isp->isp_osinfo.atiomap,
		    base, len, imc, &im, 0) || im.error) {
			isp_prt(isp, ISP_LOGERR, "error loading ATIO DMA map %d", im.error);
			goto bad;
		}
		isp_prt(isp, ISP_LOGDEBUG0, "ATIO area @ 0x%jx/0x%jx",
		    (uintmax_t)im.maddr, (uintmax_t)len);
		isp->isp_atioq_dma = im.maddr;
	}
#endif

	if (IS_FC(isp)) {
		if (bus_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    2*QENTRY_LEN, 1, 2*QENTRY_LEN, 0, busdma_lock_mutex,
		    &isp->isp_lock, &isp->isp_osinfo.iocbdmat)) {
			goto bad;
		}
		if (bus_dmamem_alloc(isp->isp_osinfo.iocbdmat,
		    (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.iocbmap) != 0)
			goto bad;
		isp->isp_iocb = base;
		im.error = 0;
		if (bus_dmamap_load(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap,
		    base, 2*QENTRY_LEN, imc, &im, 0) || im.error)
			goto bad;
		isp->isp_iocb_dma = im.maddr;

		if (bus_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    ISP_FC_SCRLEN, 1, ISP_FC_SCRLEN, 0, busdma_lock_mutex,
		    &isp->isp_lock, &isp->isp_osinfo.scdmat))
			goto bad;
		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
			if (bus_dmamem_alloc(isp->isp_osinfo.scdmat,
			    (void **)&base, BUS_DMA_COHERENT, &fc->scmap) != 0)
				goto bad;
			FCPARAM(isp, cmap)->isp_scratch = base;
			im.error = 0;
			if (bus_dmamap_load(isp->isp_osinfo.scdmat, fc->scmap,
			    base, ISP_FC_SCRLEN, imc, &im, 0) || im.error) {
				bus_dmamem_free(isp->isp_osinfo.scdmat,
				    base, fc->scmap);
				FCPARAM(isp, cmap)->isp_scratch = NULL;
				goto bad;
			}
			FCPARAM(isp, cmap)->isp_scdma = im.maddr;
			if (!IS_2100(isp)) {
				for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
					struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
					if (n == NULL) {
						while (fc->nexus_free_list) {
							n = fc->nexus_free_list;
							fc->nexus_free_list = n->next;
							free(n, M_DEVBUF);
						}
						goto bad;
					}
					n->next = fc->nexus_free_list;
					fc->nexus_free_list = n;
				}
			}
		}
	}

	if (isp->isp_maxcmds == 0) {
		ISP_LOCK(isp);
		return (0);
	}

gotmaxcmds:
	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *)
	    malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < isp->isp_maxcmds; i++) {
		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
		if (error) {
			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(isp->isp_osinfo.dmat,
				    isp->isp_osinfo.pcmd_pool[i].dmap);
			}
			goto bad;
		}
		callout_init_mtx(&pcmd->wdog, &isp->isp_lock, 0);
		if (i == isp->isp_maxcmds-1)
			pcmd->next = NULL;
		else
			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
	}
	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];

	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	for (len = 0; len < isp->isp_maxcmds - 1; len++)
		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
	isp->isp_xffree = isp->isp_xflist;

	ISP_LOCK(isp);
	return (0);

bad:
	isp_pci_mbxdmafree(isp);
	ISP_LOCK(isp);
	return (1);
}

static void
isp_pci_mbxdmafree(ispsoftc_t *isp)
{
	int i;

	if (isp->isp_xflist != NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
	}
	if (isp->isp_osinfo.pcmd_pool != NULL) {
		for (i = 0; i < isp->isp_maxcmds; i++) {
			bus_dmamap_destroy(isp->isp_osinfo.dmat,
			    isp->isp_osinfo.pcmd_pool[i].dmap);
		}
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		isp->isp_osinfo.pcmd_pool = NULL;
	}
	if (IS_FC(isp)) {
		for (i = 0; i < isp->isp_nchan; i++) {
			struct isp_fc *fc = ISP_FC_PC(isp, i);
			if (FCPARAM(isp, i)->isp_scdma != 0) {
				bus_dmamap_unload(isp->isp_osinfo.scdmat,
				    fc->scmap);
				FCPARAM(isp, i)->isp_scdma = 0;
			}
			if (FCPARAM(isp, i)->isp_scratch != NULL) {
				bus_dmamem_free(isp->isp_osinfo.scdmat,
				    FCPARAM(isp, i)->isp_scratch, fc->scmap);
				FCPARAM(isp, i)->isp_scratch = NULL;
			}
			while (fc->nexus_free_list) {
				struct isp_nexus *n = fc->nexus_free_list;
				fc->nexus_free_list = n->next;
				free(n, M_DEVBUF);
			}
		}
		if (isp->isp_iocb_dma != 0) {
			bus_dma_tag_destroy(isp->isp_osinfo.scdmat);
			bus_dmamap_unload(isp->isp_osinfo.iocbdmat,
			    isp->isp_osinfo.iocbmap);
			isp->isp_iocb_dma = 0;
		}
		if (isp->isp_iocb != NULL) {
			bus_dmamem_free(isp->isp_osinfo.iocbdmat,
			    isp->isp_iocb, isp->isp_osinfo.iocbmap);
			bus_dma_tag_destroy(isp->isp_osinfo.iocbdmat);
		}
	}
#ifdef	ISP_TARGET_MODE
	if (IS_24XX(isp)) {
		if (isp->isp_atioq_dma != 0) {
			bus_dmamap_unload(isp->isp_osinfo.atiodmat,
			    isp->isp_osinfo.atiomap);
			isp->isp_atioq_dma = 0;
		}
		if (isp->isp_atioq != NULL) {
			bus_dmamem_free(isp->isp_osinfo.atiodmat, isp->isp_atioq,
			    isp->isp_osinfo.atiomap);
			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
			isp->isp_atioq = NULL;
		}
	}
#endif
	if (isp->isp_result_dma != 0) {
		bus_dmamap_unload(isp->isp_osinfo.respdmat,
		    isp->isp_osinfo.respmap);
		isp->isp_result_dma = 0;
	}
	if (isp->isp_result != NULL) {
		bus_dmamem_free(isp->isp_osinfo.respdmat, isp->isp_result,
		    isp->isp_osinfo.respmap);
		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
		isp->isp_result = NULL;
	}
	if (isp->isp_rquest_dma != 0) {
		bus_dmamap_unload(isp->isp_osinfo.reqdmat,
		    isp->isp_osinfo.reqmap);
		isp->isp_rquest_dma = 0;
	}
	if (isp->isp_rquest != NULL) {
		bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_rquest,
		    isp->isp_osinfo.reqmap);
		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
		isp->isp_rquest = NULL;
	}
}

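/*
 * Per-command DMA mapping glue: dma2() is the bus_dmamap_load_ccb()
 * callback that syncs the data map and hands the scatter/gather list
 * to isp_send_cmd(), recording queue-full as MUSHERR_NOQENTRIES.
 */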
typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;	/* original request */
	int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp = (mush_t *) arg;
	ispsoftc_t *isp = mp->isp;
	struct ccb_scsiio *csio = mp->cmd_token;
	isp_ddir_t ddir;
	int sdir;

	if (error) {
		mp->error = error;
		return;
	}
	if (nseg == 0) {
		ddir = ISP_NOXFR;
	} else {
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			ddir = ISP_FROM_DEVICE;
		} else {
			ddir = ISP_TO_DEVICE;
		}
		if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
		    ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)) {
			sdir = BUS_DMASYNC_PREREAD;
		} else {
			sdir = BUS_DMASYNC_PREWRITE;
		}
		bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
		    sdir);
	}

	error = isp_send_cmd(isp, mp->rq, dm_segs, nseg, XS_XFRLEN(csio),
	    ddir, (ispds64_t *)csio->req_map);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
		break;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
		break;
	}
}

static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
	mush_t mush, *mp;
	int error;

	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = ff;
	mp->error = 0;

	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
	    (union ccb *)csio, dma2, mp, 0);
	if (error == EINPROGRESS) {
		bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
		mp->error = EINVAL;
		isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
	} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
		isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
		mp->error = error;
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			csio->ccb_h.status = CAM_REQ_TOO_BIG;
		} else if (mp->error == EINVAL) {
			csio->ccb_h.status = CAM_REQ_INVALID;
		} else {
			csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
		}
		return (retval);
	}
	return (CMD_QUEUED);
}

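/*
 * Allocate MSI-X/MSI (or fall back to legacy INTx) vectors and hook up
 * the interrupt handlers; 26xx parts can use up to three vectors
 * (default, response queue, ATIO queue), older parts at most two.
 */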
static int
isp_pci_irqsetup(ispsoftc_t *isp)
{
	device_t dev = isp->isp_osinfo.dev;
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	driver_intr_t *f;
	int i, max_irq;

	/* Allocate IRQs only once. */
	if (isp->isp_nirq > 0)
		return (0);

	ISP_UNLOCK(isp);
	if (ISP_CAP_MSIX(isp)) {
		max_irq = min(ISP_MAX_IRQS, IS_26XX(isp) ? 3 : 2);
		pcs->msicount = imin(pci_msix_count(dev), max_irq);
		if (pcs->msicount > 0 &&
		    pci_alloc_msix(dev, &pcs->msicount) != 0)
			pcs->msicount = 0;
	}
	if (pcs->msicount == 0) {
		pcs->msicount = imin(pci_msi_count(dev), 1);
		if (pcs->msicount > 0 &&
		    pci_alloc_msi(dev, &pcs->msicount) != 0)
			pcs->msicount = 0;
	}
	for (i = 0; i < MAX(1, pcs->msicount); i++) {
		pcs->irq[i].iqd = i + (pcs->msicount > 0);
		pcs->irq[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &pcs->irq[i].iqd, RF_ACTIVE | RF_SHAREABLE);
		if (pcs->irq[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			break;
		}
		if (i == 0)
			f = isp_platform_intr;
		else if (i == 1)
			f = isp_platform_intr_resp;
		else
			f = isp_platform_intr_atio;
		if (bus_setup_intr(dev, pcs->irq[i].irq, ISP_IFLAGS, NULL,
		    f, isp, &pcs->irq[i].ih)) {
			device_printf(dev, "could not setup interrupt\n");
			(void) bus_release_resource(dev, SYS_RES_IRQ,
			    pcs->irq[i].iqd, pcs->irq[i].irq);
			break;
		}
		if (pcs->msicount > 1) {
			bus_describe_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih,
			    "%d", i);
		}
		isp->isp_nirq = i + 1;
	}
	ISP_LOCK(isp);

	return (isp->isp_nirq == 0);
}

static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;

	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}