/*-
 * Copyright (c) 1997-2008 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/bus.h>
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

#include <dev/isp/isp_freebsd.h>

static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2600(ispsoftc_t *, int);
static void isp_pci_wr_reg_2600(ispsoftc_t *, int, uint32_t);
static void isp_pci_run_isr(ispsoftc_t *);
static void isp_pci_run_isr_2300(ispsoftc_t *);
static void isp_pci_run_isr_2400(ispsoftc_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static void isp_pci_mbxdmafree(ispsoftc_t *);
static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);
static int isp_pci_irqsetup(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_run_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_run_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};
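
/*
 * The 10160/12160 use the same bank-selecting register accessors as the
 * 1080/1280 (isp_pci_rd_reg_1080/isp_pci_wr_reg_1080), which route SXP and
 * DMA register accesses through BIU_CONF1.
 */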
static struct ispmdvec mdvec_12160 = {
	isp_pci_run_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_run_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_run_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_run_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2400 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

static struct ispmdvec mdvec_2500 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

static struct ispmdvec mdvec_2600 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2600,
	isp_pci_wr_reg_2600,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

#ifndef PCIM_CMD_INVEN
#define PCIM_CMD_INVEN		0x10
#endif
#ifndef PCIM_CMD_BUSMASTEREN
#define PCIM_CMD_BUSMASTEREN	0x0004
#endif
#ifndef PCIM_CMD_PERRESPEN
#define PCIM_CMD_PERRESPEN	0x0040
#endif
#ifndef PCIM_CMD_SEREN
#define PCIM_CMD_SEREN		0x0100
#endif
#ifndef PCIM_CMD_INTX_DISABLE
#define PCIM_CMD_INTX_DISABLE	0x0400
#endif

#ifndef PCIR_COMMAND
#define PCIR_COMMAND		0x04
#endif

#ifndef PCIR_CACHELNSZ
#define PCIR_CACHELNSZ		0x0c
#endif

#ifndef PCIR_LATTIMER
#define PCIR_LATTIMER		0x0d
#endif

#ifndef PCIR_ROMADDR
#define PCIR_ROMADDR		0x30
#endif

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP10160
#define PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2300
#define PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2312
#define PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2322
#define PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2422
#define PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2432
#define PCI_PRODUCT_QLOGIC_ISP2432	0x2432
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2532
#define PCI_PRODUCT_QLOGIC_ISP2532	0x2532
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6312
#define PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6322
#define PCI_PRODUCT_QLOGIC_ISP6322	0x6322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP5432
#define PCI_PRODUCT_QLOGIC_ISP5432	0x5432
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2031
#define PCI_PRODUCT_QLOGIC_ISP2031	0x2031
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8031
#define PCI_PRODUCT_QLOGIC_ISP8031	0x8031
#endif

#define PCI_QLOGIC_ISP5432 \
	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1020 \
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080 \
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP10160 \
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP12160 \
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240 \
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1280 \
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100 \
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200 \
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2300 \
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2312 \
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2322 \
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2422 \
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2432 \
	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2532 \
	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP6312 \
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP6322 \
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2031 \
	((PCI_PRODUCT_QLOGIC_ISP2031 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP8031 \
	((PCI_PRODUCT_QLOGIC_ISP8031 << 16) | PCI_VENDOR_QLOGIC)
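
/*
 * The PCI_QLOGIC_* values above have the same layout as the value returned
 * by pci_get_devid(9), i.e. (device ID << 16) | vendor ID, which is why
 * isp_pci_probe() and isp_pci_attach() below can switch on that value
 * directly.
 */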

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define AMI_RAID_SUBVENDOR_ID	0x101e

#define PCI_DFLT_LTNCY	0x40
#define PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);

#define ISP_PCD(isp)	((struct isp_pcisoftc *)isp)->pci_dev
struct isp_pcisoftc {
	ispsoftc_t		pci_isp;
	device_t		pci_dev;
	struct resource *	regs;
	struct resource *	regs1;
	struct resource *	regs2;
	void *			irq;
	int			iqd;
	int			rtp;
	int			rgd;
	int			rtp1;
	int			rgd1;
	int			rtp2;
	int			rgd2;
	void *			ih;
	int16_t			pci_poff[_NREG_BLKS];
	bus_dma_tag_t		dmat;
	int			msicount;
};

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	DEVMETHOD(device_detach,	isp_pci_detach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_DEPEND(isp, cam, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
static int isp_nvports = 0;

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2432:
		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2532:
		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP5432:
		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2031:
		device_set_desc(dev, "Qlogic ISP 2031 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP8031:
		device_set_desc(dev, "Qlogic ISP 8031 PCI FCoE Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

static void
isp_get_generic_options(device_t dev, ispsoftc_t *isp)
{
	int tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
	if (tval > 0 && tval <= 254) {
		isp_nvports = tval;
	}
	tval = 7;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
	isp_quickboot_time = tval;
}

static void
isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
{
	const char *sptr;
	int tval = 0;
	char prefix[12], name[16];

	if (chan == 0)
		prefix[0] = 0;
	else
		snprintf(prefix, sizeof(prefix), "chan%d.", chan);
	snprintf(name, sizeof(name), "%siid", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval)) {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
		} else {
#ifdef __sparc64__
			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
#else
			ISP_SPI_PC(isp, chan)->iid = 7;
#endif
		}
	} else {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = tval - chan;
		} else {
			ISP_SPI_PC(isp, chan)->iid = tval;
		}
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}

	if (IS_SCSI(isp))
		return;

	tval = -1;
	snprintf(name, sizeof(name), "%srole", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0) {
		switch (tval) {
		case ISP_ROLE_NONE:
		case ISP_ROLE_INITIATOR:
		case ISP_ROLE_TARGET:
		case ISP_ROLE_BOTH:
			device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
			break;
		default:
			tval = -1;
			break;
		}
	}
	if (tval == -1) {
		tval = ISP_DEFAULT_ROLES;
	}
	ISP_FC_PC(isp, chan)->def_role = tval;

	tval = 0;
	snprintf(name, sizeof(name), "%sfullduplex", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	sptr = NULL;
	snprintf(name, sizeof(name), "%stopology", prefix);
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr) == 0 && sptr != NULL) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

#ifdef ISP_FCTAPE_OFF
	isp->isp_confopts |= ISP_CFG_NOFCTAPE;
#else
	isp->isp_confopts |= ISP_CFG_FCTAPE;
#endif

	tval = 0;
	snprintf(name, sizeof(name), "%snofctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_FCTAPE;
		isp->isp_confopts |= ISP_CFG_NOFCTAPE;
	}

	tval = 0;
	snprintf(name, sizeof(name), "%sfctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
		isp->isp_confopts |= ISP_CFG_FCTAPE;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = NULL;
	snprintf(name, sizeof(name), "%sportwwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
		char *eptr = NULL;
		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwpn = 0;
		}
	}

	sptr = NULL;
	snprintf(name, sizeof(name), "%snodewwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
		char *eptr = NULL;
		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwnn = 0;
		}
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
	} else {
		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sgone_device_time", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->gone_device_time = tval;
	} else {
		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
	}
}
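
/*
 * Attach: map the chip registers, read per-channel hints, grab a firmware
 * image if a matching firmware(9) module is present, fix up PCI config
 * state (command register, cache line size, latency timer, ROM), set up
 * the interrupt and then hand the device to the common attach code.
 */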
static int
isp_pci_attach(device_t dev)
{
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	ispsoftc_t *isp = &pcs->pci_isp;
	int i;
	uint32_t data, cmd, linesz, did;
	size_t psize, xsize;
	char fwname[32];

	pcs->pci_dev = dev;
	isp->isp_dev = dev;
	isp->isp_nchan = 1;
	if (sizeof (bus_addr_t) > 4)
		isp->isp_osinfo.sixtyfourbit = 1;
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);

	/*
	 * Get Generic Options
	 */
	isp_nvports = 0;
	isp_get_generic_options(dev, isp);

	linesz = PCI_DFLT_LNSZ;
	pcs->irq = pcs->regs = pcs->regs2 = NULL;
	pcs->rgd = pcs->rtp = pcs->iqd = 0;

	pcs->pci_dev = dev;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	switch (pci_get_devid(dev)) {
	case PCI_QLOGIC_ISP1020:
		did = 0x1040;
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		break;
	case PCI_QLOGIC_ISP1080:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1240:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_nchan = 2;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1280:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP10160:
		did = 0x12160;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_10160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP12160:
		did = 0x12160;
		isp->isp_nchan = 2;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP2100:
		did = 0x2100;
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
		break;
	case PCI_QLOGIC_ISP2200:
		did = 0x2200;
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		break;
	case PCI_QLOGIC_ISP2300:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2300;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2312:
	case PCI_QLOGIC_ISP6312:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2312;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2322:
	case PCI_QLOGIC_ISP6322:
		did = 0x2322;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2322;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2422:
	case PCI_QLOGIC_ISP2432:
		did = 0x2400;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2400;
		isp->isp_type = ISP_HA_FC_2400;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2532:
		did = 0x2500;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP5432:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2031:
	case PCI_QLOGIC_ISP8031:
		did = 0x2600;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2600;
		isp->isp_type = ISP_HA_FC_2600;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	default:
		device_printf(dev, "unknown device type\n");
		goto bad;
		break;
	}
	isp->isp_revision = pci_get_revid(dev);

	if (IS_26XX(isp)) {
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(0);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		pcs->rtp1 = SYS_RES_MEMORY;
		pcs->rgd1 = PCIR_BAR(2);
		pcs->regs1 = bus_alloc_resource_any(dev, pcs->rtp1, &pcs->rgd1,
		    RF_ACTIVE);
		pcs->rtp2 = SYS_RES_MEMORY;
		pcs->rgd2 = PCIR_BAR(4);
		pcs->regs2 = bus_alloc_resource_any(dev, pcs->rtp2, &pcs->rgd2,
		    RF_ACTIVE);
	} else {
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(1);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		if (pcs->regs == NULL) {
			pcs->rtp = SYS_RES_IOPORT;
			pcs->rgd = PCIR_BAR(0);
			pcs->regs = bus_alloc_resource_any(dev, pcs->rtp,
			    &pcs->rgd, RF_ACTIVE);
		}
	}
	if (pcs->regs == NULL) {
		device_printf(dev, "Unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "Using %s space register mapping\n",
		    (pcs->rtp == SYS_RES_IOPORT)? "I/O" : "Memory");
	}
	isp->isp_regs = pcs->regs;
	isp->isp_regs2 = pcs->regs2;

	if (IS_FC(isp)) {
		psize = sizeof (fcparam);
		xsize = sizeof (struct isp_fc);
	} else {
		psize = sizeof (sdparam);
		xsize = sizeof (struct isp_spi);
	}
	psize *= isp->isp_nchan;
	xsize *= isp->isp_nchan;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_osinfo.pc.ptr == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}

	/*
	 * Now that we know who we are (roughly) get/set specific options
	 */
	for (i = 0; i < isp->isp_nchan; i++) {
		isp_get_specific_options(dev, i, isp);
	}

	isp->isp_osinfo.fw = NULL;
	if (isp->isp_osinfo.fw == NULL) {
		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw != NULL) {
		isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	if (IS_24XX(isp)) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
		data = linesz;
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	if (IS_26XX(isp)) {
		/* 26XX chips support only MSI-X, so start from them. */
		pcs->msicount = imin(pci_msix_count(dev), 1);
		if (pcs->msicount > 0 &&
		    (i = pci_alloc_msix(dev, &pcs->msicount)) == 0) {
			pcs->iqd = 1;
		} else {
			pcs->msicount = 0;
		}
	}
	if (pcs->msicount == 0 && (IS_24XX(isp) || IS_2322(isp))) {
		/*
		 * Older chips support both MSI and MSI-X, but I have a
		 * feeling that older firmware may not support MSI-X, and
		 * we have no way to check the firmware flag here.
		 */
		pcs->msicount = imin(pci_msi_count(dev), 1);
		if (pcs->msicount > 0 &&
		    pci_alloc_msi(dev, &pcs->msicount) == 0) {
			pcs->iqd = 1;
		} else {
			pcs->msicount = 0;
		}
	}
	pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE);
	if (pcs->irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	if (isp_reinit(isp, 1) != 0) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	ISP_UNLOCK(isp);
	if (isp_attach(isp)) {
		ISP_LOCK(isp);
		isp_shutdown(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	return (0);

bad:
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	if (pcs->irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	}
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	if (pcs->regs)
		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs1)
		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	mtx_destroy(&isp->isp_osinfo.lock);
	return (ENXIO);
}

static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	ispsoftc_t *isp = &pcs->pci_isp;
	int status;

	status = isp_detach(isp);
	if (status)
		return (status);
	ISP_LOCK(isp);
	isp_shutdown(isp);
	ISP_UNLOCK(isp);
	if (pcs->ih)
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	if (pcs->msicount)
		pci_release_msi(dev);
	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs1)
		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	isp_pci_mbxdmafree(isp);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	mtx_destroy(&isp->isp_osinfo.lock);
	return (0);
}

#define IspVirt2Off(a, x) \
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

#define BXR2(isp, off)		bus_read_2((isp)->isp_regs, (off))
#define BXW2(isp, off, v)	bus_write_2((isp)->isp_regs, (off), (v))
#define BXR4(isp, off)		bus_read_4((isp)->isp_regs, (off))
#define BXW4(isp, off, v)	bus_write_4((isp)->isp_regs, (off), (v))
#define B2R4(isp, off)		bus_read_4((isp)->isp_regs2, (off))
#define B2W4(isp, off, v)	bus_write_4((isp)->isp_regs2, (off), (v))
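
/*
 * Read a 16-bit register until two consecutive reads return the same value.
 * isp_pci_run_isr() only does this for IS_2100 chips, whose ISR, semaphore
 * and mailbox registers apparently need to be re-read until they settle.
 */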
static ISP_INLINE uint16_t
isp_pci_rd_debounced(ispsoftc_t *isp, int off)
{
	uint16_t val, prev;

	val = BXR2(isp, IspVirt2Off(isp, off));
	do {
		prev = val;
		val = BXR2(isp, IspVirt2Off(isp, off));
	} while (val != prev);
	return (val);
}

static void
isp_pci_run_isr(ispsoftc_t *isp)
{
	uint16_t isr, sema, info;

	if (IS_2100(isp)) {
		isr = isp_pci_rd_debounced(isp, BIU_ISR);
		sema = isp_pci_rd_debounced(isp, BIU_SEMA);
	} else {
		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0)
		return;
	if (sema != 0) {
		if (IS_2100(isp))
			info = isp_pci_rd_debounced(isp, OUTMAILBOX0);
		else
			info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
		if (info & MBOX_COMMAND_COMPLETE)
			isp_intr_mbox(isp, info);
		else
			isp_intr_async(isp, info);
		if (!IS_FC(isp) && isp->isp_state == ISP_RUNSTATE)
			isp_intr_respq(isp);
	} else
		isp_intr_respq(isp);
	ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
	if (sema)
		ISP_WRITE(isp, BIU_SEMA, 0);
}

static void
isp_pci_run_isr_2300(ispsoftc_t *isp)
{
	uint32_t hccr, r2hisr;
	uint16_t isr, info;

	if ((BXR2(isp, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0)
		return;
	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0)
		return;
	isr = r2hisr & BIU_R2HST_ISTAT_MASK;
	info = r2hisr >> 16;
	switch (isr) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
		isp_intr_mbox(isp, info);
		break;
	case ISPR2HST_ASYNC_EVENT:
		isp_intr_async(isp, info);
		break;
	case ISPR2HST_RIO_16:
		isp_intr_async(isp, ASYNC_RIO16_1);
		break;
	case ISPR2HST_FPOST:
		isp_intr_async(isp, ASYNC_CMD_CMPLT);
		break;
	case ISPR2HST_FPOST_CTIO:
		isp_intr_async(isp, ASYNC_CTIO_DONE);
		break;
	case ISPR2HST_RSPQ_UPDATE:
		isp_intr_respq(isp);
		break;
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		}
	}
	ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
	ISP_WRITE(isp, BIU_SEMA, 0);
}

static void
isp_pci_run_isr_2400(ispsoftc_t *isp)
{
	uint32_t r2hisr;
	uint16_t isr, info;

	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0)
		return;
	isr = r2hisr & BIU_R2HST_ISTAT_MASK;
	info = (r2hisr >> 16);
	switch (isr) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
		isp_intr_mbox(isp, info);
		break;
	case ISPR2HST_ASYNC_EVENT:
		isp_intr_async(isp, info);
		break;
	case ISPR2HST_RSPQ_UPDATE:
		isp_intr_respq(isp);
		break;
	case ISPR2HST_RSPQ_UPDATE2:
#ifdef ISP_TARGET_MODE
	case ISPR2HST_ATIO_RSPQ_UPDATE:
#endif
		isp_intr_respq(isp);
		/* FALLTHROUGH */
#ifdef ISP_TARGET_MODE
	case ISPR2HST_ATIO_UPDATE:
	case ISPR2HST_ATIO_UPDATE2:
		isp_intr_atioq(isp);
#endif
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
	}
	ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
}

static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
}

static uint32_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint32_t rv, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
}

static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		return (BXR2(isp, IspVirt2Off(isp, regoff)));
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGERR, "SXP_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGERR, "RISC_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGERR, "DMA_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	default:
		isp_prt(isp, ISP_LOGERR, "unknown block read at 0x%x", regoff);
		return (0xffffffff);
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSLO:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSHI:
		rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
		    regoff);
		rv = 0xffffffff;
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		BXW2(isp, IspVirt2Off(isp, regoff), val);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
		return;
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGERR, "SXP_BLOCK write at 0x%x", regoff);
		return;
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGERR, "RISC_BLOCK write at 0x%x", regoff);
		return;
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGERR, "DMA_BLOCK write at 0x%x", regoff);
		return;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown block write at 0x%x", regoff);
		break;
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		BXW4(isp, IspVirt2Off(isp, regoff), val);
#ifdef MEMORYBARRIERW
		if (regoff == BIU2400_REQINP ||
		    regoff == BIU2400_RSPOUTP ||
		    regoff == BIU2400_PRI_REQINP ||
		    regoff == BIU2400_ATIO_RSPOUTP)
			MEMORYBARRIERW(isp, SYNC_REG,
			    IspVirt2Off(isp, regoff), 4, -1)
		else
#endif
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
		    regoff);
		break;
	}
}

static uint32_t
isp_pci_rd_reg_2600(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;

	switch (regoff) {
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
		    regoff);
		rv = 0xffffffff;
		break;
	case BIU2400_REQINP:
		rv = B2R4(isp, 0x00);
		break;
	case BIU2400_REQOUTP:
		rv = B2R4(isp, 0x04);
		break;
	case BIU2400_RSPINP:
		rv = B2R4(isp, 0x08);
		break;
	case BIU2400_RSPOUTP:
		rv = B2R4(isp, 0x0c);
		break;
	case BIU2400_ATIO_RSPINP:
		rv = B2R4(isp, 0x10);
		break;
	case BIU2400_ATIO_RSPOUTP:
		rv = B2R4(isp, 0x14);
		break;
	default:
		rv = isp_pci_rd_reg_2400(isp, regoff);
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2600(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int off;

	switch (regoff) {
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
		    regoff);
		return;
	case BIU2400_REQINP:
		off = 0x00;
		break;
	case BIU2400_REQOUTP:
		off = 0x04;
		break;
	case BIU2400_RSPINP:
		off = 0x08;
		break;
	case BIU2400_RSPOUTP:
		off = 0x0c;
		break;
	case BIU2400_ATIO_RSPINP:
		off = 0x10;
		break;
	case BIU2400_ATIO_RSPOUTP:
		off = 0x14;
		break;
	default:
		isp_pci_wr_reg_2400(isp, regoff, val);
		return;
	}
	B2W4(isp, off, val);
}

struct imush {
	bus_addr_t maddr;
	int error;
};

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;

	if (!(imushp->error = error))
		imushp->maddr = segs[0].ds_addr;
}
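
/*
 * Allocate and map the DMA resources shared by all commands: the request
 * and response queues, the 24XX ATIO queue when target mode is compiled in,
 * FC scratch space and nexus free lists per channel, and the per-command
 * DMA maps and handle list. Called with the lock held; the lock is dropped
 * while the allocations are made.
 */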
static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	caddr_t base;
	uint32_t len, nsegs;
	int i, error, cmap = 0;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;
	isp_ecmd_t *ecmd;

	/* Already been here? If so, leave... */
	if (isp->isp_xflist != NULL)
		return (0);
	if (isp->isp_rquest != NULL && isp->isp_maxcmds == 0)
		return (0);
	ISP_UNLOCK(isp);
	if (isp->isp_rquest != NULL)
		goto gotmaxcmds;

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		if (sizeof (bus_size_t) > 4)
			slim = (bus_size_t) (1ULL << 32);
		else
			slim = (bus_size_t) (1UL << 31);
		llim = BUS_SPACE_MAXADDR;
	} else {
		slim = (1UL << 24);
		llim = BUS_SPACE_MAXADDR_32BIT;
	}
	if (isp->isp_osinfo.sixtyfourbit)
		nsegs = ISP_NSEG64_MAX;
	else
		nsegs = ISP_NSEG_MAX;

	if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1,
	    slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0,
	    &isp->isp_osinfo.dmat)) {
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}

	/*
	 * Allocate and map the request queue and a region for external
	 * DMA addressable command/status structures (22XX and later).
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (isp->isp_type >= ISP_HA_FC_2200)
		len += (N_XCMDS * XCMD_SIZE);
	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &isp->isp_osinfo.reqdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create request DMA tag");
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.reqdmat, (void **)&base,
	    BUS_DMA_COHERENT, &isp->isp_osinfo.reqmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate request DMA memory");
		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
		goto bad;
	}
	isp->isp_rquest = base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap,
	    base, len, imc, &im, 0) || im.error) {
		isp_prt(isp, ISP_LOGERR, "error loading request DMA map %d", im.error);
		goto bad;
	}
	isp_prt(isp, ISP_LOGDEBUG0, "request area @ 0x%jx/0x%jx",
	    (uintmax_t)im.maddr, (uintmax_t)len);
	isp->isp_rquest_dma = im.maddr;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	im.maddr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (isp->isp_type >= ISP_HA_FC_2200) {
		isp->isp_osinfo.ecmd_dma = im.maddr;
		isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)base;
		isp->isp_osinfo.ecmd_base = isp->isp_osinfo.ecmd_free;
		for (ecmd = isp->isp_osinfo.ecmd_free;
		    ecmd < &isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
			if (ecmd == &isp->isp_osinfo.ecmd_free[N_XCMDS - 1])
				ecmd->next = NULL;
			else
				ecmd->next = ecmd + 1;
		}
	}

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, &isp->isp_osinfo.respdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create response DMA tag");
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.respdmat, (void **)&base,
	    BUS_DMA_COHERENT, &isp->isp_osinfo.respmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate response DMA memory");
		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
		goto bad;
	}
	isp->isp_result = base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap,
	    base, len, imc, &im, 0) || im.error) {
		isp_prt(isp, ISP_LOGERR, "error loading response DMA map %d", im.error);
		goto bad;
	}
	isp_prt(isp, ISP_LOGDEBUG0, "response area @ 0x%jx/0x%jx",
	    (uintmax_t)im.maddr, (uintmax_t)len);
	isp->isp_result_dma = im.maddr;

#ifdef ISP_TARGET_MODE
	/*
	 * Allocate and map ATIO queue on 24xx with target mode.
	 */
	if (IS_24XX(isp)) {
		len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		    len, 1, len, 0, &isp->isp_osinfo.atiodmat)) {
			isp_prt(isp, ISP_LOGERR, "cannot create ATIO DMA tag");
			goto bad;
		}
		if (bus_dmamem_alloc(isp->isp_osinfo.atiodmat, (void **)&base,
		    BUS_DMA_COHERENT, &isp->isp_osinfo.atiomap) != 0) {
			isp_prt(isp, ISP_LOGERR, "cannot allocate ATIO DMA memory");
			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
			goto bad;
		}
		isp->isp_atioq = base;
		im.error = 0;
		if (bus_dmamap_load(isp->isp_osinfo.atiodmat, isp->isp_osinfo.atiomap,
		    base, len, imc, &im, 0) || im.error) {
			isp_prt(isp, ISP_LOGERR, "error loading ATIO DMA map %d", im.error);
			goto bad;
		}
		isp_prt(isp, ISP_LOGDEBUG0, "ATIO area @ 0x%jx/0x%jx",
		    (uintmax_t)im.maddr, (uintmax_t)len);
		isp->isp_atioq_dma = im.maddr;
	}
#endif

	if (IS_FC(isp)) {
		if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    2*QENTRY_LEN, 1, 2*QENTRY_LEN, 0, &isp->isp_osinfo.iocbdmat)) {
			goto bad;
		}
		if (bus_dmamem_alloc(isp->isp_osinfo.iocbdmat,
		    (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.iocbmap) != 0)
			goto bad;
		isp->isp_iocb = base;
		im.error = 0;
		if (bus_dmamap_load(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap,
		    base, 2*QENTRY_LEN, imc, &im, 0) || im.error)
			goto bad;
		isp->isp_iocb_dma = im.maddr;

		if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
		    ISP_FC_SCRLEN, 1, ISP_FC_SCRLEN, 0, &isp->isp_osinfo.scdmat))
			goto bad;
		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
			if (bus_dmamem_alloc(isp->isp_osinfo.scdmat,
			    (void **)&base, BUS_DMA_COHERENT, &fc->scmap) != 0)
				goto bad;
			FCPARAM(isp, cmap)->isp_scratch = base;
			im.error = 0;
			if (bus_dmamap_load(isp->isp_osinfo.scdmat, fc->scmap,
			    base, ISP_FC_SCRLEN, imc, &im, 0) || im.error) {
				bus_dmamem_free(isp->isp_osinfo.scdmat,
				    base, fc->scmap);
				FCPARAM(isp, cmap)->isp_scratch = NULL;
				goto bad;
			}
			FCPARAM(isp, cmap)->isp_scdma = im.maddr;
			if (!IS_2100(isp)) {
				for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
					struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
					if (n == NULL) {
						while (fc->nexus_free_list) {
							n = fc->nexus_free_list;
							fc->nexus_free_list = n->next;
							free(n, M_DEVBUF);
						}
						goto bad;
					}
					n->next = fc->nexus_free_list;
					fc->nexus_free_list = n;
				}
			}
		}
	}

	if (isp->isp_maxcmds == 0) {
		ISP_LOCK(isp);
		return (0);
	}

gotmaxcmds:
	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *)
	    malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < isp->isp_maxcmds; i++) {
		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
		if (error) {
			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(isp->isp_osinfo.dmat,
				    isp->isp_osinfo.pcmd_pool[i].dmap);
			}
			goto bad;
		}
		callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0);
		if (i == isp->isp_maxcmds-1)
			pcmd->next = NULL;
		else
			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
	}
	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];

	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	for (len = 0; len < isp->isp_maxcmds - 1; len++)
		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
	isp->isp_xffree = isp->isp_xflist;

	ISP_LOCK(isp);
	return (0);

bad:
	isp_pci_mbxdmafree(isp);
	ISP_LOCK(isp);
	return (1);
}

static void
isp_pci_mbxdmafree(ispsoftc_t *isp)
{
	int i;

	if (isp->isp_xflist != NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
	}
	if (isp->isp_osinfo.pcmd_pool != NULL) {
		for (i = 0; i < isp->isp_maxcmds; i++) {
			bus_dmamap_destroy(isp->isp_osinfo.dmat,
			    isp->isp_osinfo.pcmd_pool[i].dmap);
		}
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		isp->isp_osinfo.pcmd_pool = NULL;
	}
	if (IS_FC(isp)) {
		for (i = 0; i < isp->isp_nchan; i++) {
			struct isp_fc *fc = ISP_FC_PC(isp, i);
			if (FCPARAM(isp, i)->isp_scdma != 0) {
				bus_dmamap_unload(isp->isp_osinfo.scdmat,
				    fc->scmap);
				FCPARAM(isp, i)->isp_scdma = 0;
			}
			if (FCPARAM(isp, i)->isp_scratch != NULL) {
				bus_dmamem_free(isp->isp_osinfo.scdmat,
				    FCPARAM(isp, i)->isp_scratch, fc->scmap);
				FCPARAM(isp, i)->isp_scratch = NULL;
			}
			while (fc->nexus_free_list) {
				struct isp_nexus *n = fc->nexus_free_list;
				fc->nexus_free_list = n->next;
				free(n, M_DEVBUF);
			}
		}
		if (isp->isp_iocb_dma != 0) {
			bus_dma_tag_destroy(isp->isp_osinfo.scdmat);
			bus_dmamap_unload(isp->isp_osinfo.iocbdmat,
			    isp->isp_osinfo.iocbmap);
			isp->isp_iocb_dma = 0;
		}
		if (isp->isp_iocb != NULL) {
			bus_dmamem_free(isp->isp_osinfo.iocbdmat,
			    isp->isp_iocb, isp->isp_osinfo.iocbmap);
			bus_dma_tag_destroy(isp->isp_osinfo.iocbdmat);
		}
	}
#ifdef ISP_TARGET_MODE
	if (IS_24XX(isp)) {
		if (isp->isp_atioq_dma != 0) {
			bus_dmamap_unload(isp->isp_osinfo.atiodmat,
			    isp->isp_osinfo.atiomap);
			isp->isp_atioq_dma = 0;
		}
		if (isp->isp_atioq != NULL) {
			bus_dmamem_free(isp->isp_osinfo.atiodmat, isp->isp_atioq,
			    isp->isp_osinfo.atiomap);
			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
			isp->isp_atioq = NULL;
		}
	}
#endif
	if (isp->isp_result_dma != 0) {
		bus_dmamap_unload(isp->isp_osinfo.respdmat,
		    isp->isp_osinfo.respmap);
		isp->isp_result_dma = 0;
	}
	if (isp->isp_result != NULL) {
		bus_dmamem_free(isp->isp_osinfo.respdmat, isp->isp_result,
		    isp->isp_osinfo.respmap);
		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
		isp->isp_result = NULL;
	}
	if (isp->isp_rquest_dma != 0) {
		bus_dmamap_unload(isp->isp_osinfo.reqdmat,
		    isp->isp_osinfo.reqmap);
		isp->isp_rquest_dma = 0;
	}
	if (isp->isp_rquest != NULL) {
		bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_rquest,
		    isp->isp_osinfo.reqmap);
		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
		isp->isp_rquest = NULL;
	}
}

typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;	/* original request */
	int error;
	bus_size_t mapsize;
} mush_t;

#define MUSHERR_NOQENTRIES	-2

#ifdef ISP_TARGET_MODE
static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void tdma2(void *, bus_dma_segment_t *, int, int);

static void
tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	tdma2(arg, dm_segs, nseg, error);
}

static void
tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceeds the maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
				rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceeds the maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else {
			dm_segs = NULL;
			nseg = 0;
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
		/* FALLTHROUGH */
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
	}
}
#endif

static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	dma2(arg, dm_segs, nseg, error);
}

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceeds the maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
				rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
			} else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
				rq->req_header.rqs_entry_type = RQSTYPE_A64;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceeds the maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else {
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
		break;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
		break;
	}
}

static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);
	void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int);
	int error;

	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = ff;
	mp->error = 0;
	mp->mapsize = 0;

#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		eptr = tdma2;
		eptr2 = tdma2_2;
	} else
#endif
	{
		eptr = dma2;
		eptr2 = dma2_2;
	}

	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
	    (union ccb *)csio, eptr, mp, 0);
	if (error == EINPROGRESS) {
		bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
		mp->error = EINVAL;
		isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
	} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
		isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
		mp->error = error;
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			csio->ccb_h.status = CAM_REQ_TOO_BIG;
		} else if (mp->error == EINVAL) {
			csio->ccb_h.status = CAM_REQ_INVALID;
		} else {
			csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
		}
		return (retval);
	}
	return (CMD_QUEUED);
}

static int
isp_pci_irqsetup(ispsoftc_t *isp)
{

	return (0);
}
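
/*
 * Register state dump for debugging. This is only wired into the method
 * vectors of the pre-24XX chips above; the 24XX/25XX/26XX tables leave the
 * dump-registers slot NULL.
 */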
static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}