/*-
 * Copyright (c) 1997-2008 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/bus.h>
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

#include <dev/isp/isp_freebsd.h>

static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static int isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2400(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);

static void isp_pci_reset0(ispsoftc_t *);
static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2400 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

static struct ispmdvec mdvec_2500 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

#ifndef PCIM_CMD_INVEN
#define PCIM_CMD_INVEN 0x10
#endif
#ifndef PCIM_CMD_BUSMASTEREN
#define PCIM_CMD_BUSMASTEREN 0x0004
#endif
#ifndef PCIM_CMD_PERRESPEN
#define PCIM_CMD_PERRESPEN 0x0040
#endif
#ifndef PCIM_CMD_SEREN
#define PCIM_CMD_SEREN 0x0100
#endif
#ifndef PCIM_CMD_INTX_DISABLE
#define PCIM_CMD_INTX_DISABLE 0x0400
#endif

#ifndef PCIR_COMMAND
#define PCIR_COMMAND 0x04
#endif

#ifndef PCIR_CACHELNSZ
#define PCIR_CACHELNSZ 0x0c
#endif

#ifndef PCIR_LATTIMER
#define PCIR_LATTIMER 0x0d
#endif

#ifndef PCIR_ROMADDR
#define PCIR_ROMADDR 0x30
#endif

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP10160
#define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2300
#define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2312
#define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2322
#define PCI_PRODUCT_QLOGIC_ISP2322 0x2322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2422
#define PCI_PRODUCT_QLOGIC_ISP2422 0x2422
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2432
#define PCI_PRODUCT_QLOGIC_ISP2432 0x2432
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2532
#define PCI_PRODUCT_QLOGIC_ISP2532 0x2532
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6312
#define PCI_PRODUCT_QLOGIC_ISP6312 0x6312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6322
#define PCI_PRODUCT_QLOGIC_ISP6322 0x6322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP5432
#define PCI_PRODUCT_QLOGIC_ISP5432 0x5432
#endif

#define PCI_QLOGIC_ISP5432 \
	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1020 \
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080 \
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP10160 \
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP12160 \
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240 \
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1280 \
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100 \
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200 \
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2300 \
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2312 \
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2322 \
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2422 \
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2432 \
	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2532 \
	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP6312 \
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP6322 \
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define AMI_RAID_SUBVENDOR_ID 0x101e

#define IO_MAP_REG 0x10
#define MEM_MAP_REG 0x14

#define PCI_DFLT_LTNCY 0x40
#define PCI_DFLT_LNSZ 0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);

#define ISP_PCD(isp) ((struct isp_pcisoftc *)isp)->pci_dev
struct isp_pcisoftc {
	ispsoftc_t		pci_isp;
	device_t		pci_dev;
	struct resource *	regs;
	void *			irq;
	int			iqd;
	int			rtp;
	int			rgd;
	void *			ih;
	int16_t			pci_poff[_NREG_BLKS];
	bus_dma_tag_t		dmat;
	int			msicount;
};

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, isp_pci_probe),
	DEVMETHOD(device_attach, isp_pci_attach),
	DEVMETHOD(device_detach, isp_pci_detach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_DEPEND(isp, cam, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
static int isp_nvports = 0;

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2432:
		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2532:
		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP5432:
		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

static void
isp_get_generic_options(device_t dev, ispsoftc_t *isp)
{
	int tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
	if (tval > 0 && tval <= 254) {
		isp_nvports = tval;
	}
	tval = 7;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
	isp_quickboot_time = tval;
}

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int tval;
	/*
	 * Which we should try first - memory mapping or i/o mapping?
	 *
	 * We used to try memory first followed by i/o on alpha, otherwise
	 * the reverse, but we should just try memory first all the time now.
	 */
	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_PORTEN;
		*m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_memmap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_MEMEN;
		*m2 = PCIM_CMD_PORTEN;
	}
}

static void
isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
{
	const char *sptr;
	int tval = 0;
	char prefix[12], name[16];

	if (chan == 0)
		prefix[0] = 0;
	else
		snprintf(prefix, sizeof(prefix), "chan%d.", chan);
	snprintf(name, sizeof(name), "%siid", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval)) {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
		} else {
#ifdef __sparc64__
			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
#else
			ISP_SPI_PC(isp, chan)->iid = 7;
#endif
		}
	} else {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = tval - chan;
		} else {
			ISP_SPI_PC(isp, chan)->iid = tval;
		}
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}

	if (IS_SCSI(isp))
		return;

	tval = -1;
	snprintf(name, sizeof(name), "%srole", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0) {
		switch (tval) {
		case ISP_ROLE_NONE:
		case ISP_ROLE_INITIATOR:
		case ISP_ROLE_TARGET:
		case ISP_ROLE_BOTH:
			device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
			break;
		default:
			tval = -1;
			break;
		}
	}
	if (tval == -1) {
		tval = ISP_DEFAULT_ROLES;
	}
	ISP_FC_PC(isp, chan)->def_role = tval;

	tval = 0;
	snprintf(name, sizeof(name), "%sfullduplex", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	sptr = 0;
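	/*
	 * Illustrative note (not part of the original source): the
	 * resource_int_value()/resource_string_value() lookups above and
	 * below consult the standard FreeBSD device hint mechanism, so a
	 * hypothetical /boot/device.hints entry for channel 1 of unit 0
	 * might look like:
	 *
	 *	hint.isp.0.chan1.iid="7"
	 *	hint.isp.0.chan1.topology="nport-only"
	 *
	 * Channel 0 options omit the "chanN." prefix (e.g. hint.isp.0.iid).
	 */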
	snprintf(name, sizeof(name), "%stopology", prefix);
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	tval = 0;
	snprintf(name, sizeof(name), "%snofctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts |= ISP_CFG_NOFCTAPE;
	}

	tval = 0;
	snprintf(name, sizeof(name), "%sfctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
		isp->isp_confopts |= ISP_CFG_FCTAPE;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = 0;
	snprintf(name, sizeof(name), "%sportwwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwpn = 0;
		}
	}

	sptr = 0;
	snprintf(name, sizeof(name), "%snodewwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwnn = 0;
		}
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
	} else {
		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sgone_device_time", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->gone_device_time = tval;
	} else {
		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
	}
}

static int
isp_pci_attach(device_t dev)
{
	int i, m1, m2, locksetup = 0;
	uint32_t data, cmd, linesz, did;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	size_t psize, xsize;
	char fwname[32];

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));

	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;
	isp->isp_dev = dev;
	isp->isp_nchan = 1;
	if (sizeof (bus_addr_t) > 4)
		isp->isp_osinfo.sixtyfourbit = 1;

	/*
	 * Get Generic Options
	 */
	isp_nvports = 0;
	isp_get_generic_options(dev, isp);

	/*
	 * Get PCI options- which in this case are just mapping preferences.
	 */
	isp_get_pci_options(dev, &m1, &m2);

	linesz = PCI_DFLT_LNSZ;
	pcs->irq = pcs->regs = NULL;
	pcs->rgd = pcs->rtp = pcs->iqd = 0;

	pcs->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
	pcs->rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
	pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE);
	if (pcs->regs == NULL) {
		pcs->rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		pcs->rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd, RF_ACTIVE);
	}
	if (pcs->regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n", (pcs->rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	isp->isp_bus_tag = rman_get_bustag(pcs->regs);
	isp->isp_bus_handle = rman_get_bushandle(pcs->regs);

	pcs->pci_dev = dev;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	switch (pci_get_devid(dev)) {
	case PCI_QLOGIC_ISP1020:
		did = 0x1040;
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		break;
	case PCI_QLOGIC_ISP1080:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1240:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_nchan = 2;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1280:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP10160:
		did = 0x12160;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_10160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP12160:
		did = 0x12160;
		isp->isp_nchan = 2;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP2100:
		did = 0x2100;
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
		break;
	case PCI_QLOGIC_ISP2200:
		did = 0x2200;
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		break;
	case PCI_QLOGIC_ISP2300:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2300;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2312:
	case PCI_QLOGIC_ISP6312:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2312;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2322:
	case PCI_QLOGIC_ISP6322:
		did = 0x2322;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2322;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2422:
	case PCI_QLOGIC_ISP2432:
		did = 0x2400;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2400;
		isp->isp_type = ISP_HA_FC_2400;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2532:
		did = 0x2500;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP5432:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	default:
		device_printf(dev, "unknown device type\n");
		goto bad;
		break;
	}
	isp->isp_revision = pci_get_revid(dev);

	if (IS_FC(isp)) {
		psize = sizeof (fcparam);
		xsize = sizeof (struct isp_fc);
	} else {
		psize = sizeof (sdparam);
		xsize = sizeof (struct isp_spi);
	}
	psize *= isp->isp_nchan;
	xsize *= isp->isp_nchan;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_osinfo.pc.ptr == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}

	/*
	 * Now that we know who we are (roughly) get/set specific options
	 */
	for (i = 0; i < isp->isp_nchan; i++) {
		isp_get_specific_options(dev, i, isp);
	}

	isp->isp_osinfo.fw = NULL;
	if (isp->isp_osinfo.fw == NULL) {
		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw != NULL) {
		isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	if (IS_24XX(isp)) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
		data = linesz;
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	/*
	 * Do MSI
	 *
	 * NB: MSI-X needs to be disabled for the 2432 (PCI-Express)
	 */
	if (IS_24XX(isp) || IS_2322(isp)) {
		pcs->msicount = pci_msi_count(dev);
		if (pcs->msicount > 1) {
			pcs->msicount = 1;
		}
		if (pci_alloc_msi(dev, &pcs->msicount) == 0) {
			pcs->iqd = 1;
		} else {
			pcs->iqd = 0;
		}
	}
	pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE);
	if (pcs->irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	if (isp_reinit(isp, 1) != 0) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	ISP_UNLOCK(isp);
	if (isp_attach(isp)) {
		ISP_LOCK(isp);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	return (0);

bad:
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	if (locksetup) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
	if (pcs->irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	}
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	if (pcs->regs) {
		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	}
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (ENXIO);
}

static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	int status;

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		return (ENXIO);
	}
	isp = (ispsoftc_t *) pcs;
	status = isp_detach(isp);
	if (status)
		return (status);
	ISP_LOCK(isp);
	isp_uninit(isp);
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	ISP_UNLOCK(isp);
	mtx_destroy(&isp->isp_osinfo.lock);
	(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	/*
	 * XXX: THERE IS A LOT OF LEAKAGE HERE
	 */
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (0);
}

#define IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

#define BXR2(isp, off)		\
	bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off)
#define BXW2(isp, off, v)	\
	bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v)
#define BXR4(isp, off)		\
	bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off)
#define BXW4(isp, off, v)	\
	bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v)

static ISP_INLINE int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	uint32_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(isp, IspVirt2Off(isp, off));
		val1 = BXR2(isp, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
{
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, info)) {
				return (0);
			}
		} else {
			*info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
{
	uint32_t hccr, r2hisr;

	if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) {
		*isrp = 0;
		return (0);
	}
	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*semap = 1;
		break;
	case ISPR2HST_RIO_16:
		*info = ASYNC_RIO16_1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*info = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*info = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*semap = 0;
		break;
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		}
		return (0);
	}
	*info = (r2hisr >> 16);
	return (1);
}

static int
isp_pci_rd_isr_2400(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
{
	uint32_t r2hisr;

	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*semap = 1;
		break;
	case ISPR2HST_RSPQ_UPDATE:
	case ISPR2HST_RSPQ_UPDATE2:
	case ISPR2HST_ATIO_UPDATE:
	case ISPR2HST_ATIO_RSPQ_UPDATE:
	case ISPR2HST_ATIO_UPDATE2:
		*semap = 0;
		break;
	default:
		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		return (0);
	}
	*info = (r2hisr >> 16);
	return (1);
}

static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
}

static uint32_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint32_t rv, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
}

static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		return (BXR2(isp, IspVirt2Off(isp, regoff)));
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
		return (0xffffffff);
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSLO:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSHI:
		rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_rd_reg_2400: unknown offset %x", regoff);
		rv = 0xffffffff;
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		BXW2(isp, IspVirt2Off(isp, regoff), val);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
		return;
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
		return;
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
		return;
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
		return;
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
		    regoff);
		break;
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		BXW4(isp, IspVirt2Off(isp, regoff), val);
#ifdef MEMORYBARRIERW
		if (regoff == BIU2400_REQINP ||
		    regoff == BIU2400_RSPOUTP ||
		    regoff == BIU2400_PRI_REQINP ||
		    regoff == BIU2400_ATIO_RSPOUTP)
			MEMORYBARRIERW(isp, SYNC_REG,
			    IspVirt2Off(isp, regoff), 4, -1)
		else
#endif
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
		break;
	}
}

struct imush {
	ispsoftc_t *isp;
	caddr_t vbase;
	int chan;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);
static void imc1(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	isp_ecmd_t *ecmd;

	if (error) {
		imushp->error = error;
		return;
	}
	if (nseg != 1) {
		imushp->error = EINVAL;
		return;
	}
	isp_prt(imushp->isp, ISP_LOGDEBUG0, "request/result area @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);

	imushp->isp->isp_rquest = imushp->vbase;
	imushp->isp->isp_rquest_dma = segs->ds_addr;
	segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
	imushp->vbase += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));

	imushp->isp->isp_result_dma = segs->ds_addr;
	imushp->isp->isp_result = imushp->vbase;
	segs->ds_addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
	imushp->vbase += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));

	if (imushp->isp->isp_type >= ISP_HA_FC_2200) {
		imushp->isp->isp_osinfo.ecmd_dma = segs->ds_addr;
		imushp->isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)imushp->vbase;
		imushp->isp->isp_osinfo.ecmd_base = imushp->isp->isp_osinfo.ecmd_free;
		for (ecmd = imushp->isp->isp_osinfo.ecmd_free; ecmd < &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
			if (ecmd == &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS - 1]) {
				ecmd->next = NULL;
			} else {
				ecmd->next = ecmd + 1;
			}
		}
	}
#ifdef ISP_TARGET_MODE
	segs->ds_addr += (N_XCMDS * XCMD_SIZE);
	imushp->vbase += (N_XCMDS * XCMD_SIZE);
	if (IS_24XX(imushp->isp)) {
		imushp->isp->isp_atioq_dma = segs->ds_addr;
		imushp->isp->isp_atioq = imushp->vbase;
	}
#endif
}

static void
imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;

	if (error) {
		imushp->error = error;
		return;
	}
	if (nseg != 1) {
		imushp->error = EINVAL;
		return;
	}
	isp_prt(imushp->isp, ISP_LOGDEBUG0, "scdma @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
	FCPARAM(imushp->isp, imushp->chan)->isp_scdma = segs->ds_addr;
	FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase;
}

static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	caddr_t base;
	uint32_t len, nsegs;
	int i, error, cmap = 0;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}
	ISP_UNLOCK(isp);

	if (isp->isp_maxcmds == 0) {
		isp_prt(isp, ISP_LOGERR, "maxcmds not set");
		ISP_LOCK(isp);
		return (1);
	}

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		if (sizeof (bus_size_t) > 4) {
			slim = (bus_size_t) (1ULL << 32);
		} else {
			slim = (bus_size_t) (1UL << 31);
		}
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1UL << 24);
	}

	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_osinfo.pcmd_pool == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds");
		ISP_LOCK(isp);
		return (1);
	}

	if (isp->isp_osinfo.sixtyfourbit) {
		nsegs = ISP_NSEG64_MAX;
	} else {
		nsegs = ISP_NSEG_MAX;
	}

	if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0, &isp->isp_osinfo.dmat)) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}

	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
	}
	isp->isp_xffree = isp->isp_xflist;
#ifdef ISP_TARGET_MODE
	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_tgtlist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		return (1);
	}
	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
		isp->isp_tgtlist[len].cmd = &isp->isp_tgtlist[len+1];
	}
	isp->isp_tgtfree = isp->isp_tgtlist;
#endif

	/*
	 * Allocate and map the request and result queues (and ATIO queue
	 * if we're a 2400 supporting target mode), and a region for
	 * external dma addressable command/status structures (23XX and
	 * later).
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
#ifdef ISP_TARGET_MODE
	if (IS_24XX(isp)) {
		len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	}
#endif
	if (isp->isp_type >= ISP_HA_FC_2200) {
		len += (N_XCMDS * XCMD_SIZE);
	}

	/*
	 * Create a tag for the control spaces. We don't always need this
	 * to be 32 bits, but we do this for simplicity and speed's sake.
	 */
	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, slim, 0, &isp->isp_osinfo.cdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces");
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &isp->isp_osinfo.cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	im.isp = isp;
	im.chan = 0;
	im.vbase = base;
	im.error = 0;

	bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	if (IS_FC(isp)) {
		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
			if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ISP_FC_SCRLEN, 1, slim, 0, &fc->tdmat)) {
				goto bad;
			}
			if (bus_dmamem_alloc(fc->tdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &fc->tdmap) != 0) {
				bus_dma_tag_destroy(fc->tdmat);
				goto bad;
			}
			im.isp = isp;
			im.chan = cmap;
			im.vbase = base;
			im.error = 0;
			bus_dmamap_load(fc->tdmat, fc->tdmap, base, ISP_FC_SCRLEN, imc1, &im, 0);
			if (im.error) {
				bus_dmamem_free(fc->tdmat, base, fc->tdmap);
				bus_dma_tag_destroy(fc->tdmat);
				goto bad;
			}
			if (!IS_2100(isp)) {
				for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
					struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
					if (n == NULL) {
						while (fc->nexus_free_list) {
							n = fc->nexus_free_list;
							fc->nexus_free_list = n->next;
							free(n, M_DEVBUF);
						}
						goto bad;
					}
					n->next = fc->nexus_free_list;
					fc->nexus_free_list = n;
				}
			}
		}
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
		if (error) {
			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap);
			}
			goto bad;
		}
		callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0);
		if (i == isp->isp_maxcmds-1) {
			pcmd->next = NULL;
		} else {
			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
		}
	}
	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];
	ISP_LOCK(isp);
	return (0);

bad:
	while (--cmap >= 0) {
		struct isp_fc *fc = ISP_FC_PC(isp, cmap);
		bus_dmamap_unload(fc->tdmat, fc->tdmap);
		bus_dmamem_free(fc->tdmat, base, fc->tdmap);
		bus_dma_tag_destroy(fc->tdmat);
		while (fc->nexus_free_list) {
			struct isp_nexus *n = fc->nexus_free_list;
			fc->nexus_free_list = n->next;
			free(n, M_DEVBUF);
		}
	}
	if (isp->isp_rquest_dma != 0)
		bus_dmamap_unload(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap);
	bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap);
	bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
	isp->isp_rquest = NULL;
	ISP_LOCK(isp);
	return (1);
}

typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;	/* original request */
	int error;
	bus_size_t mapsize;
} mush_t;

#define MUSHERR_NOQENTRIES	-2

#ifdef ISP_TARGET_MODE
static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void tdma2(void *, bus_dma_segment_t *, int, int);

static void
tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;

	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	tdma2(arg, dm_segs, nseg, error);
}

static void
tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
				rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else {
			dm_segs = NULL;
			nseg = 0;
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
	}
}
#endif

static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;

	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	dma2(arg, dm_segs, nseg, error);
}

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
				rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
			} else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
				rq->req_header.rqs_entry_type = RQSTYPE_A64;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else {
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
		break;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
		break;
	}
}

static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);
	void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int);
	int error;

	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = ff;
	mp->error = 0;
	mp->mapsize = 0;

#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		eptr = tdma2;
		eptr2 = tdma2_2;
	} else
#endif
	{
		eptr = dma2;
		eptr2 = dma2_2;
	}

	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
	    (union ccb *)csio, eptr, mp, 0);
	if (error == EINPROGRESS) {
		bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
		mp->error = EINVAL;
		isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
	} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
		isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
		mp->error = error;
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			csio->ccb_h.status = CAM_REQ_TOO_BIG;
		} else if (mp->error == EINVAL) {
			csio->ccb_h.status = CAM_REQ_INVALID;
		} else {
			csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
		}
		return (retval);
	}
	return (CMD_QUEUED);
}

static void
isp_pci_reset0(ispsoftc_t *isp)
{
	ISP_DISABLE_INTS(isp);
}

static void
isp_pci_reset1(ispsoftc_t *isp)
{
	if (!IS_24XX(isp)) {
		/* Make sure the BIOS is disabled */
		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	}
	/* and enable interrupts */
	ISP_ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));

	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}
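/*
 * Illustrative note (not part of the original source): the firmware images
 * requested in isp_pci_attach() via firmware_get("isp_xxxx") are normally
 * registered by the ispfw(4) module.  Assuming a stock FreeBSD system, one
 * way to make them available at boot is a /boot/loader.conf entry such as:
 *
 *	ispfw_load="YES"
 *
 * When no image is registered (or when the fwload_disable hint is set), the
 * driver runs with whatever firmware is already resident on the adapter.
 */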