/*-
 * Copyright (c) 1997-2008 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/bus.h>
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

#include <dev/isp/isp_freebsd.h>

static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2600(ispsoftc_t *, int);
static void isp_pci_wr_reg_2600(ispsoftc_t *, int, uint32_t);
static int isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2400(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);


static void isp_pci_reset0(ispsoftc_t *);
static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2400 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

static struct ispmdvec mdvec_2500 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

static struct ispmdvec mdvec_2600 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2600,
	isp_pci_wr_reg_2600,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

#ifndef PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif
#ifndef PCIM_CMD_INTX_DISABLE
#define	PCIM_CMD_INTX_DISABLE		0x0400
#endif

#ifndef PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2322
#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2422
#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2432
#define	PCI_PRODUCT_QLOGIC_ISP2432	0x2432
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2532
#define	PCI_PRODUCT_QLOGIC_ISP2532	0x2532
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6322
#define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP5432
#define	PCI_PRODUCT_QLOGIC_ISP5432	0x5432
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2031
#define	PCI_PRODUCT_QLOGIC_ISP2031	0x2031
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8031
#define	PCI_PRODUCT_QLOGIC_ISP8031	0x8031
#endif

#define	PCI_QLOGIC_ISP5432	\
	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2432	\
	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2532	\
	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6322	\
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2031	\
	((PCI_PRODUCT_QLOGIC_ISP2031 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP8031	\
	((PCI_PRODUCT_QLOGIC_ISP8031 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);


#define	ISP_PCD(isp)	((struct isp_pcisoftc *)isp)->pci_dev
struct isp_pcisoftc {
	ispsoftc_t		pci_isp;
	device_t		pci_dev;
	struct resource *	regs;
	struct resource *	regs2;
	void *			irq;
	int			iqd;
	int			rtp;
	int			rgd;
	int			rtp2;
	int			rgd2;
	void *			ih;
	int16_t			pci_poff[_NREG_BLKS];
	bus_dma_tag_t		dmat;
	int			msicount;
};


static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	DEVMETHOD(device_detach,	isp_pci_detach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_DEPEND(isp, cam, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
static int isp_nvports = 0;

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2432:
		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2532:
		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP5432:
		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2031:
		device_set_desc(dev, "Qlogic ISP 2031 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP8031:
		device_set_desc(dev, "Qlogic ISP 8031 PCI FCoE Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

static void
isp_get_generic_options(device_t dev, ispsoftc_t *isp)
{
	int tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
	if (tval > 0 && tval <= 254) {
		isp_nvports = tval;
	}
	tval = 7;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
	isp_quickboot_time = tval;
}

static void
isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
{
	const char *sptr;
	int tval = 0;
	char prefix[12], name[16];

	if (chan == 0)
		prefix[0] = 0;
	else
		snprintf(prefix, sizeof(prefix), "chan%d.", chan);
	snprintf(name, sizeof(name), "%siid", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval)) {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
		} else {
#ifdef __sparc64__
			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
#else
			ISP_SPI_PC(isp, chan)->iid = 7;
#endif
		}
	} else {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = tval - chan;
		} else {
			ISP_SPI_PC(isp, chan)->iid = tval;
		}
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}

	if (IS_SCSI(isp))
		return;

	tval = -1;
	snprintf(name, sizeof(name), "%srole", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0) {
		switch (tval) {
		case ISP_ROLE_NONE:
		case ISP_ROLE_INITIATOR:
		case ISP_ROLE_TARGET:
		case ISP_ROLE_BOTH:
			device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
			break;
		default:
			tval = -1;
			break;
		}
	}
	if (tval == -1) {
		tval = ISP_DEFAULT_ROLES;
	}
	ISP_FC_PC(isp, chan)->def_role = tval;

	tval = 0;
	snprintf(name, sizeof(name), "%sfullduplex", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	sptr = 0;
	snprintf(name, sizeof(name), "%stopology", prefix);
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	tval = 0;
	snprintf(name, sizeof(name), "%snofctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts |= ISP_CFG_NOFCTAPE;
	}

	tval = 0;
	snprintf(name, sizeof(name), "%sfctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
		isp->isp_confopts |= ISP_CFG_FCTAPE;
	}


	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = 0;
	snprintf(name, sizeof(name), "%sportwwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwpn = 0;
		}
	}

	sptr = 0;
	snprintf(name, sizeof(name), "%snodewwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwnn = 0;
		}
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
	} else {
		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sgone_device_time", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->gone_device_time = tval;
	} else {
		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
	}
}
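
/*
 * For reference, the options read above are standard device.hints(5) /
 * loader.conf(5) tunables keyed by unit number and, for FC channels beyond
 * channel 0, a "chanN." prefix. Illustrative examples only (the values
 * below are made up):
 *
 *	hint.isp.0.fwload_disable="1"
 *	hint.isp.0.debug="0x7ff"
 *	hint.isp.0.topology="nport-only"
 *	hint.isp.0.portwwn="w50000000aaaa0001"
 *	hint.isp.0.chan1.gone_device_time="30"
 */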

static int
isp_pci_attach(device_t dev)
{
	int i, locksetup = 0;
	uint32_t data, cmd, linesz, did;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	size_t psize, xsize;
	char fwname[32];

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));

	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;
	isp->isp_dev = dev;
	isp->isp_nchan = 1;
	if (sizeof (bus_addr_t) > 4)
		isp->isp_osinfo.sixtyfourbit = 1;

	/*
	 * Get Generic Options
	 */
	isp_nvports = 0;
	isp_get_generic_options(dev, isp);

	linesz = PCI_DFLT_LNSZ;
	pcs->irq = pcs->regs = pcs->regs2 = NULL;
	pcs->rgd = pcs->rtp = pcs->iqd = 0;

	pcs->pci_dev = dev;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	switch (pci_get_devid(dev)) {
	case PCI_QLOGIC_ISP1020:
		did = 0x1040;
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		break;
	case PCI_QLOGIC_ISP1080:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1240:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_nchan = 2;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1280:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP10160:
		did = 0x12160;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_10160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP12160:
		did = 0x12160;
		isp->isp_nchan = 2;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP2100:
		did = 0x2100;
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
		break;
	case PCI_QLOGIC_ISP2200:
		did = 0x2200;
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		break;
	case PCI_QLOGIC_ISP2300:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2300;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2312:
	case PCI_QLOGIC_ISP6312:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2312;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2322:
	case PCI_QLOGIC_ISP6322:
		did = 0x2322;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2322;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2422:
	case PCI_QLOGIC_ISP2432:
		did = 0x2400;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2400;
		isp->isp_type = ISP_HA_FC_2400;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2532:
		did = 0x2500;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP5432:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2031:
	case PCI_QLOGIC_ISP8031:
		did = 0x2600;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2600;
		isp->isp_type = ISP_HA_FC_2600;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	default:
		device_printf(dev, "unknown device type\n");
		goto bad;
		break;
	}
	isp->isp_revision = pci_get_revid(dev);

	if (IS_26XX(isp)) {
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(0);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		pcs->rtp2 = SYS_RES_MEMORY;
		pcs->rgd2 = PCIR_BAR(4);
		pcs->regs2 = bus_alloc_resource_any(dev, pcs->rtp2, &pcs->rgd2,
		    RF_ACTIVE);
	} else {
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(1);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		if (pcs->regs == NULL) {
			pcs->rtp = SYS_RES_IOPORT;
			pcs->rgd = PCIR_BAR(0);
			pcs->regs = bus_alloc_resource_any(dev, pcs->rtp,
			    &pcs->rgd, RF_ACTIVE);
		}
	}
	if (pcs->regs == NULL) {
		device_printf(dev, "Unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "Using %s space register mapping\n",
		    (pcs->rtp == SYS_RES_IOPORT)? "I/O" : "Memory");
	}
	isp->isp_regs = pcs->regs;
	isp->isp_regs2 = pcs->regs2;

	if (IS_FC(isp)) {
		psize = sizeof (fcparam);
		xsize = sizeof (struct isp_fc);
	} else {
		psize = sizeof (sdparam);
		xsize = sizeof (struct isp_spi);
	}
	psize *= isp->isp_nchan;
	xsize *= isp->isp_nchan;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_osinfo.pc.ptr == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}

	/*
	 * Now that we know who we are (roughly) get/set specific options
	 */
	for (i = 0; i < isp->isp_nchan; i++) {
		isp_get_specific_options(dev, i, isp);
	}

	isp->isp_osinfo.fw = NULL;
	if (isp->isp_osinfo.fw == NULL) {
		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw != NULL) {
		isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
	}
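
	/*
	 * Note: the "isp_%04x" image requested above is normally registered
	 * with firmware(9) by the separate ispfw(4) module (for example via
	 * an ispfw_load="YES" line in loader.conf). If no image is found,
	 * firmware_get() returns NULL and the firmware already resident in
	 * the card's flash is used instead.
	 */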

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	if (IS_24XX(isp)) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
		data = linesz;
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	/*
	 * Do MSI
	 *
	 * NB: MSI-X needs to be disabled for the 2432 (PCI-Express)
	 */
	if (IS_24XX(isp) || IS_2322(isp)) {
		pcs->msicount = pci_msi_count(dev);
		if (pcs->msicount > 1) {
			pcs->msicount = 1;
		}
		if (pci_alloc_msi(dev, &pcs->msicount) == 0) {
			pcs->iqd = 1;
		} else {
			pcs->iqd = 0;
		}
	}
	pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE);
	if (pcs->irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	if (isp_reinit(isp, 1) != 0) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	ISP_UNLOCK(isp);
	if (isp_attach(isp)) {
		ISP_LOCK(isp);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	return (0);

bad:
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	if (locksetup) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
	if (pcs->irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	}
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	if (pcs->regs)
		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (ENXIO);
}

static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	int status;

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		return (ENXIO);
	}
	isp = (ispsoftc_t *) pcs;
	status = isp_detach(isp);
	if (status)
		return (status);
	ISP_LOCK(isp);
	isp_uninit(isp);
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	ISP_UNLOCK(isp);
	mtx_destroy(&isp->isp_osinfo.lock);
	(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	/*
	 * XXX: THERE IS A LOT OF LEAKAGE HERE
	 */
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (0);
}
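
/*
 * Register access helpers: IspVirt2Off() converts a core-code "virtual"
 * register offset into an offset within the mapped BAR by looking up the
 * register block (BIU, MBOX, SXP, RISC, DMA) in the pci_poff[] table set
 * up in isp_pci_attach(). The BXR/BXW macros then perform the actual
 * bus_space access through isp_regs, while B2R4/B2W4 use isp_regs2, the
 * second memory BAR mapped for 26XX parts.
 */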

#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

#define	BXR2(isp, off)		bus_read_2((isp)->isp_regs, (off))
#define	BXW2(isp, off, v)	bus_write_2((isp)->isp_regs, (off), (v))
#define	BXR4(isp, off)		bus_read_4((isp)->isp_regs, (off))
#define	BXW4(isp, off, v)	bus_write_4((isp)->isp_regs, (off), (v))
#define	B2R4(isp, off)		bus_read_4((isp)->isp_regs2, (off))
#define	B2W4(isp, off, v)	bus_write_4((isp)->isp_regs2, (off), (v))

static ISP_INLINE int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	uint32_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(isp, IspVirt2Off(isp, off));
		val1 = BXR2(isp, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
{
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, info)) {
				return (0);
			}
		} else {
			*info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
{
	uint32_t hccr, r2hisr;

	if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) {
		*isrp = 0;
		return (0);
	}
	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*semap = 1;
		break;
	case ISPR2HST_RIO_16:
		*info = ASYNC_RIO16_1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*info = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*info = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*semap = 0;
		break;
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		}
		return (0);
	}
	*info = (r2hisr >> 16);
	return (1);
}

static int
isp_pci_rd_isr_2400(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
{
	uint32_t r2hisr;

	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*semap = 1;
		break;
	case ISPR2HST_RSPQ_UPDATE:
	case ISPR2HST_RSPQ_UPDATE2:
	case ISPR2HST_ATIO_UPDATE:
	case ISPR2HST_ATIO_RSPQ_UPDATE:
	case ISPR2HST_ATIO_UPDATE2:
		*semap = 0;
		break;
	default:
		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		return (0);
	}
	*info = (r2hisr >> 16);
	return (1);
}

static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}

}

static uint32_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint32_t rv, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
}

static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		return (BXR2(isp, IspVirt2Off(isp, regoff)));
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGERR, "SXP_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGERR, "RISC_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGERR, "DMA_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	default:
		isp_prt(isp, ISP_LOGERR, "unknown block read at 0x%x", regoff);
		return (0xffffffff);
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSLO:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSHI:
		rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
		    regoff);
		rv = 0xffffffff;
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		BXW2(isp, IspVirt2Off(isp, regoff), val);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
		return;
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGERR, "SXP_BLOCK write at 0x%x", regoff);
		return;
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGERR, "RISC_BLOCK write at 0x%x", regoff);
		return;
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGERR, "DMA_BLOCK write at 0x%x", regoff);
		return;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown block write at 0x%x", regoff);
		break;
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		BXW4(isp, IspVirt2Off(isp, regoff), val);
#ifdef MEMORYBARRIERW
		if (regoff == BIU2400_REQINP ||
		    regoff == BIU2400_RSPOUTP ||
		    regoff == BIU2400_PRI_REQINP ||
		    regoff == BIU2400_ATIO_RSPOUTP)
			MEMORYBARRIERW(isp, SYNC_REG,
			    IspVirt2Off(isp, regoff), 4, -1)
		else
#endif
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
		    regoff);
		break;
	}
}

static uint32_t
isp_pci_rd_reg_2600(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;

	switch (regoff) {
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
		    regoff);
		rv = 0xffffffff;
		break;
	case BIU2400_REQINP:
		rv = B2R4(isp, 0x00);
		break;
	case BIU2400_REQOUTP:
		rv = B2R4(isp, 0x04);
		break;
	case BIU2400_RSPINP:
		rv = B2R4(isp, 0x08);
		break;
	case BIU2400_RSPOUTP:
		rv = B2R4(isp, 0x0c);
		break;
	case BIU2400_ATIO_RSPINP:
		rv = B2R4(isp, 0x10);
		break;
	case BIU2400_ATIO_RSPOUTP:
		rv = B2R4(isp, 0x14);
		break;
	default:
		rv = isp_pci_rd_reg_2400(isp, regoff);
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2600(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int off;

	switch (regoff) {
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
		    regoff);
		return;
	case BIU2400_REQINP:
		off = 0x00;
		break;
	case BIU2400_REQOUTP:
		off = 0x04;
		break;
	case BIU2400_RSPINP:
		off = 0x08;
		break;
	case BIU2400_RSPOUTP:
		off = 0x0c;
		break;
	case BIU2400_ATIO_RSPINP:
		off = 0x10;
		break;
	case BIU2400_ATIO_RSPOUTP:
		off = 0x14;
		break;
	default:
		isp_pci_wr_reg_2400(isp, regoff, val);
		return;
	}
	B2W4(isp, off, val);
}
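
/*
 * imc() carves the single contiguous control-space allocation into the
 * pieces the core code expects, in order: the request queue, the result
 * (response) queue, then on 2200-and-newer FC chips the pool of N_XCMDS
 * extended command buffers, and finally (with target mode compiled in,
 * on 24XX and later) the ATIO queue. imc1() does the same for the
 * per-channel FC scratch area.
 */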

struct imush {
	ispsoftc_t *isp;
	caddr_t vbase;
	int chan;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);
static void imc1(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	isp_ecmd_t *ecmd;

	if (error) {
		imushp->error = error;
		return;
	}
	if (nseg != 1) {
		imushp->error = EINVAL;
		return;
	}
	isp_prt(imushp->isp, ISP_LOGDEBUG0, "request/result area @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);

	imushp->isp->isp_rquest = imushp->vbase;
	imushp->isp->isp_rquest_dma = segs->ds_addr;
	segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
	imushp->vbase += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));

	imushp->isp->isp_result_dma = segs->ds_addr;
	imushp->isp->isp_result = imushp->vbase;
	segs->ds_addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
	imushp->vbase += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));

	if (imushp->isp->isp_type >= ISP_HA_FC_2200) {
		imushp->isp->isp_osinfo.ecmd_dma = segs->ds_addr;
		imushp->isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)imushp->vbase;
		imushp->isp->isp_osinfo.ecmd_base = imushp->isp->isp_osinfo.ecmd_free;
		for (ecmd = imushp->isp->isp_osinfo.ecmd_free; ecmd < &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
			if (ecmd == &imushp->isp->isp_osinfo.ecmd_free[N_XCMDS - 1]) {
				ecmd->next = NULL;
			} else {
				ecmd->next = ecmd + 1;
			}
		}
	}
#ifdef ISP_TARGET_MODE
	segs->ds_addr += (N_XCMDS * XCMD_SIZE);
	imushp->vbase += (N_XCMDS * XCMD_SIZE);
	if (IS_24XX(imushp->isp)) {
		imushp->isp->isp_atioq_dma = segs->ds_addr;
		imushp->isp->isp_atioq = imushp->vbase;
	}
#endif
}

static void
imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
		return;
	}
	if (nseg != 1) {
		imushp->error = EINVAL;
		return;
	}
	isp_prt(imushp->isp, ISP_LOGDEBUG0, "scdma @ 0x%jx/0x%jx", (uintmax_t) segs->ds_addr, (uintmax_t) segs->ds_len);
	FCPARAM(imushp->isp, imushp->chan)->isp_scdma = segs->ds_addr;
	FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase;
}

static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	caddr_t base;
	uint32_t len, nsegs;
	int i, error, cmap = 0;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}
	ISP_UNLOCK(isp);

	if (isp->isp_maxcmds == 0) {
		isp_prt(isp, ISP_LOGERR, "maxcmds not set");
		ISP_LOCK(isp);
		return (1);
	}

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		if (sizeof (bus_size_t) > 4) {
			slim = (bus_size_t) (1ULL << 32);
		} else {
			slim = (bus_size_t) (1UL << 31);
		}
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1UL << 24);
	}

	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_osinfo.pcmd_pool == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds");
		ISP_LOCK(isp);
		return (1);
	}

	if (isp->isp_osinfo.sixtyfourbit) {
		nsegs = ISP_NSEG64_MAX;
	} else {
		nsegs = ISP_NSEG_MAX;
	}

	if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0, &isp->isp_osinfo.dmat)) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}

	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
	}
	isp->isp_xffree = isp->isp_xflist;
#ifdef ISP_TARGET_MODE
	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_tgtlist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		return (1);
	}
	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
		isp->isp_tgtlist[len].cmd = &isp->isp_tgtlist[len+1];
	}
	isp->isp_tgtfree = isp->isp_tgtlist;
#endif

	/*
	 * Allocate and map the request and result queues (and ATIO queue
	 * if we're a 2400 supporting target mode), and a region for
	 * external dma addressable command/status structures (23XX and
	 * later).
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
#ifdef ISP_TARGET_MODE
	if (IS_24XX(isp)) {
		len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	}
#endif
	if (isp->isp_type >= ISP_HA_FC_2200) {
		len += (N_XCMDS * XCMD_SIZE);
	}

	/*
	 * Create a tag for the control spaces. We don't always need this
	 * to be 32 bits, but we do this for simplicity and speed's sake.
	 */
	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, slim, 0, &isp->isp_osinfo.cdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces");
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &isp->isp_osinfo.cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	im.isp = isp;
	im.chan = 0;
	im.vbase = base;
	im.error = 0;

	bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	if (IS_FC(isp)) {
		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
			if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ISP_FC_SCRLEN, 1, slim, 0, &fc->tdmat)) {
				goto bad;
			}
			if (bus_dmamem_alloc(fc->tdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &fc->tdmap) != 0) {
				bus_dma_tag_destroy(fc->tdmat);
				goto bad;
			}
			im.isp = isp;
			im.chan = cmap;
			im.vbase = base;
			im.error = 0;
			bus_dmamap_load(fc->tdmat, fc->tdmap, base, ISP_FC_SCRLEN, imc1, &im, 0);
			if (im.error) {
				bus_dmamem_free(fc->tdmat, base, fc->tdmap);
				bus_dma_tag_destroy(fc->tdmat);
				goto bad;
			}
			if (!IS_2100(isp)) {
				for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
					struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
					if (n == NULL) {
						while (fc->nexus_free_list) {
							n = fc->nexus_free_list;
							fc->nexus_free_list = n->next;
							free(n, M_DEVBUF);
						}
						goto bad;
					}
					n->next = fc->nexus_free_list;
					fc->nexus_free_list = n;
				}
			}
		}
	}
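
	/*
	 * Give each potential outstanding command its own DMA map and
	 * watchdog callout, and thread the isp_pcmd structures into the
	 * pcmd_free list that the I/O path allocates from.
	 */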
	for (i = 0; i < isp->isp_maxcmds; i++) {
		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
		if (error) {
			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap);
			}
			goto bad;
		}
		callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0);
		if (i == isp->isp_maxcmds-1) {
			pcmd->next = NULL;
		} else {
			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
		}
	}
	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];
	ISP_LOCK(isp);
	return (0);

bad:
	while (--cmap >= 0) {
		struct isp_fc *fc = ISP_FC_PC(isp, cmap);
		bus_dmamap_unload(fc->tdmat, fc->tdmap);
		bus_dmamem_free(fc->tdmat, base, fc->tdmap);
		bus_dma_tag_destroy(fc->tdmat);
		while (fc->nexus_free_list) {
			struct isp_nexus *n = fc->nexus_free_list;
			fc->nexus_free_list = n->next;
			free(n, M_DEVBUF);
		}
	}
	if (isp->isp_rquest_dma != 0)
		bus_dmamap_unload(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap);
	bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap);
	bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
	isp->isp_rquest = NULL;
	ISP_LOCK(isp);
	return (1);
}

typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;	/* original request */
	int error;
	bus_size_t mapsize;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2
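
/*
 * DMA setup for commands: isp_pci_dmasetup() loads the CCB through
 * bus_dmamap_load_ccb() with a mush_t as the callback argument. The dma2()
 * (initiator) and tdma2() (target mode CTIO) callbacks translate the CAM
 * direction flags into an isp_ddir_t, pre-sync the per-command DMA map,
 * and hand the segment list to isp_send_cmd()/isp_send_tgt_cmd(); errors
 * come back via mush_t.error, with MUSHERR_NOQENTRIES meaning the request
 * queue was full and the command should be retried (CMD_EAGAIN).
 */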

#ifdef ISP_TARGET_MODE
static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void tdma2(void *, bus_dma_segment_t *, int, int);

static void
tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	tdma2(arg, dm_segs, nseg, error);
}

static void
tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
				rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else {
			dm_segs = NULL;
			nseg = 0;
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
	}
}
#endif

static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	dma2(arg, dm_segs, nseg, error);
}

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (isp->isp_osinfo.sixtyfourbit) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
				rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
			} else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
				rq->req_header.rqs_entry_type = RQSTYPE_A64;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else {
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
		break;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
		break;
	}
}

static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);
	void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int);
	int error;

	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = ff;
	mp->error = 0;
	mp->mapsize = 0;

#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		eptr = tdma2;
		eptr2 = tdma2_2;
	} else
#endif
	{
		eptr = dma2;
		eptr2 = dma2_2;
	}


	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
	    (union ccb *)csio, eptr, mp, 0);
	if (error == EINPROGRESS) {
		bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
		mp->error = EINVAL;
		isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
	} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
		isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
		mp->error = error;
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			csio->ccb_h.status = CAM_REQ_TOO_BIG;
		} else if (mp->error == EINVAL) {
			csio->ccb_h.status = CAM_REQ_INVALID;
		} else {
			csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
		}
		return (retval);
	}
	return (CMD_QUEUED);
}

static void
isp_pci_reset0(ispsoftc_t *isp)
{
	ISP_DISABLE_INTS(isp);
}

static void
isp_pci_reset1(ispsoftc_t *isp)
{
	if (!IS_24XX(isp)) {
		/* Make sure the BIOS is disabled */
		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	}
	/* and enable interrupts */
	ISP_ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}