/*-
 * Copyright (c) 1997-2008 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/bus.h>
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

#include <dev/isp/isp_freebsd.h>

static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static int isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);


static void isp_pci_reset0(ispsoftc_t *);
static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2400 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

static struct ispmdvec mdvec_2500 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_reset0,
	isp_pci_reset1,
	NULL
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif
#ifndef	PCIM_CMD_INTX_DISABLE
#define	PCIM_CMD_INTX_DISABLE		0x0400
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2322
#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2422
#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2432
#define	PCI_PRODUCT_QLOGIC_ISP2432	0x2432
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2532
#define	PCI_PRODUCT_QLOGIC_ISP2532	0x2532
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6322
#define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP5432
#define	PCI_PRODUCT_QLOGIC_ISP5432	0x5432
#endif

#define	PCI_QLOGIC_ISP5432	\
	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2432	\
	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2532	\
	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6322	\
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
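 * (isp_pci_probe() below checks the PCI subvendor ID against this and
 * returns ENXIO for such cards.)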
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);


#define	ISP_PCD(isp)	((struct isp_pcisoftc *)isp)->pci_dev
struct isp_pcisoftc {
	ispsoftc_t		pci_isp;
	device_t		pci_dev;
	struct resource *	pci_reg;
	void *			ih;
	int16_t			pci_poff[_NREG_BLKS];
	bus_dma_tag_t		dmat;
	int			msicount;
};


static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	DEVMETHOD(device_detach,	isp_pci_detach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2432:
		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2532:
		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP5432:
		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

static void
isp_get_generic_options(device_t dev, ispsoftc_t *isp, int *nvp)
{
	int tval;

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "disable", &tval) == 0 && tval) {
		device_printf(dev, "disabled at user request\n");
		isp->isp_osinfo.disabled = 1;
		return;
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
	if (tval > 0 && tval < 127) {
		*nvp = tval;
	} else {
		*nvp = 0;
	}
	tval = 1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "autoconfig", &tval);
	isp_autoconfig = tval;
	tval = 7;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
	isp_quickboot_time = tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "forcemulti", &tval) == 0 && tval != 0) {
		isp->isp_osinfo.forcemulti = 1;
	}
}

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int tval;
	/*
	 * Which we should try first - memory mapping or i/o mapping?
	 *
	 * We used to try memory first followed by i/o on alpha, otherwise
	 * the reverse, but we should just try memory first all the time now.
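	 *
	 * The preference can still be overridden with a hint; for example
	 * (hypothetical unit 0, hint names taken from the lookups just below):
	 *
	 *	hint.isp.0.prefer_iomap="1"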
	 */
	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_PORTEN;
		*m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_memmap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_MEMEN;
		*m2 = PCIM_CMD_PORTEN;
	}
}

static void
isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
{
	const char *sptr;
	int tval;

	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "iid", &tval)) {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
		} else {
#ifdef __sparc64__
			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
#else
			ISP_SPI_PC(isp, chan)->iid = 7;
#endif
		}
	} else {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = tval - chan;
		} else {
			ISP_SPI_PC(isp, chan)->iid = tval;
		}
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}

	tval = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "role", &tval) == 0) {
		switch (tval) {
		case ISP_ROLE_NONE:
		case ISP_ROLE_INITIATOR:
		case ISP_ROLE_TARGET:
		case ISP_ROLE_INITIATOR|ISP_ROLE_TARGET:
			device_printf(dev, "setting role to 0x%x\n", tval);
			break;
		default:
			tval = -1;
			break;
		}
	}
	if (tval == -1) {
		tval = ISP_DEFAULT_ROLES;
	}

	if (IS_SCSI(isp)) {
		ISP_SPI_PC(isp, chan)->def_role = tval;
		return;
	}
	ISP_FC_PC(isp, chan)->def_role = tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev), "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
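	 *
	 * For example, in /boot/device.hints (hypothetical unit 0 and
	 * example WWNs):
	 *
	 *	hint.isp.0.portwwn="w50000000aaaa0001"
	 *	hint.isp.0.nodewwn="w50000000aaaa0000"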
	 */
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev), "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwpn = 0;
		}
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev), "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwnn = 0;
		}
	}

	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "hysteresis", &tval);
	if (tval >= 0 && tval < 256) {
		ISP_FC_PC(isp, chan)->hysteresis = tval;
	} else {
		ISP_FC_PC(isp, chan)->hysteresis = isp_fabric_hysteresis;
	}

	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "loop_down_limit", &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
	} else {
		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "gone_device_time", &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->gone_device_time = tval;
	} else {
		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
	}
}

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int rtp, rgd, iqd, i, m1, m2, locksetup = 0;
	int isp_nvports = 0;
	uint32_t data, cmd, linesz, did;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;
	size_t psize, xsize;
	char fwname[32];

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));

	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;
	isp->isp_dev = dev;
	isp->isp_nchan = 1;

	/*
	 * Get Generic Options
	 */
	isp_get_generic_options(dev, isp, &isp_nvports);

	/*
	 * Check to see if options have us disabled
	 */
	if (isp->isp_osinfo.disabled) {
		/*
		 * But return zero to preserve unit numbering
		 */
		return (0);
	}

	/*
	 * Get PCI options- which in this case are just mapping preferences.
	 */
	isp_get_pci_options(dev, &m1, &m2);

	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n", (rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	isp->isp_bus_tag = rman_get_bustag(regs);
	isp->isp_bus_handle = rman_get_bushandle(regs);

	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	switch (pci_get_devid(dev)) {
	case PCI_QLOGIC_ISP1020:
		did = 0x1040;
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		break;
	case PCI_QLOGIC_ISP1080:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1240:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_nchan = 2;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1280:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP10160:
		did = 0x12160;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_10160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP12160:
		did = 0x12160;
		isp->isp_nchan = 2;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP2100:
		did = 0x2100;
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
		break;
	case PCI_QLOGIC_ISP2200:
		did = 0x2200;
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		break;
	case PCI_QLOGIC_ISP2300:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2300;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2312:
	case PCI_QLOGIC_ISP6312:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2312;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2322:
	case PCI_QLOGIC_ISP6322:
		did = 0x2322;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2322;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2422:
	case PCI_QLOGIC_ISP2432:
		did = 0x2400;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2400;
		isp->isp_type = ISP_HA_FC_2400;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2532:
		did = 0x2500;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP5432:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	default:
		device_printf(dev, "unknown device type\n");
		goto bad;
		break;
	}
	isp->isp_revision = pci_get_revid(dev);

	if (IS_FC(isp)) {
		psize = sizeof (fcparam);
		xsize = sizeof (struct isp_fc);
	} else {
		psize = sizeof (sdparam);
		xsize = sizeof (struct isp_spi);
	}
	psize *= isp->isp_nchan;
	xsize *= isp->isp_nchan;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_osinfo.pc.ptr == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}

	/*
	 * Now that we know who we are (roughly) get/set specific options
	 */
	for (i = 0; i < isp->isp_nchan; i++) {
		isp_get_specific_options(dev, i, isp);
	}

	/*
	 * The 'it' suffix really only matters for SCSI cards in target mode.
	 */
	isp->isp_osinfo.fw = NULL;
	if (IS_SCSI(isp) && (ISP_SPI_PC(isp, 0)->def_role & ISP_ROLE_TARGET)) {
		snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	} else if (IS_24XX(isp) && (isp->isp_nchan > 1 || isp->isp_osinfo.forcemulti)) {
		snprintf(fwname, sizeof (fwname), "isp_%04x_multi", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw == NULL) {
		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw != NULL) {
		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;

	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}

	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}

	if (IS_24XX(isp)) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}

	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d from %d", linesz, data);
		data = linesz;
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	/*
	 * Do MSI
	 *
	 * NB: MSI-X needs to be disabled for the 2432 (PCI-Express)
	 */
	if (IS_24XX(isp) || IS_2322(isp)) {
		pcs->msicount = pci_msi_count(dev);
		if (pcs->msicount > 1) {
			pcs->msicount = 1;
		}
		if (pci_alloc_msi(dev, &pcs->msicount) == 0) {
			iqd = 1;
		} else {
			iqd = 0;
		}
	}
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;

	if (isp_setup_intr(dev, irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp, 1);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_state == ISP_INITSTATE) {
		isp->isp_state = ISP_RUNSTATE;
	}
	ISP_UNLOCK(isp);
	if (isp_attach(isp)) {
		ISP_LOCK(isp);
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	return (0);

bad:
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}
	if (locksetup) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	return (ENXIO);
}

static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		return (ENXIO);
	}
	isp = (ispsoftc_t *) pcs;
	ISP_DISABLE_INTS(isp);
	mtx_destroy(&isp->isp_osinfo.lock);
	return (0);
}

#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

#define	BXR2(isp, off)		\
	bus_space_read_2(isp->isp_bus_tag, isp->isp_bus_handle, off)
#define	BXW2(isp, off, v)	\
	bus_space_write_2(isp->isp_bus_tag, isp->isp_bus_handle, off, v)
#define	BXR4(isp, off)		\
	bus_space_read_4(isp->isp_bus_tag, isp->isp_bus_handle, off)
#define	BXW4(isp, off, v)	\
	bus_space_write_4(isp->isp_bus_tag, isp->isp_bus_handle, off, v)


static ISP_INLINE int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	uint32_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(isp, IspVirt2Off(isp, off));
		val1 = BXR2(isp, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

static int
isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbp)
{
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
				return (0);
			}
		} else {
			*mbp = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbox0p)
{
	uint32_t hccr;
	uint32_t r2hisr;

	if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) {
		*isrp = 0;
		return (0);
	}
	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO16_1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		}
		return (0);
	}
}

static int
isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp, uint16_t *semap, uint16_t *mbox0p)
{
	uint32_t r2hisr;

	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU2400_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) {
	case ISP2400R2HST_ROM_MBX_OK:
	case ISP2400R2HST_ROM_MBX_FAIL:
	case ISP2400R2HST_MBX_OK:
	case ISP2400R2HST_MBX_FAIL:
	case ISP2400R2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISP2400R2HST_RSPQ_UPDATE:
	case ISP2400R2HST_ATIO_RSPQ_UPDATE:
	case ISP2400R2HST_ATIO_RQST_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		return (0);
	}
}

static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}

}

static uint32_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint32_t rv, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	BXW2(isp, IspVirt2Off(isp, regoff), val);
	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
	if (oc) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
}

static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		return (BXR2(isp, IspVirt2Off(isp, regoff)));
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
		return (0xffffffff);
	}


	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSLO:
		rv = BXR4(isp, IspVirt2Off(isp, regoff));
		break;
	case BIU2400_R2HSTSHI:
		rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_rd_reg_2400: unknown offset %x", regoff);
		rv = 0xffffffff;
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		BXW2(isp, IspVirt2Off(isp, regoff), val);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
		return;
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
		return;
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
		return;
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
		return;
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
		    regoff);
		break;
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_RSPOUTP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		BXW4(isp, IspVirt2Off(isp, regoff), val);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
		break;
	}
}


struct imush {
	ispsoftc_t *isp;
	caddr_t vbase;
	int chan;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);
static void imc1(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;

	if (error) {
		imushp->error = error;
		return;
	}
	if (nseg != 1) {
		imushp->error = EINVAL;
		return;
	}
	imushp->isp->isp_rquest = imushp->vbase;
	imushp->isp->isp_rquest_dma = segs->ds_addr;
	segs->ds_addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
	imushp->vbase += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(imushp->isp));
	imushp->isp->isp_result_dma = segs->ds_addr;
	imushp->isp->isp_result = imushp->vbase;

#ifdef ISP_TARGET_MODE
	if (IS_24XX(imushp->isp)) {
		segs->ds_addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
		imushp->vbase += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(imushp->isp));
		imushp->isp->isp_atioq_dma = segs->ds_addr;
		imushp->isp->isp_atioq = imushp->vbase;
	}
#endif
}

static void
imc1(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
		return;
	}
	if (nseg != 1) {
		imushp->error = EINVAL;
		return;
	}
	FCPARAM(imushp->isp, imushp->chan)->isp_scdma = segs->ds_addr;
	FCPARAM(imushp->isp, imushp->chan)->isp_scratch = imushp->vbase;
}

static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	caddr_t base;
	uint32_t len;
	int i, error, ns, cmap = 0;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}
	ISP_UNLOCK(isp);

	if (isp->isp_maxcmds == 0) {
		isp_prt(isp, ISP_LOGERR, "maxcmds not set");
		ISP_LOCK(isp);
		return (1);
	}

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		if (sizeof (bus_size_t) > 4) {
			slim = (bus_size_t) (1ULL << 32);
		} else {
			slim = (bus_size_t) (1UL << 31);
		}
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1UL << 24);
	}

	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_osinfo.pcmd_pool == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds");
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
	 */
#ifdef ISP_TARGET_MODE
	if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
		ISP_LOCK(isp);
		return (1);
	}
#endif

	if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &isp->isp_osinfo.dmat)) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}

	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		return (1);
	}
	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
	}
	isp->isp_xffree = isp->isp_xflist;
#ifdef ISP_TARGET_MODE
	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_tgtlist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		return (1);
	}
	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
		isp->isp_tgtlist[len].cmd = &isp->isp_tgtlist[len+1];
	}
	isp->isp_tgtfree = isp->isp_tgtlist;
#endif

	/*
	 * Allocate and map the request and result queues (and ATIO queue
	 * if we're a 2400 supporting target mode).
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
#ifdef ISP_TARGET_MODE
	if (IS_24XX(isp)) {
		len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	}
#endif

	ns = (len / PAGE_SIZE) + 1;

	/*
	 * Create a tag for the control spaces. We don't always need this
	 * to be 32 bits, but we do this for simplicity and speed's sake.
	 */
	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, len, ns, slim, 0, &isp->isp_osinfo.cdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create a dma tag for control spaces");
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_osinfo.cdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &isp->isp_osinfo.cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	im.isp = isp;
	im.chan = 0;
	im.vbase = base;
	im.error = 0;

	bus_dmamap_load(isp->isp_osinfo.cdmat, isp->isp_osinfo.cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR, "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	if (IS_FC(isp)) {
		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
			if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ISP_FC_SCRLEN, 1, slim, 0, &fc->tdmat)) {
				goto bad;
			}
			if (bus_dmamem_alloc(fc->tdmat, (void **)&base, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &fc->tdmap) != 0) {
				bus_dma_tag_destroy(fc->tdmat);
				goto bad;
			}
			im.isp = isp;
			im.chan = cmap;
			im.vbase = base;
			im.error = 0;
			bus_dmamap_load(fc->tdmat, fc->tdmap, base, ISP_FC_SCRLEN, imc1, &im, 0);
			if (im.error) {
				bus_dmamem_free(fc->tdmat, base, fc->tdmap);
				bus_dma_tag_destroy(fc->tdmat);
				goto bad;
			}
		}
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
		if (error) {
			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap);
			}
			goto bad;
		}
		callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0);
		if (i == isp->isp_maxcmds-1) {
			pcmd->next = NULL;
		} else {
			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
		}
	}
	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];
	ISP_LOCK(isp);
	return (0);

bad:
	while (--cmap >= 0) {
		struct isp_fc *fc = ISP_FC_PC(isp, cmap);
		bus_dmamem_free(fc->tdmat, base, fc->tdmap);
		bus_dma_tag_destroy(fc->tdmat);
	}
	bus_dmamem_free(isp->isp_osinfo.cdmat, base, isp->isp_osinfo.cdmap);
	bus_dma_tag_destroy(isp->isp_osinfo.cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
	isp->isp_rquest = NULL;
	ISP_LOCK(isp);
	return (1);
}

typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;	/* original request */
	int error;
	bus_size_t mapsize;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef ISP_TARGET_MODE
static void tdma2_2(void *,
    bus_dma_segment_t *, int, bus_size_t, int);
static void tdma2(void *, bus_dma_segment_t *, int, int);

static void
tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	tdma2(arg, dm_segs, nseg, error);
}

static void
tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (sizeof (bus_addr_t) > 4) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
				rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else {
			dm_segs = NULL;
			nseg = 0;
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	if (isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len) != CMD_QUEUED) {
		mp->error = MUSHERR_NOQENTRIES;
	}
}
#endif

static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
static void dma2(void *, bus_dma_segment_t *, int, int);

static void
dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
{
	mush_t *mp;
	mp = (mush_t *)arg;
	mp->mapsize = mapsize;
	dma2(arg, dm_segs, nseg, error);
}

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ispsoftc_t *isp;
	struct ccb_scsiio *csio;
	isp_ddir_t ddir;
	ispreq_t *rq;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}
	csio = mp->cmd_token;
	isp = mp->isp;
	rq = mp->rq;
	if (nseg) {
		if (sizeof (bus_addr_t) > 4) {
			if (nseg >= ISP_NSEG64_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
				mp->error = EFAULT;
				return;
			}
			if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
				rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
			} else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
				rq->req_header.rqs_entry_type = RQSTYPE_A64;
			}
		} else {
			if (nseg >= ISP_NSEG_MAX) {
				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX);
				mp->error = EFAULT;
				return;
			}
		}
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			bus_dmamap_sync(isp->isp_osinfo.dmat,
			    PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
			ddir = ISP_FROM_DEVICE;
		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
			ddir = ISP_TO_DEVICE;
		} else {
			ddir = ISP_NOXFR;
		}
	} else {
		dm_segs = NULL;
		nseg = 0;
		ddir = ISP_NOXFR;
	}

	if (isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir) != CMD_QUEUED) {
		mp->error = MUSHERR_NOQENTRIES;
	}
}

static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
	mush_t mush, *mp;
	void (*eptr)(void *, bus_dma_segment_t *, int, int);
	void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int);

	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = ff;
	mp->error = 0;
	mp->mapsize = 0;

#ifdef ISP_TARGET_MODE
	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		eptr = tdma2;
		eptr2 = tdma2_2;
	} else
#endif
	{
		eptr = dma2;
		eptr2 = dma2_2;
	}


	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || (csio->dxfer_len == 0)) {
		(*eptr)(mp, NULL, 0, 0);
	} else if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
			int error;
			error = bus_dmamap_load(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
#if 0
			xpt_print(csio->ccb_h.path, "%s: bus_dmamap_load " "ptr %p len %d returned %d\n", __func__, csio->data_ptr, csio->dxfer_len, error);
#endif

			if (error == EINPROGRESS) {
				bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
				mp->error = EINVAL;
				isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
			} else if (error && mp->error == 0) {
#ifdef DIAGNOSTIC
				isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
				mp->error = error;
			}
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			(*eptr)(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
			isp_prt(isp, ISP_LOGERR, "Physical segment pointers unsupported");
			mp->error = EINVAL;
		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			struct uio sguio;
			int error;

			/*
			 * We're taking advantage of the fact that
			 * the pointer/length sizes and layout of the iovec
			 * structure are the same as the bus_dma_segment
			 * structure. This might be a little dangerous,
			 * but only if they change the structures, which
			 * seems unlikely.
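			 * (The KASSERT just below verifies those size
			 * assumptions before bus_dmamap_load_uio() is
			 * called.)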
			 */
			KASSERT((sizeof (sguio.uio_iov) == sizeof (csio->data_ptr) &&
			    sizeof (sguio.uio_iovcnt) >= sizeof (csio->sglist_cnt) &&
			    sizeof (sguio.uio_resid) >= sizeof (csio->dxfer_len)), ("Ken's assumption failed"));
			sguio.uio_iov = (struct iovec *)csio->data_ptr;
			sguio.uio_iovcnt = csio->sglist_cnt;
			sguio.uio_resid = csio->dxfer_len;
			sguio.uio_segflg = UIO_SYSSPACE;

			error = bus_dmamap_load_uio(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, &sguio, eptr2, mp, 0);

			if (error != 0 && mp->error == 0) {
				isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
				mp->error = error;
			}
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			(*eptr)(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	}
	return (CMD_QUEUED);
}

static void
isp_pci_reset0(ispsoftc_t *isp)
{
	ISP_DISABLE_INTS(isp);
}

static void
isp_pci_reset1(ispsoftc_t *isp)
{
	if (!IS_24XX(isp)) {
		/* Make sure the BIOS is disabled */
		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
	}
	/* and enable interrupts */
	ISP_ENABLE_INTS(isp);
}

static void
isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	if (msg)
		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
	else
		printf("%s:\n", device_get_nameunit(isp->isp_dev));
	if (IS_SCSI(isp))
		printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
	else
		printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));


	if (IS_SCSI(isp)) {
		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
		printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
		    ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
		    ISP_READ(isp, CDMA_FIFO_STS));
		printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
		    ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
		    ISP_READ(isp, DDMA_FIFO_STS));
		printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
		    ISP_READ(isp, SXP_INTERRUPT),
		    ISP_READ(isp, SXP_GROSS_ERR),
		    ISP_READ(isp, SXP_PINS_CTRL));
		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
	}
	printf(" mbox regs: %x %x %x %x %x\n",
	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
	    ISP_READ(isp, OUTMAILBOX4));
	printf(" PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
}