/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2020 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 1997-2008 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/bus.h>
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <dev/isp/isp_freebsd.h>

static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2600(ispsoftc_t *, int);
static void isp_pci_wr_reg_2600(ispsoftc_t *, int, uint32_t);
static void isp_pci_run_isr_2400(ispsoftc_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static void isp_pci_mbxdmafree(ispsoftc_t *);
static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);
static int isp_pci_irqsetup(ispsoftc_t *);

static struct ispmdvec mdvec_2400 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

static struct ispmdvec mdvec_2500 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

static struct ispmdvec mdvec_2600 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2600,
	isp_pci_wr_reg_2600,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

static struct ispmdvec mdvec_2700 = {
	isp_pci_run_isr_2400,
	isp_pci_rd_reg_2600,
	isp_pci_wr_reg_2600,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};
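
/*
 * Each table above is the chip-specific function vector for one controller
 * generation, in what is presumed to be the field order of struct ispmdvec
 * (see ispvar.h): ISR handler, register read/write, mailbox/queue DMA setup,
 * per-command DMA setup/teardown, and IRQ setup.  The trailing NULL is
 * dv_ispfw, the firmware image pointer that isp_pci_attach() fills in once
 * firmware_get() succeeds.
 */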

#ifndef PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif
#ifndef PCIM_CMD_INTX_DISABLE
#define	PCIM_CMD_INTX_DISABLE		0x0400
#endif

#ifndef PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#define	PCI_VENDOR_QLOGIC		0x1077

#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#define	PCI_PRODUCT_QLOGIC_ISP2432	0x2432
#define	PCI_PRODUCT_QLOGIC_ISP2532	0x2532
#define	PCI_PRODUCT_QLOGIC_ISP5432	0x5432
#define	PCI_PRODUCT_QLOGIC_ISP2031	0x2031
#define	PCI_PRODUCT_QLOGIC_ISP8031	0x8031
#define	PCI_PRODUCT_QLOGIC_ISP2684	0x2171
#define	PCI_PRODUCT_QLOGIC_ISP2692	0x2b61
#define	PCI_PRODUCT_QLOGIC_ISP2714	0x2071
#define	PCI_PRODUCT_QLOGIC_ISP2722	0x2261

#define	PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
#define	PCI_QLOGIC_ISP2432	\
	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)
#define	PCI_QLOGIC_ISP2532	\
	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)
#define	PCI_QLOGIC_ISP5432	\
	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)
#define	PCI_QLOGIC_ISP2031	\
	((PCI_PRODUCT_QLOGIC_ISP2031 << 16) | PCI_VENDOR_QLOGIC)
#define	PCI_QLOGIC_ISP8031	\
	((PCI_PRODUCT_QLOGIC_ISP8031 << 16) | PCI_VENDOR_QLOGIC)
#define	PCI_QLOGIC_ISP2684	\
	((PCI_PRODUCT_QLOGIC_ISP2684 << 16) | PCI_VENDOR_QLOGIC)
#define	PCI_QLOGIC_ISP2692	\
	((PCI_PRODUCT_QLOGIC_ISP2692 << 16) | PCI_VENDOR_QLOGIC)
#define	PCI_QLOGIC_ISP2714	\
	((PCI_PRODUCT_QLOGIC_ISP2714 << 16) | PCI_VENDOR_QLOGIC)
#define	PCI_QLOGIC_ISP2722	\
	((PCI_PRODUCT_QLOGIC_ISP2722 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);


#define	ISP_PCD(isp)	((struct isp_pcisoftc *)isp)->pci_dev
struct isp_pcisoftc {
	ispsoftc_t			pci_isp;
	device_t			pci_dev;
	struct resource *		regs;
	struct resource *		regs1;
	struct resource *		regs2;
	struct {
		int			iqd;
		struct resource *	irq;
		void *			ih;
	} irq[ISP_MAX_IRQS];
	int				rtp;
	int				rgd;
	int				rtp1;
	int				rgd1;
	int				rtp2;
	int				rgd2;
	bus_dma_tag_t			dmat;
	int				msicount;
};


static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	DEVMETHOD(device_detach,	isp_pci_detach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_DEPEND(isp, cam, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
static int isp_nvports = 0;
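
/*
 * The value switched on in isp_pci_probe() below is (device ID << 16) |
 * vendor ID, the same layout as the PCI_QLOGIC_* constants above and as
 * the devid that isp_pci_attach() later obtains via pci_get_devid().
 */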

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2432:
		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2532:
		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP5432:
		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2031:
		device_set_desc(dev, "Qlogic ISP 2031 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP8031:
		device_set_desc(dev, "Qlogic ISP 8031 PCI FCoE Adapter");
		break;
	case PCI_QLOGIC_ISP2684:
		device_set_desc(dev, "Qlogic ISP 2684 PCI FC Adapter");
		break;
	case PCI_QLOGIC_ISP2692:
		device_set_desc(dev, "Qlogic ISP 2692 PCI FC Adapter");
		break;
	case PCI_QLOGIC_ISP2714:
		device_set_desc(dev, "Qlogic ISP 2714 PCI FC Adapter");
		break;
	case PCI_QLOGIC_ISP2722:
		device_set_desc(dev, "Qlogic ISP 2722 PCI FC Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

static void
isp_get_generic_options(device_t dev, ispsoftc_t *isp)
{
	int tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
	if (tval > 0 && tval <= 254) {
		isp_nvports = tval;
	}
	tval = 7;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
	isp_quickboot_time = tval;
}
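
/*
 * The generic knobs above are read from per-unit hints via
 * resource_int_value().  A hypothetical /boot/device.hints fragment
 * exercising them (values for illustration only):
 *
 *	hint.isp.0.fwload_disable="1"
 *	hint.isp.0.debug="1"
 *	hint.isp.0.vports="2"
 */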

static void
isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
{
	const char *sptr;
	int tval = 0;
	char prefix[12], name[16];

	if (chan == 0)
		prefix[0] = 0;
	else
		snprintf(prefix, sizeof(prefix), "chan%d.", chan);
	snprintf(name, sizeof(name), "%siid", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval)) {
		ISP_FC_PC(isp, chan)->default_id = 109 - chan;
	} else {
		ISP_FC_PC(isp, chan)->default_id = tval - chan;
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}

	tval = -1;
	snprintf(name, sizeof(name), "%srole", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0) {
		switch (tval) {
		case ISP_ROLE_NONE:
		case ISP_ROLE_INITIATOR:
		case ISP_ROLE_TARGET:
		case ISP_ROLE_BOTH:
			device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
			break;
		default:
			tval = -1;
			break;
		}
	}
	if (tval == -1) {
		tval = ISP_DEFAULT_ROLES;
	}
	ISP_FC_PC(isp, chan)->def_role = tval;

	tval = 0;
	snprintf(name, sizeof(name), "%sfullduplex", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	sptr = NULL;
	snprintf(name, sizeof(name), "%stopology", prefix);
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr) == 0 && sptr != NULL) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

#ifdef ISP_FCTAPE_OFF
	isp->isp_confopts |= ISP_CFG_NOFCTAPE;
#else
	isp->isp_confopts |= ISP_CFG_FCTAPE;
#endif

	tval = 0;
	snprintf(name, sizeof(name), "%snofctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_FCTAPE;
		isp->isp_confopts |= ISP_CFG_NOFCTAPE;
	}

	tval = 0;
	snprintf(name, sizeof(name), "%sfctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
		isp->isp_confopts |= ISP_CFG_FCTAPE;
	}


	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
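	/*
	 * For example, a hypothetical hint for unit 0, channel 0 would be
	 * hint.isp.0.portwwn="w50000000aaaa0001" (likewise "nodewwn"); the
	 * leading 'w' is consumed before the strtouq() conversion below.
	 */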
	sptr = NULL;
	snprintf(name, sizeof(name), "%sportwwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
		char *eptr = NULL;
		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwpn = 0;
		}
	}

	sptr = NULL;
	snprintf(name, sizeof(name), "%snodewwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
		char *eptr = NULL;
		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwnn = 0;
		}
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
	} else {
		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sgone_device_time", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->gone_device_time = tval;
	} else {
		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
	}
}

static int
isp_pci_attach(device_t dev)
{
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	ispsoftc_t *isp = &pcs->pci_isp;
	int i;
	uint32_t data, cmd, linesz, did;
	size_t psize, xsize;
	char fwname[32];

	pcs->pci_dev = dev;
	isp->isp_dev = dev;
	isp->isp_nchan = 1;
	mtx_init(&isp->isp_lock, "isp", NULL, MTX_DEF);

	/*
	 * Get Generic Options
	 */
	isp_nvports = 0;
	isp_get_generic_options(dev, isp);

	linesz = PCI_DFLT_LNSZ;
	pcs->regs = pcs->regs2 = NULL;
	pcs->rgd = pcs->rtp = 0;

	pcs->pci_dev = dev;
	isp->isp_nchan += isp_nvports;
	switch (pci_get_devid(dev)) {
	case PCI_QLOGIC_ISP2422:
	case PCI_QLOGIC_ISP2432:
		did = 0x2400;
		isp->isp_mdvec = &mdvec_2400;
		isp->isp_type = ISP_HA_FC_2400;
		break;
	case PCI_QLOGIC_ISP2532:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		break;
	case PCI_QLOGIC_ISP5432:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		break;
	case PCI_QLOGIC_ISP2031:
	case PCI_QLOGIC_ISP8031:
		did = 0x2600;
		isp->isp_mdvec = &mdvec_2600;
		isp->isp_type = ISP_HA_FC_2600;
		break;
	case PCI_QLOGIC_ISP2684:
	case PCI_QLOGIC_ISP2692:
	case PCI_QLOGIC_ISP2714:
	case PCI_QLOGIC_ISP2722:
		did = 0x2700;
		isp->isp_mdvec = &mdvec_2700;
		isp->isp_type = ISP_HA_FC_2700;
		break;
	default:
		device_printf(dev, "unknown device type\n");
		goto bad;
		break;
	}
	isp->isp_revision = pci_get_revid(dev);

	if (IS_26XX(isp)) {
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(0);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		pcs->rtp1 = SYS_RES_MEMORY;
		pcs->rgd1 = PCIR_BAR(2);
		pcs->regs1 = bus_alloc_resource_any(dev, pcs->rtp1, &pcs->rgd1,
		    RF_ACTIVE);
		pcs->rtp2 = SYS_RES_MEMORY;
		pcs->rgd2 = PCIR_BAR(4);
		pcs->regs2 = bus_alloc_resource_any(dev, pcs->rtp2, &pcs->rgd2,
		    RF_ACTIVE);
	} else {
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(1);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		if (pcs->regs == NULL) {
			pcs->rtp = SYS_RES_IOPORT;
			pcs->rgd = PCIR_BAR(0);
			pcs->regs = bus_alloc_resource_any(dev, pcs->rtp,
			    &pcs->rgd, RF_ACTIVE);
		}
	}
	if (pcs->regs == NULL) {
		device_printf(dev, "Unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "Using %s space register mapping\n",
		    (pcs->rtp == SYS_RES_IOPORT)? "I/O" : "Memory");
	}
	isp->isp_regs = pcs->regs;
	isp->isp_regs2 = pcs->regs2;

	psize = sizeof(fcparam) * isp->isp_nchan;
	xsize = sizeof(struct isp_fc) * isp->isp_nchan;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_osinfo.pc.ptr == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}

	/*
	 * Now that we know who we are (roughly) get/set specific options
	 */
	for (i = 0; i < isp->isp_nchan; i++) {
		isp_get_specific_options(dev, i, isp);
	}

	isp->isp_osinfo.fw = NULL;
	if (isp->isp_osinfo.fw == NULL) {
		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw != NULL) {
		isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	cmd &= ~PCIM_CMD_INTX_DISABLE;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
		data = linesz;
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	/*
	 * Last minute checks...
	 */
	isp->isp_port = pci_get_function(dev);

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	if (isp_reinit(isp, 1) != 0) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	ISP_UNLOCK(isp);
	if (isp_attach(isp)) {
		ISP_LOCK(isp);
		isp_shutdown(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	return (0);

bad:
	if (isp->isp_osinfo.fw == NULL && !IS_26XX(isp)) {
		/*
		 * Failure to attach at boot time might have been caused
		 * by a missing ispfw(4).  The exception is 16Gb adapters,
		 * for which there is no loadable firmware.
		 */
		isp_prt(isp, ISP_LOGWARN, "See the ispfw(4) man page on "
		    "how to load known good firmware at boot time");
	}
	for (i = 0; i < isp->isp_nirq; i++) {
		(void) bus_teardown_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih);
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->irq[i].iqd,
		    pcs->irq[i].irq);
	}
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	if (pcs->regs)
		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs1)
		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	mtx_destroy(&isp->isp_lock);
	return (ENXIO);
}

static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	ispsoftc_t *isp = &pcs->pci_isp;
	int i, status;

	status = isp_detach(isp);
	if (status)
		return (status);
	ISP_LOCK(isp);
	isp_shutdown(isp);
	ISP_UNLOCK(isp);
	for (i = 0; i < isp->isp_nirq; i++) {
		(void) bus_teardown_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih);
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->irq[i].iqd,
		    pcs->irq[i].irq);
	}
	if (pcs->msicount)
		pci_release_msi(dev);
	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs1)
		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	isp_pci_mbxdmafree(isp);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	mtx_destroy(&isp->isp_lock);
	return (0);
}

#define	BXR2(isp, off)		bus_read_2((isp)->isp_regs, (off))
#define	BXW2(isp, off, v)	bus_write_2((isp)->isp_regs, (off), (v))
#define	BXR4(isp, off)		bus_read_4((isp)->isp_regs, (off))
#define	BXW4(isp, off, v)	bus_write_4((isp)->isp_regs, (off), (v))
#define	B2R4(isp, off)		bus_read_4((isp)->isp_regs2, (off))
#define	B2W4(isp, off, v)	bus_write_4((isp)->isp_regs2, (off), (v))

static void
isp_pci_run_isr_2400(ispsoftc_t *isp)
{
	uint32_t r2hisr;
	uint16_t isr, info;

	r2hisr = BXR4(isp, BIU2400_R2HSTS);
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0)
		return;
	isr = r2hisr & BIU_R2HST_ISTAT_MASK;
	info = (r2hisr >> 16);
	switch (isr) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
		isp_intr_mbox(isp, info);
		break;
	case ISPR2HST_ASYNC_EVENT:
		isp_intr_async(isp, info);
		break;
	case ISPR2HST_RSPQ_UPDATE:
		isp_intr_respq(isp);
		break;
	case ISPR2HST_RSPQ_UPDATE2:
#ifdef	ISP_TARGET_MODE
	case ISPR2HST_ATIO_RSPQ_UPDATE:
#endif
		isp_intr_respq(isp);
		/* FALLTHROUGH */
#ifdef	ISP_TARGET_MODE
	case ISPR2HST_ATIO_UPDATE:
	case ISPR2HST_ATIO_UPDATE2:
		isp_intr_atioq(isp);
#endif
		break;
	default:
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
	}
	ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
}

static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		return (BXR4(isp, regoff));
	case MBOX_BLOCK:
		return (BXR2(isp, regoff));
	}
	isp_prt(isp, ISP_LOGERR, "unknown block read at 0x%x", regoff);
	return (0xffffffff);
}

static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		BXW4(isp, regoff, val);
#ifdef MEMORYBARRIERW
		if (regoff == BIU2400_REQINP ||
		    regoff == BIU2400_RSPOUTP ||
		    regoff == BIU2400_PRI_REQINP ||
		    regoff == BIU2400_ATIO_RSPOUTP)
			MEMORYBARRIERW(isp, SYNC_REG, regoff, 4, -1)
		else
#endif
		MEMORYBARRIER(isp, SYNC_REG, regoff, 4, -1);
		return;
	case MBOX_BLOCK:
		BXW2(isp, regoff, val);
		MEMORYBARRIER(isp, SYNC_REG, regoff, 2, -1);
		return;
	}
	isp_prt(isp, ISP_LOGERR, "unknown block write at 0x%x", regoff);
}

static uint32_t
isp_pci_rd_reg_2600(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;

	switch (regoff) {
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
		    regoff);
		rv = 0xffffffff;
		break;
	case BIU2400_REQINP:
		rv = B2R4(isp, 0x00);
		break;
	case BIU2400_REQOUTP:
		rv = B2R4(isp, 0x04);
		break;
	case BIU2400_RSPINP:
		rv = B2R4(isp, 0x08);
		break;
	case BIU2400_RSPOUTP:
		rv = B2R4(isp, 0x0c);
		break;
	case BIU2400_ATIO_RSPINP:
		rv = B2R4(isp, 0x10);
		break;
	case BIU2400_ATIO_RSPOUTP:
		rv = B2R4(isp, 0x14);
		break;
	default:
		rv = isp_pci_rd_reg_2400(isp, regoff);
		break;
	}
	return (rv);
}

static void
isp_pci_wr_reg_2600(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int off;

	switch (regoff) {
	case BIU2400_PRI_REQINP:
	case BIU2400_PRI_REQOUTP:
		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
		    regoff);
		return;
	case BIU2400_REQINP:
		off = 0x00;
		break;
	case BIU2400_REQOUTP:
		off = 0x04;
		break;
	case BIU2400_RSPINP:
		off = 0x08;
		break;
	case BIU2400_RSPOUTP:
		off = 0x0c;
		break;
	case BIU2400_ATIO_RSPINP:
		off = 0x10;
		break;
	case BIU2400_ATIO_RSPOUTP:
		off = 0x14;
		break;
	default:
		isp_pci_wr_reg_2400(isp, regoff, val);
		return;
	}
	B2W4(isp, off, val);
}


struct imush {
	bus_addr_t maddr;
	int error;
};

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;

	if (!(imushp->error = error))
		imushp->maddr = segs[0].ds_addr;
}
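
/*
 * isp_pci_mbxdma() below builds the host-visible DMA state in one pass:
 * a parent tag limited to ISP_NSEG64_MAX segments, the request and response
 * queues, the target-mode ATIO queue and external command (ECMD) area when
 * ISP_TARGET_MODE is defined, a two-entry IOCB scratch buffer, per-channel
 * FC scratch memory and nexus free lists, and finally the per-command DMA
 * maps and handle list.  isp_pci_mbxdmafree() undoes all of it on error or
 * detach.
 */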

static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	caddr_t base;
	uint32_t len;
	int i, error, cmap = 0;
	bus_size_t slim;	/* segment size */
	struct imush im;
#ifdef ISP_TARGET_MODE
	isp_ecmd_t *ecmd;
#endif

	/* Already been here? If so, leave... */
	if (isp->isp_xflist != NULL)
		return (0);
	if (isp->isp_rquest != NULL && isp->isp_maxcmds == 0)
		return (0);
	ISP_UNLOCK(isp);
	if (isp->isp_rquest != NULL)
		goto gotmaxcmds;

	if (sizeof (bus_size_t) > 4)
		slim = (bus_size_t) (1ULL << 32);
	else
		slim = (bus_size_t) (1UL << 31);
	if (bus_dma_tag_create(bus_get_dma_tag(ISP_PCD(isp)), 1, slim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    (ISP_NSEG64_MAX - 1) * PAGE_SIZE, ISP_NSEG64_MAX,
	    (ISP_NSEG64_MAX - 1) * PAGE_SIZE, 0,
	    busdma_lock_mutex, &isp->isp_lock, &isp->isp_osinfo.dmat)) {
		ISP_LOCK(isp);
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		return (1);
	}

	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, NULL, NULL, &isp->isp_osinfo.reqdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create request DMA tag");
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.reqdmat, (void **)&base,
	    BUS_DMA_COHERENT, &isp->isp_osinfo.reqmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate request DMA memory");
		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
		goto bad;
	}
	isp->isp_rquest = base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap,
	    base, len, imc, &im, BUS_DMA_NOWAIT) || im.error) {
		isp_prt(isp, ISP_LOGERR, "error loading request DMA map %d", im.error);
		goto bad;
	}
	isp_prt(isp, ISP_LOGDEBUG0, "request area @ 0x%jx/0x%jx",
	    (uintmax_t)im.maddr, (uintmax_t)len);
	isp->isp_rquest_dma = im.maddr;

#ifdef ISP_TARGET_MODE
	/*
	 * Allocate region for external DMA addressable command/status structures.
	 */
	len = N_XCMDS * XCMD_SIZE;
	if (bus_dma_tag_create(isp->isp_osinfo.dmat, XCMD_SIZE, slim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, NULL, NULL, &isp->isp_osinfo.ecmd_dmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create ECMD DMA tag");
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.ecmd_dmat, (void **)&base,
	    BUS_DMA_COHERENT, &isp->isp_osinfo.ecmd_map) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate ECMD DMA memory");
		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
		goto bad;
	}
	isp->isp_osinfo.ecmd_base = (isp_ecmd_t *)base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.ecmd_dmat, isp->isp_osinfo.ecmd_map,
	    base, len, imc, &im, BUS_DMA_NOWAIT) || im.error) {
		isp_prt(isp, ISP_LOGERR, "error loading ECMD DMA map %d", im.error);
		goto bad;
	}
	isp_prt(isp, ISP_LOGDEBUG0, "ecmd area @ 0x%jx/0x%jx",
	    (uintmax_t)im.maddr, (uintmax_t)len);

	isp->isp_osinfo.ecmd_dma = im.maddr;
	isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)base;
	for (ecmd = isp->isp_osinfo.ecmd_free;
	    ecmd < &isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
		if (ecmd == &isp->isp_osinfo.ecmd_free[N_XCMDS - 1])
			ecmd->next = NULL;
		else
			ecmd->next = ecmd + 1;
	}
#endif

	/*
	 * Allocate and map the result queue.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, NULL, NULL, &isp->isp_osinfo.respdmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create response DMA tag");
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.respdmat, (void **)&base,
	    BUS_DMA_COHERENT, &isp->isp_osinfo.respmap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate response DMA memory");
		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
		goto bad;
	}
	isp->isp_result = base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap,
	    base, len, imc, &im, BUS_DMA_NOWAIT) || im.error) {
		isp_prt(isp, ISP_LOGERR, "error loading response DMA map %d", im.error);
		goto bad;
	}
	isp_prt(isp, ISP_LOGDEBUG0, "response area @ 0x%jx/0x%jx",
	    (uintmax_t)im.maddr, (uintmax_t)len);
	isp->isp_result_dma = im.maddr;

#ifdef ISP_TARGET_MODE
	/*
	 * Allocate and map ATIO queue on 24xx with target mode.
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    len, 1, len, 0, NULL, NULL, &isp->isp_osinfo.atiodmat)) {
		isp_prt(isp, ISP_LOGERR, "cannot create ATIO DMA tag");
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.atiodmat, (void **)&base,
	    BUS_DMA_COHERENT, &isp->isp_osinfo.atiomap) != 0) {
		isp_prt(isp, ISP_LOGERR, "cannot allocate ATIO DMA memory");
		bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
		goto bad;
	}
	isp->isp_atioq = base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.atiodmat, isp->isp_osinfo.atiomap,
	    base, len, imc, &im, BUS_DMA_NOWAIT) || im.error) {
		isp_prt(isp, ISP_LOGERR, "error loading ATIO DMA map %d", im.error);
		goto bad;
	}
	isp_prt(isp, ISP_LOGDEBUG0, "ATIO area @ 0x%jx/0x%jx",
	    (uintmax_t)im.maddr, (uintmax_t)len);
	isp->isp_atioq_dma = im.maddr;
#endif

	if (bus_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    2*QENTRY_LEN, 1, 2*QENTRY_LEN, 0, NULL, NULL,
	    &isp->isp_osinfo.iocbdmat)) {
		goto bad;
	}
	if (bus_dmamem_alloc(isp->isp_osinfo.iocbdmat,
	    (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.iocbmap) != 0)
		goto bad;
	isp->isp_iocb = base;
	im.error = 0;
	if (bus_dmamap_load(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap,
	    base, 2*QENTRY_LEN, imc, &im, BUS_DMA_NOWAIT) || im.error)
		goto bad;
	isp->isp_iocb_dma = im.maddr;

	if (bus_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    ISP_FC_SCRLEN, 1, ISP_FC_SCRLEN, 0, NULL, NULL,
	    &isp->isp_osinfo.scdmat))
		goto bad;
	for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
		struct isp_fc *fc = ISP_FC_PC(isp, cmap);
		if (bus_dmamem_alloc(isp->isp_osinfo.scdmat,
		    (void **)&base, BUS_DMA_COHERENT, &fc->scmap) != 0)
			goto bad;
		FCPARAM(isp, cmap)->isp_scratch = base;
		im.error = 0;
		if (bus_dmamap_load(isp->isp_osinfo.scdmat, fc->scmap,
		    base, ISP_FC_SCRLEN, imc, &im, BUS_DMA_NOWAIT) ||
		    im.error) {
			bus_dmamem_free(isp->isp_osinfo.scdmat,
			    base, fc->scmap);
			FCPARAM(isp, cmap)->isp_scratch = NULL;
			goto bad;
		}
		FCPARAM(isp, cmap)->isp_scdma = im.maddr;
		for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
			struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
			if (n == NULL) {
				while (fc->nexus_free_list) {
					n = fc->nexus_free_list;
					fc->nexus_free_list = n->next;
					free(n, M_DEVBUF);
				}
				goto bad;
			}
			n->next = fc->nexus_free_list;
			fc->nexus_free_list = n;
		}
	}

	if (isp->isp_maxcmds == 0) {
		ISP_LOCK(isp);
		return (0);
	}

gotmaxcmds:
	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *)
	    malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < isp->isp_maxcmds; i++) {
		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
		if (error) {
			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(isp->isp_osinfo.dmat,
				    isp->isp_osinfo.pcmd_pool[i].dmap);
			}
			goto bad;
		}
		callout_init_mtx(&pcmd->wdog, &isp->isp_lock, 0);
		if (i == isp->isp_maxcmds-1)
			pcmd->next = NULL;
		else
			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
	}
	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];

	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	for (len = 0; len < isp->isp_maxcmds - 1; len++)
		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
	isp->isp_xffree = isp->isp_xflist;

	ISP_LOCK(isp);
	return (0);

bad:
	isp_pci_mbxdmafree(isp);
	ISP_LOCK(isp);
	return (1);
}

static void
isp_pci_mbxdmafree(ispsoftc_t *isp)
{
	int i;

	if (isp->isp_xflist != NULL) {
		free(isp->isp_xflist, M_DEVBUF);
		isp->isp_xflist = NULL;
	}
	if (isp->isp_osinfo.pcmd_pool != NULL) {
		for (i = 0; i < isp->isp_maxcmds; i++) {
			bus_dmamap_destroy(isp->isp_osinfo.dmat,
			    isp->isp_osinfo.pcmd_pool[i].dmap);
		}
		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
		isp->isp_osinfo.pcmd_pool = NULL;
	}
	for (i = 0; i < isp->isp_nchan; i++) {
		struct isp_fc *fc = ISP_FC_PC(isp, i);
		if (FCPARAM(isp, i)->isp_scdma != 0) {
			bus_dmamap_unload(isp->isp_osinfo.scdmat,
			    fc->scmap);
			FCPARAM(isp, i)->isp_scdma = 0;
		}
		if (FCPARAM(isp, i)->isp_scratch != NULL) {
			bus_dmamem_free(isp->isp_osinfo.scdmat,
			    FCPARAM(isp, i)->isp_scratch, fc->scmap);
			FCPARAM(isp, i)->isp_scratch = NULL;
		}
		while (fc->nexus_free_list) {
			struct isp_nexus *n = fc->nexus_free_list;
			fc->nexus_free_list = n->next;
			free(n, M_DEVBUF);
		}
	}
	if (isp->isp_iocb_dma != 0) {
		bus_dma_tag_destroy(isp->isp_osinfo.scdmat);
		bus_dmamap_unload(isp->isp_osinfo.iocbdmat,
		    isp->isp_osinfo.iocbmap);
		isp->isp_iocb_dma = 0;
	}
	if (isp->isp_iocb != NULL) {
		bus_dmamem_free(isp->isp_osinfo.iocbdmat,
		    isp->isp_iocb, isp->isp_osinfo.iocbmap);
		bus_dma_tag_destroy(isp->isp_osinfo.iocbdmat);
	}
#ifdef ISP_TARGET_MODE
	if (isp->isp_atioq_dma != 0) {
		bus_dmamap_unload(isp->isp_osinfo.atiodmat,
		    isp->isp_osinfo.atiomap);
		isp->isp_atioq_dma = 0;
	}
	if (isp->isp_atioq != NULL) {
		bus_dmamem_free(isp->isp_osinfo.atiodmat, isp->isp_atioq,
		    isp->isp_osinfo.atiomap);
		bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
		isp->isp_atioq = NULL;
	}
#endif
	if (isp->isp_result_dma != 0) {
		bus_dmamap_unload(isp->isp_osinfo.respdmat,
		    isp->isp_osinfo.respmap);
		isp->isp_result_dma = 0;
	}
	if (isp->isp_result != NULL) {
		bus_dmamem_free(isp->isp_osinfo.respdmat, isp->isp_result,
		    isp->isp_osinfo.respmap);
		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
		isp->isp_result = NULL;
	}
#ifdef ISP_TARGET_MODE
	if (isp->isp_osinfo.ecmd_dma != 0) {
		bus_dmamap_unload(isp->isp_osinfo.ecmd_dmat,
		    isp->isp_osinfo.ecmd_map);
		isp->isp_osinfo.ecmd_dma = 0;
	}
	if (isp->isp_osinfo.ecmd_base != NULL) {
		bus_dmamem_free(isp->isp_osinfo.ecmd_dmat, isp->isp_osinfo.ecmd_base,
		    isp->isp_osinfo.ecmd_map);
		bus_dma_tag_destroy(isp->isp_osinfo.ecmd_dmat);
		isp->isp_osinfo.ecmd_base = NULL;
	}
#endif
	if (isp->isp_rquest_dma != 0) {
		bus_dmamap_unload(isp->isp_osinfo.reqdmat,
		    isp->isp_osinfo.reqmap);
		isp->isp_rquest_dma = 0;
	}
	if (isp->isp_rquest != NULL) {
		bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_rquest,
		    isp->isp_osinfo.reqmap);
		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
		isp->isp_rquest = NULL;
	}
}

typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;	/* original request */
	int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp = (mush_t *) arg;
	ispsoftc_t *isp = mp->isp;
	struct ccb_scsiio *csio = mp->cmd_token;
	isp_ddir_t ddir;
	int sdir;

	if (error) {
		mp->error = error;
		return;
	}
	if (nseg == 0) {
		ddir = ISP_NOXFR;
	} else {
		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			ddir = ISP_FROM_DEVICE;
		} else {
			ddir = ISP_TO_DEVICE;
		}
		if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
		    ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)) {
			sdir = BUS_DMASYNC_PREREAD;
		} else {
			sdir = BUS_DMASYNC_PREWRITE;
		}
		bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
		    sdir);
	}

	error = isp_send_cmd(isp, mp->rq, dm_segs, nseg, XS_XFRLEN(csio),
	    ddir, (ispds64_t *)csio->req_map);
	switch (error) {
	case CMD_EAGAIN:
		mp->error = MUSHERR_NOQENTRIES;
		break;
	case CMD_QUEUED:
		break;
	default:
		mp->error = EIO;
		break;
	}
}

static int
isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
{
	mush_t mush, *mp;
	int error;

	mp = &mush;
	mp->isp = isp;
	mp->cmd_token = csio;
	mp->rq = ff;
	mp->error = 0;

	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
	    (union ccb *)csio, dma2, mp, BUS_DMA_NOWAIT);
	if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
		isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
#endif
		mp->error = error;
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			csio->ccb_h.status = CAM_REQ_TOO_BIG;
		} else if (mp->error == EINVAL) {
			csio->ccb_h.status = CAM_REQ_INVALID;
		} else {
			csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
		}
		return (retval);
	}
	return (CMD_QUEUED);
}
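
/*
 * Interrupt setup: with MSI-X the driver requests up to three vectors --
 * 0 for general/mailbox interrupts (isp_platform_intr), 1 for response
 * queue updates and 2 for ATIO queue updates -- and otherwise falls back
 * to a single MSI or shared INTx interrupt.  The per-unit "msix" and "msi"
 * hints can lower these limits.
 */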
static int
isp_pci_irqsetup(ispsoftc_t *isp)
{
	device_t dev = isp->isp_osinfo.dev;
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	driver_intr_t *f;
	int i, max_irq;

	/* Allocate IRQs only once. */
	if (isp->isp_nirq > 0)
		return (0);

	ISP_UNLOCK(isp);
	if (ISP_CAP_MSIX(isp)) {
		max_irq = IS_26XX(isp) ? 3 : (IS_25XX(isp) ? 2 : 0);
		resource_int_value(device_get_name(dev),
		    device_get_unit(dev), "msix", &max_irq);
		max_irq = imin(ISP_MAX_IRQS, max_irq);
		pcs->msicount = imin(pci_msix_count(dev), max_irq);
		if (pcs->msicount > 0 &&
		    pci_alloc_msix(dev, &pcs->msicount) != 0)
			pcs->msicount = 0;
	}
	if (pcs->msicount == 0) {
		max_irq = 1;
		resource_int_value(device_get_name(dev),
		    device_get_unit(dev), "msi", &max_irq);
		max_irq = imin(1, max_irq);
		pcs->msicount = imin(pci_msi_count(dev), max_irq);
		if (pcs->msicount > 0 &&
		    pci_alloc_msi(dev, &pcs->msicount) != 0)
			pcs->msicount = 0;
	}
	for (i = 0; i < MAX(1, pcs->msicount); i++) {
		pcs->irq[i].iqd = i + (pcs->msicount > 0);
		pcs->irq[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &pcs->irq[i].iqd, RF_ACTIVE | RF_SHAREABLE);
		if (pcs->irq[i].irq == NULL) {
			device_printf(dev, "could not allocate interrupt\n");
			break;
		}
		if (i == 0)
			f = isp_platform_intr;
		else if (i == 1)
			f = isp_platform_intr_resp;
		else
			f = isp_platform_intr_atio;
		if (bus_setup_intr(dev, pcs->irq[i].irq, ISP_IFLAGS, NULL,
		    f, isp, &pcs->irq[i].ih)) {
			device_printf(dev, "could not setup interrupt\n");
			(void) bus_release_resource(dev, SYS_RES_IRQ,
			    pcs->irq[i].iqd, pcs->irq[i].irq);
			break;
		}
		if (pcs->msicount > 1) {
			bus_describe_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih,
			    "%d", i);
		}
		isp->isp_nirq = i + 1;
	}
	ISP_LOCK(isp);

	return (isp->isp_nirq == 0);
}