1 /*- 2 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 1997-2006 by Matthew Jacob 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice immediately at the beginning of the file, without modification, 13 * this list of conditions, and the following disclaimer. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/kernel.h> 37 #include <sys/module.h> 38 #include <sys/bus.h> 39 #include <sys/stdint.h> 40 41 #include <dev/pci/pcireg.h> 42 #include <dev/pci/pcivar.h> 43 44 #include <machine/bus.h> 45 #include <machine/resource.h> 46 #include <sys/rman.h> 47 #include <sys/malloc.h> 48 49 #include <dev/isp/isp_freebsd.h> 50 51 static uint16_t isp_pci_rd_reg(struct ispsoftc *, int); 52 static void isp_pci_wr_reg(struct ispsoftc *, int, uint16_t); 53 static uint16_t isp_pci_rd_reg_1080(struct ispsoftc *, int); 54 static void isp_pci_wr_reg_1080(struct ispsoftc *, int, uint16_t); 55 static int 56 isp_pci_rd_isr(struct ispsoftc *, uint16_t *, uint16_t *, uint16_t *); 57 static int 58 isp_pci_rd_isr_2300(struct ispsoftc *, uint16_t *, uint16_t *, uint16_t *); 59 static int isp_pci_mbxdma(struct ispsoftc *); 60 static int 61 isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, uint16_t *, uint16_t); 62 static void 63 isp_pci_dmateardown(struct ispsoftc *, XS_T *, uint16_t); 64 65 static void isp_pci_reset1(struct ispsoftc *); 66 static void isp_pci_dumpregs(struct ispsoftc *, const char *); 67 68 static struct ispmdvec mdvec = { 69 isp_pci_rd_isr, 70 isp_pci_rd_reg, 71 isp_pci_wr_reg, 72 isp_pci_mbxdma, 73 isp_pci_dmasetup, 74 isp_pci_dmateardown, 75 NULL, 76 isp_pci_reset1, 77 isp_pci_dumpregs, 78 NULL, 79 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 80 }; 81 82 static struct ispmdvec mdvec_1080 = { 83 isp_pci_rd_isr, 84 isp_pci_rd_reg_1080, 85 isp_pci_wr_reg_1080, 86 isp_pci_mbxdma, 87 isp_pci_dmasetup, 88 isp_pci_dmateardown, 89 NULL, 90 isp_pci_reset1, 91 isp_pci_dumpregs, 92 NULL, 93 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 94 }; 95 96 static struct ispmdvec mdvec_12160 = { 97 isp_pci_rd_isr, 98 isp_pci_rd_reg_1080, 99 isp_pci_wr_reg_1080, 100 isp_pci_mbxdma, 101 isp_pci_dmasetup, 102 isp_pci_dmateardown, 103 NULL, 104 isp_pci_reset1, 105 isp_pci_dumpregs, 106 NULL, 107 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 108 }; 109 110 static struct ispmdvec mdvec_2100 = { 111 isp_pci_rd_isr, 112 isp_pci_rd_reg, 113 isp_pci_wr_reg, 114 isp_pci_mbxdma, 115 isp_pci_dmasetup, 116 isp_pci_dmateardown, 117 NULL, 118 isp_pci_reset1, 119 isp_pci_dumpregs 120 }; 121 122 static struct ispmdvec mdvec_2200 = { 123 isp_pci_rd_isr, 124 isp_pci_rd_reg, 125 isp_pci_wr_reg, 126 isp_pci_mbxdma, 127 isp_pci_dmasetup, 128 isp_pci_dmateardown, 129 NULL, 130 isp_pci_reset1, 131 isp_pci_dumpregs 132 }; 133 134 static struct ispmdvec mdvec_2300 = { 135 isp_pci_rd_isr_2300, 136 isp_pci_rd_reg, 137 isp_pci_wr_reg, 138 isp_pci_mbxdma, 139 isp_pci_dmasetup, 140 isp_pci_dmateardown, 141 NULL, 142 isp_pci_reset1, 143 isp_pci_dumpregs 144 }; 145 146 #ifndef PCIM_CMD_INVEN 147 #define PCIM_CMD_INVEN 0x10 148 #endif 149 #ifndef PCIM_CMD_BUSMASTEREN 150 #define PCIM_CMD_BUSMASTEREN 0x0004 151 #endif 152 #ifndef PCIM_CMD_PERRESPEN 153 #define PCIM_CMD_PERRESPEN 0x0040 154 #endif 155 #ifndef PCIM_CMD_SEREN 156 #define PCIM_CMD_SEREN 0x0100 157 #endif 158 159 #ifndef PCIR_COMMAND 160 #define PCIR_COMMAND 0x04 161 #endif 162 163 #ifndef PCIR_CACHELNSZ 164 #define PCIR_CACHELNSZ 0x0c 165 #endif 166 167 #ifndef PCIR_LATTIMER 168 #define PCIR_LATTIMER 0x0d 169 #endif 170 171 #ifndef PCIR_ROMADDR 172 #define PCIR_ROMADDR 0x30 173 #endif 174 175 #ifndef PCI_VENDOR_QLOGIC 176 #define PCI_VENDOR_QLOGIC 0x1077 177 #endif 178 179 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 180 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 181 #endif 182 183 #ifndef PCI_PRODUCT_QLOGIC_ISP1080 184 #define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 185 #endif 186 187 #ifndef PCI_PRODUCT_QLOGIC_ISP10160 188 #define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 189 #endif 190 191 #ifndef PCI_PRODUCT_QLOGIC_ISP12160 192 #define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 193 #endif 194 195 #ifndef PCI_PRODUCT_QLOGIC_ISP1240 196 #define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 197 #endif 198 199 #ifndef PCI_PRODUCT_QLOGIC_ISP1280 200 #define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 201 #endif 202 203 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 204 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 205 #endif 206 207 #ifndef PCI_PRODUCT_QLOGIC_ISP2200 208 #define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 209 #endif 210 211 #ifndef PCI_PRODUCT_QLOGIC_ISP2300 212 #define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 213 #endif 214 215 #ifndef PCI_PRODUCT_QLOGIC_ISP2312 216 #define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 217 #endif 218 219 #ifndef PCI_PRODUCT_QLOGIC_ISP2322 220 #define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 221 #endif 222 223 #ifndef PCI_PRODUCT_QLOGIC_ISP2422 224 #define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 225 #endif 226 227 #ifndef PCI_PRODUCT_QLOGIC_ISP6312 228 #define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 229 #endif 230 231 #define PCI_QLOGIC_ISP1020 \ 232 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 233 234 #define PCI_QLOGIC_ISP1080 \ 235 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 236 237 #define PCI_QLOGIC_ISP10160 \ 238 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 239 240 #define PCI_QLOGIC_ISP12160 \ 241 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 242 243 #define PCI_QLOGIC_ISP1240 \ 244 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 245 246 #define PCI_QLOGIC_ISP1280 \ 247 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 248 249 #define PCI_QLOGIC_ISP2100 \ 250 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 251 252 #define PCI_QLOGIC_ISP2200 \ 253 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 254 255 #define PCI_QLOGIC_ISP2300 \ 256 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 257 258 #define PCI_QLOGIC_ISP2312 \ 259 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 260 261 #define PCI_QLOGIC_ISP2322 \ 262 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) 263 264 #define PCI_QLOGIC_ISP2422 \ 265 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) 266 267 #define PCI_QLOGIC_ISP6312 \ 268 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) 269 270 /* 271 * Odd case for some AMI raid cards... We need to *not* attach to this. 272 */ 273 #define AMI_RAID_SUBVENDOR_ID 0x101e 274 275 #define IO_MAP_REG 0x10 276 #define MEM_MAP_REG 0x14 277 278 #define PCI_DFLT_LTNCY 0x40 279 #define PCI_DFLT_LNSZ 0x10 280 281 static int isp_pci_probe (device_t); 282 static int isp_pci_attach (device_t); 283 284 285 struct isp_pcisoftc { 286 struct ispsoftc pci_isp; 287 device_t pci_dev; 288 struct resource * pci_reg; 289 bus_space_tag_t pci_st; 290 bus_space_handle_t pci_sh; 291 void * ih; 292 int16_t pci_poff[_NREG_BLKS]; 293 bus_dma_tag_t dmat; 294 bus_dmamap_t *dmaps; 295 }; 296 extern ispfwfunc *isp_get_firmware_p; 297 298 static device_method_t isp_pci_methods[] = { 299 /* Device interface */ 300 DEVMETHOD(device_probe, isp_pci_probe), 301 DEVMETHOD(device_attach, isp_pci_attach), 302 { 0, 0 } 303 }; 304 static void isp_pci_intr(void *); 305 306 static driver_t isp_pci_driver = { 307 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 308 }; 309 static devclass_t isp_devclass; 310 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 311 312 static int 313 isp_pci_probe(device_t dev) 314 { 315 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 316 case PCI_QLOGIC_ISP1020: 317 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 318 break; 319 case PCI_QLOGIC_ISP1080: 320 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 321 break; 322 case PCI_QLOGIC_ISP1240: 323 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 324 break; 325 case PCI_QLOGIC_ISP1280: 326 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 327 break; 328 case PCI_QLOGIC_ISP10160: 329 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 330 break; 331 case PCI_QLOGIC_ISP12160: 332 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 333 return (ENXIO); 334 } 335 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 336 break; 337 case PCI_QLOGIC_ISP2100: 338 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 339 break; 340 case PCI_QLOGIC_ISP2200: 341 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 342 break; 343 case PCI_QLOGIC_ISP2300: 344 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 345 break; 346 case PCI_QLOGIC_ISP2312: 347 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 348 break; 349 case PCI_QLOGIC_ISP2322: 350 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); 351 break; 352 case PCI_QLOGIC_ISP2422: 353 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); 354 break; 355 case PCI_QLOGIC_ISP6312: 356 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); 357 break; 358 default: 359 return (ENXIO); 360 } 361 if (isp_announced == 0 && bootverbose) { 362 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 363 "Core Version %d.%d\n", 364 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 365 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 366 isp_announced++; 367 } 368 /* 369 * XXXX: Here is where we might load the f/w module 370 * XXXX: (or increase a reference count to it). 371 */ 372 return (BUS_PROBE_DEFAULT); 373 } 374 375 static int 376 isp_pci_attach(device_t dev) 377 { 378 struct resource *regs, *irq; 379 int tval, rtp, rgd, iqd, m1, m2, isp_debug, role; 380 uint32_t data, cmd, linesz, psize, basetype; 381 struct isp_pcisoftc *pcs; 382 struct ispsoftc *isp = NULL; 383 struct ispmdvec *mdvp; 384 const char *sptr; 385 int locksetup = 0; 386 387 /* 388 * Figure out if we're supposed to skip this one. 389 */ 390 391 tval = 0; 392 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 393 "disable", &tval) == 0 && tval) { 394 device_printf(dev, "device is disabled\n"); 395 /* but return 0 so the !$)$)*!$*) unit isn't reused */ 396 return (0); 397 } 398 399 role = -1; 400 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 401 "role", &role) == 0 && role != -1) { 402 role &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 403 device_printf(dev, "setting role to 0x%x\n", role); 404 } else { 405 #ifdef ISP_TARGET_MODE 406 role = ISP_ROLE_TARGET; 407 #else 408 role = ISP_DEFAULT_ROLES; 409 #endif 410 } 411 412 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO); 413 if (pcs == NULL) { 414 device_printf(dev, "cannot allocate softc\n"); 415 return (ENOMEM); 416 } 417 418 /* 419 * Which we should try first - memory mapping or i/o mapping? 420 * 421 * We used to try memory first followed by i/o on alpha, otherwise 422 * the reverse, but we should just try memory first all the time now. 423 */ 424 m1 = PCIM_CMD_MEMEN; 425 m2 = PCIM_CMD_PORTEN; 426 427 tval = 0; 428 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 429 "prefer_iomap", &tval) == 0 && tval != 0) { 430 m1 = PCIM_CMD_PORTEN; 431 m2 = PCIM_CMD_MEMEN; 432 } 433 tval = 0; 434 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 435 "prefer_memmap", &tval) == 0 && tval != 0) { 436 m1 = PCIM_CMD_MEMEN; 437 m2 = PCIM_CMD_PORTEN; 438 } 439 440 linesz = PCI_DFLT_LNSZ; 441 irq = regs = NULL; 442 rgd = rtp = iqd = 0; 443 444 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 445 if (cmd & m1) { 446 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 447 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 448 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 449 } 450 if (regs == NULL && (cmd & m2)) { 451 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 452 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 453 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 454 } 455 if (regs == NULL) { 456 device_printf(dev, "unable to map any ports\n"); 457 goto bad; 458 } 459 if (bootverbose) 460 device_printf(dev, "using %s space register mapping\n", 461 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 462 pcs->pci_dev = dev; 463 pcs->pci_reg = regs; 464 pcs->pci_st = rman_get_bustag(regs); 465 pcs->pci_sh = rman_get_bushandle(regs); 466 467 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 468 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 469 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 470 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 471 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 472 mdvp = &mdvec; 473 basetype = ISP_HA_SCSI_UNKNOWN; 474 psize = sizeof (sdparam); 475 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 476 mdvp = &mdvec; 477 basetype = ISP_HA_SCSI_UNKNOWN; 478 psize = sizeof (sdparam); 479 } 480 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 481 mdvp = &mdvec_1080; 482 basetype = ISP_HA_SCSI_1080; 483 psize = sizeof (sdparam); 484 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 485 ISP1080_DMA_REGS_OFF; 486 } 487 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 488 mdvp = &mdvec_1080; 489 basetype = ISP_HA_SCSI_1240; 490 psize = 2 * sizeof (sdparam); 491 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 492 ISP1080_DMA_REGS_OFF; 493 } 494 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 495 mdvp = &mdvec_1080; 496 basetype = ISP_HA_SCSI_1280; 497 psize = 2 * sizeof (sdparam); 498 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 499 ISP1080_DMA_REGS_OFF; 500 } 501 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 502 mdvp = &mdvec_12160; 503 basetype = ISP_HA_SCSI_10160; 504 psize = sizeof (sdparam); 505 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 506 ISP1080_DMA_REGS_OFF; 507 } 508 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 509 mdvp = &mdvec_12160; 510 basetype = ISP_HA_SCSI_12160; 511 psize = 2 * sizeof (sdparam); 512 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 513 ISP1080_DMA_REGS_OFF; 514 } 515 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 516 mdvp = &mdvec_2100; 517 basetype = ISP_HA_FC_2100; 518 psize = sizeof (fcparam); 519 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 520 PCI_MBOX_REGS2100_OFF; 521 if (pci_get_revid(dev) < 3) { 522 /* 523 * XXX: Need to get the actual revision 524 * XXX: number of the 2100 FB. At any rate, 525 * XXX: lower cache line size for early revision 526 * XXX; boards. 527 */ 528 linesz = 1; 529 } 530 } 531 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 532 mdvp = &mdvec_2200; 533 basetype = ISP_HA_FC_2200; 534 psize = sizeof (fcparam); 535 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 536 PCI_MBOX_REGS2100_OFF; 537 } 538 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 539 mdvp = &mdvec_2300; 540 basetype = ISP_HA_FC_2300; 541 psize = sizeof (fcparam); 542 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 543 PCI_MBOX_REGS2300_OFF; 544 } 545 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 || 546 pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 547 mdvp = &mdvec_2300; 548 basetype = ISP_HA_FC_2312; 549 psize = sizeof (fcparam); 550 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 551 PCI_MBOX_REGS2300_OFF; 552 } 553 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322) { 554 mdvp = &mdvec_2300; 555 basetype = ISP_HA_FC_2322; 556 psize = sizeof (fcparam); 557 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 558 PCI_MBOX_REGS2300_OFF; 559 } 560 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) { 561 mdvp = &mdvec_2300; 562 basetype = ISP_HA_FC_2422; 563 psize = sizeof (fcparam); 564 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 565 PCI_MBOX_REGS2300_OFF; 566 } 567 isp = &pcs->pci_isp; 568 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 569 if (isp->isp_param == NULL) { 570 device_printf(dev, "cannot allocate parameter data\n"); 571 goto bad; 572 } 573 isp->isp_mdvec = mdvp; 574 isp->isp_type = basetype; 575 isp->isp_revision = pci_get_revid(dev); 576 isp->isp_role = role; 577 isp->isp_dev = dev; 578 579 /* 580 * Try and find firmware for this device. 581 */ 582 583 /* 584 * Don't even attempt to get firmware for the 2322/2422 (yet) 585 */ 586 if (IS_2322(isp) == 0 && IS_24XX(isp) == 0 && isp_get_firmware_p) { 587 int device = (int) pci_get_device(dev); 588 #ifdef ISP_TARGET_MODE 589 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 590 #else 591 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 592 #endif 593 } 594 595 /* 596 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 597 * are set. 598 */ 599 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 600 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 601 if (IS_2300(isp)) { /* per QLogic errata */ 602 cmd &= ~PCIM_CMD_INVEN; 603 } 604 if (IS_23XX(isp)) { 605 /* 606 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 607 */ 608 isp->isp_touched = 1; 609 610 } 611 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 612 613 /* 614 * Make sure the Cache Line Size register is set sensibly. 615 */ 616 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 617 if (data != linesz) { 618 data = PCI_DFLT_LNSZ; 619 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 620 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 621 } 622 623 /* 624 * Make sure the Latency Timer is sane. 625 */ 626 data = pci_read_config(dev, PCIR_LATTIMER, 1); 627 if (data < PCI_DFLT_LTNCY) { 628 data = PCI_DFLT_LTNCY; 629 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 630 pci_write_config(dev, PCIR_LATTIMER, data, 1); 631 } 632 633 /* 634 * Make sure we've disabled the ROM. 635 */ 636 data = pci_read_config(dev, PCIR_ROMADDR, 4); 637 data &= ~1; 638 pci_write_config(dev, PCIR_ROMADDR, data, 4); 639 640 iqd = 0; 641 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, 642 RF_ACTIVE | RF_SHAREABLE); 643 if (irq == NULL) { 644 device_printf(dev, "could not allocate interrupt\n"); 645 goto bad; 646 } 647 648 tval = 0; 649 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 650 "fwload_disable", &tval) == 0 && tval != 0) { 651 isp->isp_confopts |= ISP_CFG_NORELOAD; 652 } 653 tval = 0; 654 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 655 "ignore_nvram", &tval) == 0 && tval != 0) { 656 isp->isp_confopts |= ISP_CFG_NONVRAM; 657 } 658 tval = 0; 659 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 660 "fullduplex", &tval) == 0 && tval != 0) { 661 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 662 } 663 #ifdef ISP_FW_CRASH_DUMP 664 tval = 0; 665 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 666 "fw_dump_enable", &tval) == 0 && tval != 0) { 667 size_t amt = 0; 668 if (IS_2200(isp)) { 669 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 670 } else if (IS_23XX(isp)) { 671 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 672 } 673 if (amt) { 674 FCPARAM(isp)->isp_dump_data = 675 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 676 } else { 677 device_printf(dev, 678 "f/w crash dumps not supported for this model\n"); 679 } 680 } 681 #endif 682 683 sptr = 0; 684 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 685 "topology", (const char **) &sptr) == 0 && sptr != 0) { 686 if (strcmp(sptr, "lport") == 0) { 687 isp->isp_confopts |= ISP_CFG_LPORT; 688 } else if (strcmp(sptr, "nport") == 0) { 689 isp->isp_confopts |= ISP_CFG_NPORT; 690 } else if (strcmp(sptr, "lport-only") == 0) { 691 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 692 } else if (strcmp(sptr, "nport-only") == 0) { 693 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 694 } 695 } 696 697 /* 698 * Because the resource_*_value functions can neither return 699 * 64 bit integer values, nor can they be directly coerced 700 * to interpret the right hand side of the assignment as 701 * you want them to interpret it, we have to force WWN 702 * hint replacement to specify WWN strings with a leading 703 * 'w' (e..g w50000000aaaa0001). Sigh. 704 */ 705 sptr = 0; 706 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 707 "portwwn", (const char **) &sptr); 708 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 709 char *eptr = 0; 710 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 711 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 712 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 713 isp->isp_osinfo.default_port_wwn = 0; 714 } else { 715 isp->isp_confopts |= ISP_CFG_OWNWWPN; 716 } 717 } 718 if (isp->isp_osinfo.default_port_wwn == 0) { 719 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 720 } 721 722 sptr = 0; 723 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 724 "nodewwn", (const char **) &sptr); 725 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 726 char *eptr = 0; 727 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 728 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 729 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 730 isp->isp_osinfo.default_node_wwn = 0; 731 } else { 732 isp->isp_confopts |= ISP_CFG_OWNWWNN; 733 } 734 } 735 if (isp->isp_osinfo.default_node_wwn == 0) { 736 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 737 } 738 739 isp->isp_osinfo.default_id = -1; 740 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 741 "iid", &tval) == 0) { 742 isp->isp_osinfo.default_id = tval; 743 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 744 } 745 if (isp->isp_osinfo.default_id == -1) { 746 if (IS_FC(isp)) { 747 isp->isp_osinfo.default_id = 109; 748 } else { 749 isp->isp_osinfo.default_id = 7; 750 } 751 } 752 753 isp_debug = 0; 754 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 755 "debug", &isp_debug); 756 757 /* Make sure the lock is set up. */ 758 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 759 locksetup++; 760 761 if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) { 762 device_printf(dev, "could not setup interrupt\n"); 763 goto bad; 764 } 765 766 /* 767 * Set up logging levels. 768 */ 769 if (isp_debug) { 770 isp->isp_dblev = isp_debug; 771 } else { 772 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 773 } 774 if (bootverbose) 775 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 776 777 /* 778 * Last minute checks... 779 */ 780 if (IS_23XX(isp)) { 781 isp->isp_port = pci_get_function(dev); 782 } 783 784 /* 785 * Make sure we're in reset state. 786 */ 787 ISP_LOCK(isp); 788 isp_reset(isp); 789 if (isp->isp_state != ISP_RESETSTATE) { 790 ISP_UNLOCK(isp); 791 goto bad; 792 } 793 isp_init(isp); 794 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 795 isp_uninit(isp); 796 ISP_UNLOCK(isp); 797 goto bad; 798 } 799 isp_attach(isp); 800 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 801 isp_uninit(isp); 802 ISP_UNLOCK(isp); 803 goto bad; 804 } 805 /* 806 * XXXX: Here is where we might unload the f/w module 807 * XXXX: (or decrease the reference count to it). 808 */ 809 ISP_UNLOCK(isp); 810 return (0); 811 812 bad: 813 814 if (pcs && pcs->ih) { 815 (void) bus_teardown_intr(dev, irq, pcs->ih); 816 } 817 818 if (locksetup && isp) { 819 mtx_destroy(&isp->isp_osinfo.lock); 820 } 821 822 if (irq) { 823 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 824 } 825 826 827 if (regs) { 828 (void) bus_release_resource(dev, rtp, rgd, regs); 829 } 830 831 if (pcs) { 832 if (pcs->pci_isp.isp_param) 833 free(pcs->pci_isp.isp_param, M_DEVBUF); 834 free(pcs, M_DEVBUF); 835 } 836 837 /* 838 * XXXX: Here is where we might unload the f/w module 839 * XXXX: (or decrease the reference count to it). 840 */ 841 return (ENXIO); 842 } 843 844 static void 845 isp_pci_intr(void *arg) 846 { 847 struct ispsoftc *isp = arg; 848 uint16_t isr, sema, mbox; 849 850 ISP_LOCK(isp); 851 isp->isp_intcnt++; 852 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 853 isp->isp_intbogus++; 854 } else { 855 int iok = isp->isp_osinfo.intsok; 856 isp->isp_osinfo.intsok = 0; 857 isp_intr(isp, isr, sema, mbox); 858 isp->isp_osinfo.intsok = iok; 859 } 860 ISP_UNLOCK(isp); 861 } 862 863 864 #define IspVirt2Off(a, x) \ 865 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 866 _BLK_REG_SHFT] + ((x) & 0xff)) 867 868 #define BXR2(pcs, off) \ 869 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 870 #define BXW2(pcs, off, v) \ 871 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 872 873 874 static __inline int 875 isp_pci_rd_debounced(struct ispsoftc *isp, int off, uint16_t *rp) 876 { 877 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 878 uint16_t val0, val1; 879 int i = 0; 880 881 do { 882 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 883 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 884 } while (val0 != val1 && ++i < 1000); 885 if (val0 != val1) { 886 return (1); 887 } 888 *rp = val0; 889 return (0); 890 } 891 892 static int 893 isp_pci_rd_isr(struct ispsoftc *isp, uint16_t *isrp, 894 uint16_t *semap, uint16_t *mbp) 895 { 896 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 897 uint16_t isr, sema; 898 899 if (IS_2100(isp)) { 900 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 901 return (0); 902 } 903 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 904 return (0); 905 } 906 } else { 907 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 908 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 909 } 910 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 911 isr &= INT_PENDING_MASK(isp); 912 sema &= BIU_SEMA_LOCK; 913 if (isr == 0 && sema == 0) { 914 return (0); 915 } 916 *isrp = isr; 917 if ((*semap = sema) != 0) { 918 if (IS_2100(isp)) { 919 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 920 return (0); 921 } 922 } else { 923 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 924 } 925 } 926 return (1); 927 } 928 929 static int 930 isp_pci_rd_isr_2300(struct ispsoftc *isp, uint16_t *isrp, 931 uint16_t *semap, uint16_t *mbox0p) 932 { 933 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 934 uint32_t r2hisr; 935 936 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 937 *isrp = 0; 938 return (0); 939 } 940 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh, 941 IspVirt2Off(pcs, BIU_R2HSTSLO)); 942 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 943 if ((r2hisr & BIU_R2HST_INTR) == 0) { 944 *isrp = 0; 945 return (0); 946 } 947 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 948 case ISPR2HST_ROM_MBX_OK: 949 case ISPR2HST_ROM_MBX_FAIL: 950 case ISPR2HST_MBX_OK: 951 case ISPR2HST_MBX_FAIL: 952 case ISPR2HST_ASYNC_EVENT: 953 *isrp = r2hisr & 0xffff; 954 *mbox0p = (r2hisr >> 16); 955 *semap = 1; 956 return (1); 957 case ISPR2HST_RIO_16: 958 *isrp = r2hisr & 0xffff; 959 *mbox0p = ASYNC_RIO1; 960 *semap = 1; 961 return (1); 962 case ISPR2HST_FPOST: 963 *isrp = r2hisr & 0xffff; 964 *mbox0p = ASYNC_CMD_CMPLT; 965 *semap = 1; 966 return (1); 967 case ISPR2HST_FPOST_CTIO: 968 *isrp = r2hisr & 0xffff; 969 *mbox0p = ASYNC_CTIO_DONE; 970 *semap = 1; 971 return (1); 972 case ISPR2HST_RSPQ_UPDATE: 973 *isrp = r2hisr & 0xffff; 974 *mbox0p = 0; 975 *semap = 0; 976 return (1); 977 default: 978 return (0); 979 } 980 } 981 982 static uint16_t 983 isp_pci_rd_reg(struct ispsoftc *isp, int regoff) 984 { 985 uint16_t rv; 986 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 987 int oldconf = 0; 988 989 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 990 /* 991 * We will assume that someone has paused the RISC processor. 992 */ 993 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 994 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 995 oldconf | BIU_PCI_CONF1_SXP); 996 } 997 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 998 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 999 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 1000 } 1001 return (rv); 1002 } 1003 1004 static void 1005 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, uint16_t val) 1006 { 1007 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1008 int oldconf = 0; 1009 1010 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1011 /* 1012 * We will assume that someone has paused the RISC processor. 1013 */ 1014 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1015 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1016 oldconf | BIU_PCI_CONF1_SXP); 1017 } 1018 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1019 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1020 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 1021 } 1022 } 1023 1024 static uint16_t 1025 isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff) 1026 { 1027 uint16_t rv, oc = 0; 1028 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1029 1030 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1031 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1032 uint16_t tc; 1033 /* 1034 * We will assume that someone has paused the RISC processor. 1035 */ 1036 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1037 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1038 if (regoff & SXP_BANK1_SELECT) 1039 tc |= BIU_PCI1080_CONF1_SXP1; 1040 else 1041 tc |= BIU_PCI1080_CONF1_SXP0; 1042 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1043 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1044 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1045 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1046 oc | BIU_PCI1080_CONF1_DMA); 1047 } 1048 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 1049 if (oc) { 1050 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1051 } 1052 return (rv); 1053 } 1054 1055 static void 1056 isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, uint16_t val) 1057 { 1058 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1059 int oc = 0; 1060 1061 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1062 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1063 uint16_t tc; 1064 /* 1065 * We will assume that someone has paused the RISC processor. 1066 */ 1067 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1068 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1069 if (regoff & SXP_BANK1_SELECT) 1070 tc |= BIU_PCI1080_CONF1_SXP1; 1071 else 1072 tc |= BIU_PCI1080_CONF1_SXP0; 1073 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1074 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1075 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1076 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1077 oc | BIU_PCI1080_CONF1_DMA); 1078 } 1079 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1080 if (oc) { 1081 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1082 } 1083 } 1084 1085 1086 struct imush { 1087 struct ispsoftc *isp; 1088 int error; 1089 }; 1090 1091 static void imc(void *, bus_dma_segment_t *, int, int); 1092 1093 static void 1094 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1095 { 1096 struct imush *imushp = (struct imush *) arg; 1097 if (error) { 1098 imushp->error = error; 1099 } else { 1100 struct ispsoftc *isp =imushp->isp; 1101 bus_addr_t addr = segs->ds_addr; 1102 1103 isp->isp_rquest_dma = addr; 1104 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1105 isp->isp_result_dma = addr; 1106 if (IS_FC(isp)) { 1107 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1108 FCPARAM(isp)->isp_scdma = addr; 1109 } 1110 } 1111 } 1112 1113 /* 1114 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE 1115 */ 1116 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) 1117 1118 static int 1119 isp_pci_mbxdma(struct ispsoftc *isp) 1120 { 1121 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1122 caddr_t base; 1123 uint32_t len; 1124 int i, error, ns; 1125 bus_size_t slim; /* segment size */ 1126 bus_addr_t llim; /* low limit of unavailable dma */ 1127 bus_addr_t hlim; /* high limit of unavailable dma */ 1128 struct imush im; 1129 1130 /* 1131 * Already been here? If so, leave... 1132 */ 1133 if (isp->isp_rquest) { 1134 return (0); 1135 } 1136 1137 hlim = BUS_SPACE_MAXADDR; 1138 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1139 slim = (bus_size_t) (1ULL << 32); 1140 #ifdef ISP_TARGET_MODE 1141 /* 1142 * XXX: Until Fixed Soon 1143 */ 1144 llim = BUS_SPACE_MAXADDR_32BIT; 1145 #else 1146 llim = BUS_SPACE_MAXADDR; 1147 #endif 1148 } else { 1149 llim = BUS_SPACE_MAXADDR_32BIT; 1150 slim = (1 << 24); 1151 } 1152 1153 ISP_UNLOCK(isp); 1154 if (bus_dma_tag_create(NULL, 1, slim, llim, hlim, 1155 NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, 1156 busdma_lock_mutex, &Giant, &pcs->dmat)) { 1157 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1158 ISP_LOCK(isp); 1159 return(1); 1160 } 1161 1162 1163 len = sizeof (XS_T **) * isp->isp_maxcmds; 1164 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1165 if (isp->isp_xflist == NULL) { 1166 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1167 ISP_LOCK(isp); 1168 return (1); 1169 } 1170 #ifdef ISP_TARGET_MODE 1171 len = sizeof (void **) * isp->isp_maxcmds; 1172 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1173 if (isp->isp_tgtlist == NULL) { 1174 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1175 ISP_LOCK(isp); 1176 return (1); 1177 } 1178 #endif 1179 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1180 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1181 if (pcs->dmaps == NULL) { 1182 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1183 free(isp->isp_xflist, M_DEVBUF); 1184 #ifdef ISP_TARGET_MODE 1185 free(isp->isp_tgtlist, M_DEVBUF); 1186 #endif 1187 ISP_LOCK(isp); 1188 return (1); 1189 } 1190 1191 /* 1192 * Allocate and map the request, result queues, plus FC scratch area. 1193 */ 1194 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1195 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1196 if (IS_FC(isp)) { 1197 len += ISP2100_SCRLEN; 1198 } 1199 1200 ns = (len / PAGE_SIZE) + 1; 1201 /* 1202 * Create a tag for the control spaces- force it to within 32 bits. 1203 */ 1204 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim, 1205 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1206 NULL, NULL, len, ns, slim, 0, busdma_lock_mutex, &Giant, 1207 &isp->isp_cdmat)) { 1208 isp_prt(isp, ISP_LOGERR, 1209 "cannot create a dma tag for control spaces"); 1210 free(pcs->dmaps, M_DEVBUF); 1211 free(isp->isp_xflist, M_DEVBUF); 1212 #ifdef ISP_TARGET_MODE 1213 free(isp->isp_tgtlist, M_DEVBUF); 1214 #endif 1215 ISP_LOCK(isp); 1216 return (1); 1217 } 1218 1219 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1220 &isp->isp_cdmap) != 0) { 1221 isp_prt(isp, ISP_LOGERR, 1222 "cannot allocate %d bytes of CCB memory", len); 1223 bus_dma_tag_destroy(isp->isp_cdmat); 1224 free(isp->isp_xflist, M_DEVBUF); 1225 #ifdef ISP_TARGET_MODE 1226 free(isp->isp_tgtlist, M_DEVBUF); 1227 #endif 1228 free(pcs->dmaps, M_DEVBUF); 1229 ISP_LOCK(isp); 1230 return (1); 1231 } 1232 1233 for (i = 0; i < isp->isp_maxcmds; i++) { 1234 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1235 if (error) { 1236 isp_prt(isp, ISP_LOGERR, 1237 "error %d creating per-cmd DMA maps", error); 1238 while (--i >= 0) { 1239 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1240 } 1241 goto bad; 1242 } 1243 } 1244 1245 im.isp = isp; 1246 im.error = 0; 1247 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1248 if (im.error) { 1249 isp_prt(isp, ISP_LOGERR, 1250 "error %d loading dma map for control areas", im.error); 1251 goto bad; 1252 } 1253 1254 isp->isp_rquest = base; 1255 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1256 isp->isp_result = base; 1257 if (IS_FC(isp)) { 1258 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1259 FCPARAM(isp)->isp_scratch = base; 1260 } 1261 ISP_LOCK(isp); 1262 return (0); 1263 1264 bad: 1265 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1266 bus_dma_tag_destroy(isp->isp_cdmat); 1267 free(isp->isp_xflist, M_DEVBUF); 1268 #ifdef ISP_TARGET_MODE 1269 free(isp->isp_tgtlist, M_DEVBUF); 1270 #endif 1271 free(pcs->dmaps, M_DEVBUF); 1272 ISP_LOCK(isp); 1273 isp->isp_rquest = NULL; 1274 return (1); 1275 } 1276 1277 typedef struct { 1278 struct ispsoftc *isp; 1279 void *cmd_token; 1280 void *rq; 1281 uint16_t *nxtip; 1282 uint16_t optr; 1283 int error; 1284 } mush_t; 1285 1286 #define MUSHERR_NOQENTRIES -2 1287 1288 #ifdef ISP_TARGET_MODE 1289 /* 1290 * We need to handle DMA for target mode differently from initiator mode. 1291 * 1292 * DMA mapping and construction and submission of CTIO Request Entries 1293 * and rendevous for completion are very tightly coupled because we start 1294 * out by knowing (per platform) how much data we have to move, but we 1295 * don't know, up front, how many DMA mapping segments will have to be used 1296 * cover that data, so we don't know how many CTIO Request Entries we 1297 * will end up using. Further, for performance reasons we may want to 1298 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1299 * 1300 * The standard vector still goes through isp_pci_dmasetup, but the callback 1301 * for the DMA mapping routines comes here instead with the whole transfer 1302 * mapped and a pointer to a partially filled in already allocated request 1303 * queue entry. We finish the job. 1304 */ 1305 static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1306 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1307 1308 #define STATUS_WITH_DATA 1 1309 1310 static void 1311 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1312 { 1313 mush_t *mp; 1314 struct ccb_scsiio *csio; 1315 struct ispsoftc *isp; 1316 struct isp_pcisoftc *pcs; 1317 bus_dmamap_t *dp; 1318 ct_entry_t *cto, *qe; 1319 uint8_t scsi_status; 1320 uint16_t curi, nxti, handle; 1321 uint32_t sflags; 1322 int32_t resid; 1323 int nth_ctio, nctios, send_status; 1324 1325 mp = (mush_t *) arg; 1326 if (error) { 1327 mp->error = error; 1328 return; 1329 } 1330 1331 isp = mp->isp; 1332 csio = mp->cmd_token; 1333 cto = mp->rq; 1334 curi = isp->isp_reqidx; 1335 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1336 1337 cto->ct_xfrlen = 0; 1338 cto->ct_seg_count = 0; 1339 cto->ct_header.rqs_entry_count = 1; 1340 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1341 1342 if (nseg == 0) { 1343 cto->ct_header.rqs_seqno = 1; 1344 isp_prt(isp, ISP_LOGTDEBUG1, 1345 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1346 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1347 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1348 cto->ct_scsi_status, cto->ct_resid); 1349 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1350 isp_put_ctio(isp, cto, qe); 1351 return; 1352 } 1353 1354 nctios = nseg / ISP_RQDSEG; 1355 if (nseg % ISP_RQDSEG) { 1356 nctios++; 1357 } 1358 1359 /* 1360 * Save syshandle, and potentially any SCSI status, which we'll 1361 * reinsert on the last CTIO we're going to send. 1362 */ 1363 1364 handle = cto->ct_syshandle; 1365 cto->ct_syshandle = 0; 1366 cto->ct_header.rqs_seqno = 0; 1367 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1368 1369 if (send_status) { 1370 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1371 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1372 /* 1373 * Preserve residual. 1374 */ 1375 resid = cto->ct_resid; 1376 1377 /* 1378 * Save actual SCSI status. 1379 */ 1380 scsi_status = cto->ct_scsi_status; 1381 1382 #ifndef STATUS_WITH_DATA 1383 sflags |= CT_NO_DATA; 1384 /* 1385 * We can't do a status at the same time as a data CTIO, so 1386 * we need to synthesize an extra CTIO at this level. 1387 */ 1388 nctios++; 1389 #endif 1390 } else { 1391 sflags = scsi_status = resid = 0; 1392 } 1393 1394 cto->ct_resid = 0; 1395 cto->ct_scsi_status = 0; 1396 1397 pcs = (struct isp_pcisoftc *)isp; 1398 dp = &pcs->dmaps[isp_handle_index(handle)]; 1399 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1400 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1401 } else { 1402 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1403 } 1404 1405 nxti = *mp->nxtip; 1406 1407 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 1408 int seglim; 1409 1410 seglim = nseg; 1411 if (seglim) { 1412 int seg; 1413 1414 if (seglim > ISP_RQDSEG) 1415 seglim = ISP_RQDSEG; 1416 1417 for (seg = 0; seg < seglim; seg++, nseg--) { 1418 /* 1419 * Unlike normal initiator commands, we don't 1420 * do any swizzling here. 1421 */ 1422 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1423 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1424 cto->ct_xfrlen += dm_segs->ds_len; 1425 dm_segs++; 1426 } 1427 cto->ct_seg_count = seg; 1428 } else { 1429 /* 1430 * This case should only happen when we're sending an 1431 * extra CTIO with final status. 1432 */ 1433 if (send_status == 0) { 1434 isp_prt(isp, ISP_LOGWARN, 1435 "tdma_mk ran out of segments"); 1436 mp->error = EINVAL; 1437 return; 1438 } 1439 } 1440 1441 /* 1442 * At this point, the fields ct_lun, ct_iid, ct_tagval, 1443 * ct_tagtype, and ct_timeout have been carried over 1444 * unchanged from what our caller had set. 1445 * 1446 * The dataseg fields and the seg_count fields we just got 1447 * through setting. The data direction we've preserved all 1448 * along and only clear it if we're now sending status. 1449 */ 1450 1451 if (nth_ctio == nctios - 1) { 1452 /* 1453 * We're the last in a sequence of CTIOs, so mark 1454 * this CTIO and save the handle to the CCB such that 1455 * when this CTIO completes we can free dma resources 1456 * and do whatever else we need to do to finish the 1457 * rest of the command. We *don't* give this to the 1458 * firmware to work on- the caller will do that. 1459 */ 1460 1461 cto->ct_syshandle = handle; 1462 cto->ct_header.rqs_seqno = 1; 1463 1464 if (send_status) { 1465 cto->ct_scsi_status = scsi_status; 1466 cto->ct_flags |= sflags; 1467 cto->ct_resid = resid; 1468 } 1469 if (send_status) { 1470 isp_prt(isp, ISP_LOGTDEBUG1, 1471 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 1472 "scsi status %x resid %d", 1473 cto->ct_fwhandle, csio->ccb_h.target_lun, 1474 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 1475 cto->ct_scsi_status, cto->ct_resid); 1476 } else { 1477 isp_prt(isp, ISP_LOGTDEBUG1, 1478 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 1479 cto->ct_fwhandle, csio->ccb_h.target_lun, 1480 cto->ct_iid, cto->ct_tag_val, 1481 cto->ct_flags); 1482 } 1483 isp_put_ctio(isp, cto, qe); 1484 ISP_TDQE(isp, "last tdma_mk", curi, cto); 1485 if (nctios > 1) { 1486 MEMORYBARRIER(isp, SYNC_REQUEST, 1487 curi, QENTRY_LEN); 1488 } 1489 } else { 1490 ct_entry_t *oqe = qe; 1491 1492 /* 1493 * Make sure syshandle fields are clean 1494 */ 1495 cto->ct_syshandle = 0; 1496 cto->ct_header.rqs_seqno = 0; 1497 1498 isp_prt(isp, ISP_LOGTDEBUG1, 1499 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 1500 cto->ct_fwhandle, csio->ccb_h.target_lun, 1501 cto->ct_iid, cto->ct_flags); 1502 1503 /* 1504 * Get a new CTIO 1505 */ 1506 qe = (ct_entry_t *) 1507 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1508 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 1509 if (nxti == mp->optr) { 1510 isp_prt(isp, ISP_LOGTDEBUG0, 1511 "Queue Overflow in tdma_mk"); 1512 mp->error = MUSHERR_NOQENTRIES; 1513 return; 1514 } 1515 1516 /* 1517 * Now that we're done with the old CTIO, 1518 * flush it out to the request queue. 1519 */ 1520 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 1521 isp_put_ctio(isp, cto, oqe); 1522 if (nth_ctio != 0) { 1523 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 1524 QENTRY_LEN); 1525 } 1526 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 1527 1528 /* 1529 * Reset some fields in the CTIO so we can reuse 1530 * for the next one we'll flush to the request 1531 * queue. 1532 */ 1533 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1534 cto->ct_header.rqs_entry_count = 1; 1535 cto->ct_header.rqs_flags = 0; 1536 cto->ct_status = 0; 1537 cto->ct_scsi_status = 0; 1538 cto->ct_xfrlen = 0; 1539 cto->ct_resid = 0; 1540 cto->ct_seg_count = 0; 1541 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1542 } 1543 } 1544 *mp->nxtip = nxti; 1545 } 1546 1547 /* 1548 * We don't have to do multiple CTIOs here. Instead, we can just do 1549 * continuation segments as needed. This greatly simplifies the code 1550 * improves performance. 1551 */ 1552 1553 static void 1554 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1555 { 1556 mush_t *mp; 1557 struct ccb_scsiio *csio; 1558 struct ispsoftc *isp; 1559 ct2_entry_t *cto, *qe; 1560 uint16_t curi, nxti; 1561 int segcnt; 1562 1563 mp = (mush_t *) arg; 1564 if (error) { 1565 mp->error = error; 1566 return; 1567 } 1568 1569 isp = mp->isp; 1570 csio = mp->cmd_token; 1571 cto = mp->rq; 1572 1573 curi = isp->isp_reqidx; 1574 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1575 1576 if (nseg == 0) { 1577 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 1578 isp_prt(isp, ISP_LOGWARN, 1579 "dma2_tgt_fc, a status CTIO2 without MODE1 " 1580 "set (0x%x)", cto->ct_flags); 1581 mp->error = EINVAL; 1582 return; 1583 } 1584 /* 1585 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 1586 * flags to NO DATA and clear relative offset flags. 1587 * We preserve the ct_resid and the response area. 1588 */ 1589 cto->ct_header.rqs_seqno = 1; 1590 cto->ct_seg_count = 0; 1591 cto->ct_reloff = 0; 1592 isp_prt(isp, ISP_LOGTDEBUG1, 1593 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 1594 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 1595 cto->ct_iid, cto->ct_flags, cto->ct_status, 1596 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1597 isp_put_ctio2(isp, cto, qe); 1598 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 1599 return; 1600 } 1601 1602 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 1603 isp_prt(isp, ISP_LOGERR, 1604 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 1605 "(0x%x)", cto->ct_flags); 1606 mp->error = EINVAL; 1607 return; 1608 } 1609 1610 1611 nxti = *mp->nxtip; 1612 1613 /* 1614 * Set up the CTIO2 data segments. 1615 */ 1616 for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg; 1617 cto->ct_seg_count++, segcnt++) { 1618 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base = 1619 dm_segs[segcnt].ds_addr; 1620 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count = 1621 dm_segs[segcnt].ds_len; 1622 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1623 isp_prt(isp, ISP_LOGTDEBUG1, 1624 "isp_send_ctio2: ent0[%d]0x%jx:%ju", 1625 cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr, 1626 (uintmax_t)dm_segs[segcnt].ds_len); 1627 } 1628 1629 while (segcnt < nseg) { 1630 uint16_t curip; 1631 int seg; 1632 ispcontreq_t local, *crq = &local, *qep; 1633 1634 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1635 curip = nxti; 1636 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 1637 if (nxti == mp->optr) { 1638 ISP_UNLOCK(isp); 1639 isp_prt(isp, ISP_LOGTDEBUG0, 1640 "tdma_mkfc: request queue overflow"); 1641 mp->error = MUSHERR_NOQENTRIES; 1642 return; 1643 } 1644 cto->ct_header.rqs_entry_count++; 1645 MEMZERO((void *)crq, sizeof (*crq)); 1646 crq->req_header.rqs_entry_count = 1; 1647 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1648 for (seg = 0; segcnt < nseg && seg < ISP_CDSEG; 1649 segcnt++, seg++) { 1650 crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr; 1651 crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len; 1652 isp_prt(isp, ISP_LOGTDEBUG1, 1653 "isp_send_ctio2: ent%d[%d]%jx:%ju", 1654 cto->ct_header.rqs_entry_count-1, seg, 1655 (uintmax_t)dm_segs[segcnt].ds_addr, 1656 (uintmax_t)dm_segs[segcnt].ds_len); 1657 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1658 cto->ct_seg_count++; 1659 } 1660 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 1661 isp_put_cont_req(isp, crq, qep); 1662 ISP_TDQE(isp, "cont entry", curi, qep); 1663 } 1664 1665 /* 1666 * No do final twiddling for the CTIO itself. 1667 */ 1668 cto->ct_header.rqs_seqno = 1; 1669 isp_prt(isp, ISP_LOGTDEBUG1, 1670 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 1671 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 1672 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 1673 cto->ct_resid); 1674 isp_put_ctio2(isp, cto, qe); 1675 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 1676 *mp->nxtip = nxti; 1677 } 1678 #endif 1679 1680 static void dma2_a64(void *, bus_dma_segment_t *, int, int); 1681 static void dma2(void *, bus_dma_segment_t *, int, int); 1682 1683 static void 1684 dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1685 { 1686 mush_t *mp; 1687 struct ispsoftc *isp; 1688 struct ccb_scsiio *csio; 1689 struct isp_pcisoftc *pcs; 1690 bus_dmamap_t *dp; 1691 bus_dma_segment_t *eseg; 1692 ispreq64_t *rq; 1693 int seglim, datalen; 1694 uint16_t nxti; 1695 1696 mp = (mush_t *) arg; 1697 if (error) { 1698 mp->error = error; 1699 return; 1700 } 1701 1702 if (nseg < 1) { 1703 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 1704 mp->error = EFAULT; 1705 return; 1706 } 1707 csio = mp->cmd_token; 1708 isp = mp->isp; 1709 rq = mp->rq; 1710 pcs = (struct isp_pcisoftc *)mp->isp; 1711 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1712 nxti = *mp->nxtip; 1713 1714 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1715 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1716 } else { 1717 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1718 } 1719 datalen = XS_XFRLEN(csio); 1720 1721 /* 1722 * We're passed an initial partially filled in entry that 1723 * has most fields filled in except for data transfer 1724 * related values. 1725 * 1726 * Our job is to fill in the initial request queue entry and 1727 * then to start allocating and filling in continuation entries 1728 * until we've covered the entire transfer. 1729 */ 1730 1731 if (IS_FC(isp)) { 1732 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 1733 seglim = ISP_RQDSEG_T3; 1734 ((ispreqt3_t *)rq)->req_totalcnt = datalen; 1735 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1736 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1737 } else { 1738 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1739 } 1740 } else { 1741 rq->req_header.rqs_entry_type = RQSTYPE_A64; 1742 if (csio->cdb_len > 12) { 1743 seglim = 0; 1744 } else { 1745 seglim = ISP_RQDSEG_A64; 1746 } 1747 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1748 rq->req_flags |= REQFLAG_DATA_IN; 1749 } else { 1750 rq->req_flags |= REQFLAG_DATA_OUT; 1751 } 1752 } 1753 1754 eseg = dm_segs + nseg; 1755 1756 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1757 if (IS_FC(isp)) { 1758 ispreqt3_t *rq3 = (ispreqt3_t *)rq; 1759 rq3->req_dataseg[rq3->req_seg_count].ds_base = 1760 DMA_LO32(dm_segs->ds_addr); 1761 rq3->req_dataseg[rq3->req_seg_count].ds_basehi = 1762 DMA_HI32(dm_segs->ds_addr); 1763 rq3->req_dataseg[rq3->req_seg_count].ds_count = 1764 dm_segs->ds_len; 1765 } else { 1766 rq->req_dataseg[rq->req_seg_count].ds_base = 1767 DMA_LO32(dm_segs->ds_addr); 1768 rq->req_dataseg[rq->req_seg_count].ds_basehi = 1769 DMA_HI32(dm_segs->ds_addr); 1770 rq->req_dataseg[rq->req_seg_count].ds_count = 1771 dm_segs->ds_len; 1772 } 1773 datalen -= dm_segs->ds_len; 1774 rq->req_seg_count++; 1775 dm_segs++; 1776 } 1777 1778 while (datalen > 0 && dm_segs != eseg) { 1779 uint16_t onxti; 1780 ispcontreq64_t local, *crq = &local, *cqe; 1781 1782 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1783 onxti = nxti; 1784 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 1785 if (nxti == mp->optr) { 1786 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 1787 mp->error = MUSHERR_NOQENTRIES; 1788 return; 1789 } 1790 rq->req_header.rqs_entry_count++; 1791 MEMZERO((void *)crq, sizeof (*crq)); 1792 crq->req_header.rqs_entry_count = 1; 1793 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 1794 1795 seglim = 0; 1796 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 1797 crq->req_dataseg[seglim].ds_base = 1798 DMA_LO32(dm_segs->ds_addr); 1799 crq->req_dataseg[seglim].ds_basehi = 1800 DMA_HI32(dm_segs->ds_addr); 1801 crq->req_dataseg[seglim].ds_count = 1802 dm_segs->ds_len; 1803 rq->req_seg_count++; 1804 dm_segs++; 1805 seglim++; 1806 datalen -= dm_segs->ds_len; 1807 } 1808 isp_put_cont64_req(isp, crq, cqe); 1809 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 1810 } 1811 *mp->nxtip = nxti; 1812 } 1813 1814 static void 1815 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1816 { 1817 mush_t *mp; 1818 struct ispsoftc *isp; 1819 struct ccb_scsiio *csio; 1820 struct isp_pcisoftc *pcs; 1821 bus_dmamap_t *dp; 1822 bus_dma_segment_t *eseg; 1823 ispreq_t *rq; 1824 int seglim, datalen; 1825 uint16_t nxti; 1826 1827 mp = (mush_t *) arg; 1828 if (error) { 1829 mp->error = error; 1830 return; 1831 } 1832 1833 if (nseg < 1) { 1834 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 1835 mp->error = EFAULT; 1836 return; 1837 } 1838 csio = mp->cmd_token; 1839 isp = mp->isp; 1840 rq = mp->rq; 1841 pcs = (struct isp_pcisoftc *)mp->isp; 1842 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1843 nxti = *mp->nxtip; 1844 1845 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1846 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1847 } else { 1848 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1849 } 1850 1851 datalen = XS_XFRLEN(csio); 1852 1853 /* 1854 * We're passed an initial partially filled in entry that 1855 * has most fields filled in except for data transfer 1856 * related values. 1857 * 1858 * Our job is to fill in the initial request queue entry and 1859 * then to start allocating and filling in continuation entries 1860 * until we've covered the entire transfer. 1861 */ 1862 1863 if (IS_FC(isp)) { 1864 seglim = ISP_RQDSEG_T2; 1865 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 1866 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1867 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1868 } else { 1869 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1870 } 1871 } else { 1872 if (csio->cdb_len > 12) { 1873 seglim = 0; 1874 } else { 1875 seglim = ISP_RQDSEG; 1876 } 1877 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1878 rq->req_flags |= REQFLAG_DATA_IN; 1879 } else { 1880 rq->req_flags |= REQFLAG_DATA_OUT; 1881 } 1882 } 1883 1884 eseg = dm_segs + nseg; 1885 1886 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1887 if (IS_FC(isp)) { 1888 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 1889 rq2->req_dataseg[rq2->req_seg_count].ds_base = 1890 DMA_LO32(dm_segs->ds_addr); 1891 rq2->req_dataseg[rq2->req_seg_count].ds_count = 1892 dm_segs->ds_len; 1893 } else { 1894 rq->req_dataseg[rq->req_seg_count].ds_base = 1895 DMA_LO32(dm_segs->ds_addr); 1896 rq->req_dataseg[rq->req_seg_count].ds_count = 1897 dm_segs->ds_len; 1898 } 1899 datalen -= dm_segs->ds_len; 1900 rq->req_seg_count++; 1901 dm_segs++; 1902 } 1903 1904 while (datalen > 0 && dm_segs != eseg) { 1905 uint16_t onxti; 1906 ispcontreq_t local, *crq = &local, *cqe; 1907 1908 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1909 onxti = nxti; 1910 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 1911 if (nxti == mp->optr) { 1912 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 1913 mp->error = MUSHERR_NOQENTRIES; 1914 return; 1915 } 1916 rq->req_header.rqs_entry_count++; 1917 MEMZERO((void *)crq, sizeof (*crq)); 1918 crq->req_header.rqs_entry_count = 1; 1919 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1920 1921 seglim = 0; 1922 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 1923 crq->req_dataseg[seglim].ds_base = 1924 DMA_LO32(dm_segs->ds_addr); 1925 crq->req_dataseg[seglim].ds_count = 1926 dm_segs->ds_len; 1927 rq->req_seg_count++; 1928 dm_segs++; 1929 seglim++; 1930 datalen -= dm_segs->ds_len; 1931 } 1932 isp_put_cont_req(isp, crq, cqe); 1933 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 1934 } 1935 *mp->nxtip = nxti; 1936 } 1937 1938 static int 1939 isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq, 1940 uint16_t *nxtip, uint16_t optr) 1941 { 1942 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1943 ispreq_t *qep; 1944 bus_dmamap_t *dp = NULL; 1945 mush_t mush, *mp; 1946 void (*eptr)(void *, bus_dma_segment_t *, int, int); 1947 1948 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 1949 #ifdef ISP_TARGET_MODE 1950 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1951 if (IS_FC(isp)) { 1952 eptr = tdma_mkfc; 1953 } else { 1954 eptr = tdma_mk; 1955 } 1956 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1957 (csio->dxfer_len == 0)) { 1958 mp = &mush; 1959 mp->isp = isp; 1960 mp->cmd_token = csio; 1961 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 1962 mp->nxtip = nxtip; 1963 mp->optr = optr; 1964 mp->error = 0; 1965 (*eptr)(mp, NULL, 0, 0); 1966 goto mbxsync; 1967 } 1968 } else 1969 #endif 1970 if (sizeof (bus_addr_t) > 4) { 1971 eptr = dma2_a64; 1972 } else { 1973 eptr = dma2; 1974 } 1975 1976 1977 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1978 (csio->dxfer_len == 0)) { 1979 rq->req_seg_count = 1; 1980 goto mbxsync; 1981 } 1982 1983 /* 1984 * Do a virtual grapevine step to collect info for 1985 * the callback dma allocation that we have to use... 1986 */ 1987 mp = &mush; 1988 mp->isp = isp; 1989 mp->cmd_token = csio; 1990 mp->rq = rq; 1991 mp->nxtip = nxtip; 1992 mp->optr = optr; 1993 mp->error = 0; 1994 1995 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1996 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 1997 int error, s; 1998 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1999 s = splsoftvm(); 2000 error = bus_dmamap_load(pcs->dmat, *dp, 2001 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 2002 if (error == EINPROGRESS) { 2003 bus_dmamap_unload(pcs->dmat, *dp); 2004 mp->error = EINVAL; 2005 isp_prt(isp, ISP_LOGERR, 2006 "deferred dma allocation not supported"); 2007 } else if (error && mp->error == 0) { 2008 #ifdef DIAGNOSTIC 2009 isp_prt(isp, ISP_LOGERR, 2010 "error %d in dma mapping code", error); 2011 #endif 2012 mp->error = error; 2013 } 2014 splx(s); 2015 } else { 2016 /* Pointer to physical buffer */ 2017 struct bus_dma_segment seg; 2018 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; 2019 seg.ds_len = csio->dxfer_len; 2020 (*eptr)(mp, &seg, 1, 0); 2021 } 2022 } else { 2023 struct bus_dma_segment *segs; 2024 2025 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 2026 isp_prt(isp, ISP_LOGERR, 2027 "Physical segment pointers unsupported"); 2028 mp->error = EINVAL; 2029 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2030 isp_prt(isp, ISP_LOGERR, 2031 "Virtual segment addresses unsupported"); 2032 mp->error = EINVAL; 2033 } else { 2034 /* Just use the segments provided */ 2035 segs = (struct bus_dma_segment *) csio->data_ptr; 2036 (*eptr)(mp, segs, csio->sglist_cnt, 0); 2037 } 2038 } 2039 if (mp->error) { 2040 int retval = CMD_COMPLETE; 2041 if (mp->error == MUSHERR_NOQENTRIES) { 2042 retval = CMD_EAGAIN; 2043 } else if (mp->error == EFBIG) { 2044 XS_SETERR(csio, CAM_REQ_TOO_BIG); 2045 } else if (mp->error == EINVAL) { 2046 XS_SETERR(csio, CAM_REQ_INVALID); 2047 } else { 2048 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 2049 } 2050 return (retval); 2051 } 2052 mbxsync: 2053 switch (rq->req_header.rqs_entry_type) { 2054 case RQSTYPE_REQUEST: 2055 isp_put_request(isp, rq, qep); 2056 break; 2057 case RQSTYPE_CMDONLY: 2058 isp_put_extended_request(isp, (ispextreq_t *)rq, 2059 (ispextreq_t *)qep); 2060 break; 2061 case RQSTYPE_T2RQS: 2062 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 2063 break; 2064 case RQSTYPE_A64: 2065 case RQSTYPE_T3RQS: 2066 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep); 2067 break; 2068 } 2069 return (CMD_QUEUED); 2070 } 2071 2072 static void 2073 isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, uint16_t handle) 2074 { 2075 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2076 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)]; 2077 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2078 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 2079 } else { 2080 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 2081 } 2082 bus_dmamap_unload(pcs->dmat, *dp); 2083 } 2084 2085 2086 static void 2087 isp_pci_reset1(struct ispsoftc *isp) 2088 { 2089 /* Make sure the BIOS is disabled */ 2090 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 2091 /* and enable interrupts */ 2092 ENABLE_INTS(isp); 2093 } 2094 2095 static void 2096 isp_pci_dumpregs(struct ispsoftc *isp, const char *msg) 2097 { 2098 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2099 if (msg) 2100 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2101 else 2102 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2103 if (IS_SCSI(isp)) 2104 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2105 else 2106 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2107 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2108 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2109 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2110 2111 2112 if (IS_SCSI(isp)) { 2113 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2114 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2115 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2116 ISP_READ(isp, CDMA_FIFO_STS)); 2117 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2118 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2119 ISP_READ(isp, DDMA_FIFO_STS)); 2120 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2121 ISP_READ(isp, SXP_INTERRUPT), 2122 ISP_READ(isp, SXP_GROSS_ERR), 2123 ISP_READ(isp, SXP_PINS_CTRL)); 2124 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2125 } 2126 printf(" mbox regs: %x %x %x %x %x\n", 2127 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2128 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2129 ISP_READ(isp, OUTMAILBOX4)); 2130 printf(" PCI Status Command/Status=%x\n", 2131 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2132 } 2133