/* $FreeBSD$ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 *---------------------------------------
 * Copyright (c) 1997, 1998 by Matthew Jacob
 *	NASA/Ames Research Center
 *	All rights reserved.
 *---------------------------------------
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <dev/isp/isp_freebsd.h>
#include <dev/isp/asm_pci.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#if __FreeBSD_version >= 300004
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#endif

static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
	ispreq_t *, u_int8_t *, u_int8_t));
#if __FreeBSD_version >= 300004
static void
isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t));
#else
#define	isp_pci_dmateardown	NULL
#endif

static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));

static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_RISC_CODE,
	ISP_CODE_LENGTH,
	ISP_CODE_ORG,
	ISP_CODE_VERSION,
	BIU_BURST_ENABLE,
	0
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP2100_RISC_CODE,
	ISP2100_CODE_LENGTH,
	ISP2100_CODE_ORG,
	ISP2100_CODE_VERSION,
	BIU_BURST_ENABLE,
	0
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#define	PCI_QLOGIC_ISP	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

static char *isp_pci_probe __P((pcici_t tag, pcidi_t type));
static void isp_pci_attach __P((pcici_t config_d, int unit));

/* This distinguishing define is not right, but it does work */

#if __FreeBSD_version < 300004
#define	IO_SPACE_MAPPING	0
#define	MEM_SPACE_MAPPING	1
typedef int bus_space_tag_t;
typedef u_long bus_space_handle_t;
#ifdef __alpha__
#define	bus_space_read_2(st, sh, offset)	\
	(st == IO_SPACE_MAPPING)? \
	    inw((pci_port_t)sh + offset) : readw((pci_port_t)sh + offset)
#define	bus_space_write_2(st, sh, offset, val)	\
	if (st == IO_SPACE_MAPPING) outw((pci_port_t)sh + offset, val); else \
	    writew((pci_port_t)sh + offset, val)
#else
#define	bus_space_read_2(st, sh, offset)	\
	(st == IO_SPACE_MAPPING)? \
	    inw((pci_port_t)sh + offset) : *((u_int16_t *)(uintptr_t)sh)
#define	bus_space_write_2(st, sh, offset, val)	\
	if (st == IO_SPACE_MAPPING) outw((pci_port_t)sh + offset, val); else \
	    *((u_int16_t *)(uintptr_t)sh) = val
#endif
#else
#ifdef __alpha__
#define	IO_SPACE_MAPPING	ALPHA_BUS_SPACE_IO
#define	MEM_SPACE_MAPPING	ALPHA_BUS_SPACE_MEM
#else
#define	IO_SPACE_MAPPING	I386_BUS_SPACE_IO
#define	MEM_SPACE_MAPPING	I386_BUS_SPACE_MEM
#endif
#endif

struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	pcici_t				pci_id;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
#if __FreeBSD_version >= 300004
	bus_dma_tag_t			parent_dmat;
	bus_dma_tag_t			cntrol_dmat;
	bus_dmamap_t			cntrol_dmap;
	bus_dmamap_t			dmaps[MAXISPREQUEST];
#endif
	union {
		sdparam	_x;
		struct {
			fcparam _a;
			char _b[ISP2100_SCRLEN];
		} _y;
	} _z;
};

static u_long ispunit;

struct pci_device isp_pci_driver = {
	"isp",
	isp_pci_probe,
	isp_pci_attach,
	&ispunit,
	NULL
};
DATA_SET (pcidevice_set, isp_pci_driver);

static char *
isp_pci_probe(tag, type)
	pcici_t tag;
	pcidi_t type;
{
	static int oneshot = 1;
	char *x;

	switch (type) {
	case PCI_QLOGIC_ISP:
		x = "Qlogic ISP 10X0 PCI SCSI Adapter";
		break;
	case PCI_QLOGIC_ISP2100:
		x = "Qlogic ISP 2100 PCI FC-AL Adapter";
		break;
	default:
		return (NULL);
	}
	if (oneshot) {
		oneshot = 0;
		printf("%s Version %d.%d, Core Version %d.%d\n", PVS,
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	return (x);
}

static void
isp_pci_attach(config_id, unit)
	pcici_t config_id;
	int unit;
{
	int mapped;
	pci_port_t io_port;
	u_int32_t data;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp;
	vm_offset_t vaddr, paddr;
	ISP_LOCKVAL_DECL;

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
	if (pcs == NULL) {
		printf("isp%d: cannot allocate softc\n", unit);
		return;
	}
	bzero(pcs, sizeof (struct isp_pcisoftc));

	vaddr = paddr = NULL;
	mapped = 0;
	data = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
	if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) {
		if (pci_map_mem(config_id, MEM_MAP_REG, &vaddr, &paddr)) {
			pcs->pci_st = MEM_SPACE_MAPPING;
			pcs->pci_sh = vaddr;
			mapped++;
		}
	}
	if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) {
		if (pci_map_port(config_id, PCI_MAP_REG_START, &io_port)) {
			pcs->pci_st = IO_SPACE_MAPPING;
			pcs->pci_sh = io_port;
			mapped++;
		}
	}
	if (mapped == 0) {
		printf("isp%d: unable to map any ports!\n", unit);
		free(pcs, M_DEVBUF);
		return;
	}
	printf("isp%d: using %s space register mapping\n", unit,
	    pcs->pci_st == IO_SPACE_MAPPING?
"I/O" : "Memory"); 271 272 isp = &pcs->pci_isp; 273 (void) sprintf(isp->isp_name, "isp%d", unit); 274 isp->isp_osinfo.unit = unit; 275 276 data = pci_conf_read(config_id, PCI_ID_REG); 277 if (data == PCI_QLOGIC_ISP) { 278 isp->isp_mdvec = &mdvec; 279 isp->isp_type = ISP_HA_SCSI_UNKNOWN; 280 isp->isp_param = &pcs->_z._x; 281 } else if (data == PCI_QLOGIC_ISP2100) { 282 isp->isp_mdvec = &mdvec_2100; 283 isp->isp_type = ISP_HA_FC_2100; 284 isp->isp_param = &pcs->_z._y._a; 285 286 ISP_LOCK(isp); 287 data = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG); 288 data |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 289 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, data); 290 291 /* 292 * Wierd- we need to clear the lsb in offset 0x30 to take the 293 * chip out of reset state. 294 */ 295 data = pci_conf_read(config_id, 0x30); 296 data &= ~1; 297 pci_conf_write(config_id, 0x30, data); 298 ISP_UNLOCK(isp); 299 } else { 300 printf("%s: unknown dev (%x)- punting\n", isp->isp_name, data); 301 free(pcs, M_DEVBUF); 302 return; 303 } 304 305 #if __FreeBSD_version >= 300004 306 if (bus_dma_tag_create(NULL, 0, 0, BUS_SPACE_MAXADDR_32BIT, 307 BUS_SPACE_MAXADDR, NULL, NULL, 1<<24, 308 255, 1<<24, 0, &pcs->parent_dmat) != 0) { 309 printf("%s: could not create master dma tag\n", isp->isp_name); 310 free(pcs, M_DEVBUF); 311 return; 312 } 313 #endif 314 if (pci_map_int(config_id, (void (*)(void *))isp_intr, 315 (void *)isp, &IMASK) == 0) { 316 printf("%s: could not map interrupt\n", isp->isp_name); 317 free(pcs, M_DEVBUF); 318 return; 319 } 320 321 pcs->pci_id = config_id; 322 ISP_LOCK(isp); 323 isp_reset(isp); 324 if (isp->isp_state != ISP_RESETSTATE) { 325 ISP_UNLOCK(isp); 326 free(pcs, M_DEVBUF); 327 return; 328 } 329 isp_init(isp); 330 if (isp->isp_state != ISP_INITSTATE) { 331 isp_uninit(isp); 332 ISP_UNLOCK(isp); 333 free(pcs, M_DEVBUF); 334 return; 335 } 336 isp_attach(isp); 337 if (isp->isp_state != ISP_RUNSTATE) { 338 isp_uninit(isp); 339 free(pcs, M_DEVBUF); 340 } 341 ISP_UNLOCK(isp); 342 #ifdef __alpha__ 343 alpha_register_pci_scsi(config_id->bus, config_id->slot, isp->isp_sim); 344 #endif 345 } 346 347 #define PCI_BIU_REGS_OFF BIU_REGS_OFF 348 349 static u_int16_t 350 isp_pci_rd_reg(isp, regoff) 351 struct ispsoftc *isp; 352 int regoff; 353 { 354 u_int16_t rv; 355 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 356 int offset, oldsxp = 0; 357 358 if ((regoff & BIU_BLOCK) != 0) { 359 offset = PCI_BIU_REGS_OFF; 360 } else if ((regoff & MBOX_BLOCK) != 0) { 361 if (isp->isp_type & ISP_HA_SCSI) 362 offset = PCI_MBOX_REGS_OFF; 363 else 364 offset = PCI_MBOX_REGS2100_OFF; 365 } else if ((regoff & SXP_BLOCK) != 0) { 366 offset = PCI_SXP_REGS_OFF; 367 /* 368 * We will assume that someone has paused the RISC processor. 
		 */
		oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
	} else {
		offset = PCI_RISC_REGS_OFF;
	}
	regoff &= 0xff;
	offset += regoff;
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & SXP_BLOCK) != 0) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
	}
	return (rv);
}

static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldsxp = 0;

	if ((regoff & BIU_BLOCK) != 0) {
		offset = PCI_BIU_REGS_OFF;
	} else if ((regoff & MBOX_BLOCK) != 0) {
		if (isp->isp_type & ISP_HA_SCSI)
			offset = PCI_MBOX_REGS_OFF;
		else
			offset = PCI_MBOX_REGS2100_OFF;
	} else if ((regoff & SXP_BLOCK) != 0) {
		offset = PCI_SXP_REGS_OFF;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldsxp = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP);
	} else {
		offset = PCI_RISC_REGS_OFF;
	}
	regoff &= 0xff;
	offset += regoff;
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & SXP_BLOCK) != 0) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldsxp);
	}
}

#if __FreeBSD_version >= 300004
static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));

static void
isp_map_rquest(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	int error;
{
	struct ispsoftc *isp = (struct ispsoftc *) arg;
	isp->isp_rquest_dma = segs->ds_addr;
}

static void
isp_map_result(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	int error;
{
	struct ispsoftc *isp = (struct ispsoftc *) arg;
	isp->isp_result_dma = segs->ds_addr;
}

static void
isp_map_fcscrt(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	int error;
{
	struct ispsoftc *isp = (struct ispsoftc *) arg;
	fcparam *fcp = isp->isp_param;
	fcp->isp_scdma = segs->ds_addr;
}

static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error;

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
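	 * A single contiguous chunk of DMA-able memory is allocated and then
	 * carved up: the request queue first, the result queue after it and,
	 * for Fibre Channel adapters, the scratch area last.  Each piece is
	 * loaded separately so that its bus address gets recorded by the
	 * matching callback above.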
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (isp->isp_type & ISP_HA_FC) {
		len += ISP2100_SCRLEN;
	}
	if (bus_dma_tag_create(pci->parent_dmat, 0, 0, BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR, NULL, NULL, len, 1, BUS_SPACE_MAXSIZE_32BIT,
	    0, &pci->cntrol_dmat) != 0) {
		printf("%s: cannot create a dma tag for control spaces\n",
		    isp->isp_name);
		return (1);
	}
	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
		printf("%s: cannot allocate %d bytes of CCB memory\n",
		    isp->isp_name, len);
		return (1);
	}

	isp->isp_rquest = base;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, pci, 0);

	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, pci, 0);

	if (isp->isp_type & ISP_HA_FC) {
		fcparam *fcp = (fcparam *) isp->isp_param;
		fcp->isp_scratch = isp->isp_result +
		    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, pci, 0);
	}

	/*
	 * Use this opportunity to initialize/create data DMA maps.
	 */
	for (i = 0; i < MAXISPREQUEST; i++) {
		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
		if (error) {
			printf("%s: error %d creating mailbox DMA maps\n",
			    isp->isp_name, error);
			return (1);
		}
	}
	return (0);
}

static void dma2 __P((void *, bus_dma_segment_t *, int, int));
typedef struct {
	struct ispsoftc *isp;
	ISP_SCSI_XFER_T *ccb;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

static void
dma2(arg, dm_segs, nseg, error)
	void *arg;
	bus_dma_segment_t *dm_segs;
	int nseg;
	int error;
{
	mush_t *mp;
	ISP_SCSI_XFER_T *ccb;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
	ispcontreq_t *crq;
	int drq, seglim, datalen;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	if (nseg < 1) {
		printf("%s: zero or negative segment count\n", isp->isp_name);
		mp->error = EFAULT;
		return;
	}
	ccb = mp->ccb;
	rq = mp->rq;
	iptrp = mp->iptrp;
	optr = mp->optr;

	pci = (struct isp_pcisoftc *)isp;
	dp = &pci->dmaps[rq->req_handle - 1];
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
		drq = REQFLAG_DATA_IN;
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
		drq = REQFLAG_DATA_OUT;
	}

	datalen = XS_XFRLEN(ccb);
	if (isp->isp_type & ISP_HA_FC) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (isp->isp_type & ISP_HA_FC) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
			    dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
#if 0
		if (isp->isp_type & ISP_HA_FC) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_seg_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
		} else {
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_seg_count,
			    rq->req_dataseg[rq->req_seg_count].ds_count,
			    rq->req_dataseg[rq->req_seg_count].ds_base);
		}
#endif
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
		if (*iptrp == optr) {
#if 0
			printf("%s: Request Queue Overflow++\n", isp->isp_name);
#endif
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
#if 0
			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_header.rqs_entry_count-1,
			    seglim, crq->req_dataseg[seglim].ds_count,
			    crq->req_dataseg[seglim].ds_base);
#endif
			rq->req_seg_count++;
			/*
			 * Account for this segment before advancing to the
			 * next one.
			 */
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
	}
}

static int
isp_pci_dmasetup(isp, ccb, rq, iptrp, optr)
	struct ispsoftc *isp;
	ISP_SCSI_XFER_T *ccb;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	struct ccb_hdr *ccb_h;
	struct ccb_scsiio *csio;
	bus_dmamap_t *dp;
	mush_t mush, *mp;

	csio = (struct ccb_scsiio *) ccb;
	ccb_h = &csio->ccb_h;

	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		rq->req_seg_count = 1;
		return (CMD_QUEUED);
	}
	dp = &pci->dmaps[rq->req_handle - 1];

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
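	 * The stack-allocated mush_t carries the request state into dma2(),
	 * which bus_dmamap_load() invokes as its callback.  A deferred
	 * (EINPROGRESS) load is treated as an error below, so the callback
	 * is expected to have completed by the time the load call returns.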
	 */
	mp = &mush;
	mp->isp = isp;
	mp->ccb = ccb;
	mp->rq = rq;
	mp->iptrp = iptrp;
	mp->optr = optr;
	mp->error = 0;

	if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
		if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
			int error, s;

			s = splsoftvm();
			error = bus_dmamap_load(pci->parent_dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, dma2, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pci->parent_dmat, *dp);
				mp->error = EINVAL;
				printf("%s: deferred dma allocation not "
				    "supported\n", isp->isp_name);
			} else if (error && mp->error == 0) {
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			dma2(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((ccb_h->flags & CAM_DATA_PHYS) != 0) {
			printf("%s: Physical segment pointers unsupported\n",
			    isp->isp_name);
			mp->error = EINVAL;
		} else if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) {
			printf("%s: Virtual segment addresses unsupported\n",
			    isp->isp_name);
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			dma2(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
			ccb_h->status = CAM_UNREC_HBA_ERROR;
		} else if (mp->error == EFBIG) {
			ccb_h->status = CAM_REQ_TOO_BIG;
		} else if (mp->error == EINVAL) {
			ccb_h->status = CAM_REQ_INVALID;
		} else {
			ccb_h->status = CAM_UNREC_HBA_ERROR;
		}
		return (retval);
	} else {
		return (CMD_QUEUED);
	}
}

static void
isp_pci_dmateardown(isp, ccb, handle)
	struct ispsoftc *isp;
	ISP_SCSI_XFER_T *ccb;
	u_int32_t handle;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pci->dmaps[handle];

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pci->parent_dmat, *dp);
}

#else /* __FreeBSD_version >= 300004 */

static int
isp_pci_mbxdma(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	u_int32_t len;
	int rseg;

	/* XXXX CHECK FOR ALIGNMENT */
	/*
	 * Allocate and map the request queue.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	isp->isp_rquest = malloc(len, M_DEVBUF, M_NOWAIT);
	if (isp->isp_rquest == NULL) {
		printf("%s: cannot malloc request queue\n", isp->isp_name);
		return (1);
	}
	isp->isp_rquest_dma = vtophys(isp->isp_rquest);

#if 0
	printf("RQUEST=0x%x (0x%x)...", isp->isp_rquest, isp->isp_rquest_dma);
#endif

	/*
	 * Allocate and map the result queue.
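	 * As with the request queue, the memory comes from malloc(9) and its
	 * DMA address is taken with vtophys() of the base address, so the
	 * allocation is assumed to be physically contiguous.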
	 */
	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	isp->isp_result = malloc(len, M_DEVBUF, M_NOWAIT);
	if (isp->isp_result == NULL) {
		free(isp->isp_rquest, M_DEVBUF);
		printf("%s: cannot malloc result queue\n", isp->isp_name);
		return (1);
	}
	isp->isp_result_dma = vtophys(isp->isp_result);
#if 0
	printf("RESULT=0x%x (0x%x)\n", isp->isp_result, isp->isp_result_dma);
#endif
	if (isp->isp_type & ISP_HA_FC) {
		fcparam *fcp = isp->isp_param;
		len = ISP2100_SCRLEN;
		fcp->isp_scratch = (volatile caddr_t) &pci->_z._y._b;
		fcp->isp_scdma = vtophys(fcp->isp_scratch);
	}
	return (0);
}

static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	ISP_SCSI_XFER_T *xs;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	ispcontreq_t *crq;
	vm_offset_t vaddr;
	int drq, seglim;
	u_int32_t paddr, nextpaddr, datalen, size, *ctrp;

	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		return (CMD_QUEUED);
	}

	if (xs->flags & SCSI_DATA_IN) {
		drq = REQFLAG_DATA_IN;
	} else {
		drq = REQFLAG_DATA_OUT;
	}

	if (isp->isp_type & ISP_HA_FC) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = XS_XFRLEN(xs);
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}

	datalen = XS_XFRLEN(xs);
	vaddr = (vm_offset_t) xs->data;
	paddr = vtophys(vaddr);

	while (datalen != 0 && rq->req_seg_count < seglim) {
		if (isp->isp_type & ISP_HA_FC) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base = paddr;
			ctrp = &rq2->req_dataseg[rq2->req_seg_count].ds_count;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base = paddr;
			ctrp = &rq->req_dataseg[rq->req_seg_count].ds_count;
		}
		nextpaddr = paddr;
		*(ctrp) = 0;

		while (datalen != 0 && paddr == nextpaddr) {
			nextpaddr = (paddr & (~PAGE_MASK)) + PAGE_SIZE;
			size = nextpaddr - paddr;
			if (size > datalen)
				size = datalen;

			*(ctrp) += size;
			vaddr += size;
			datalen -= size;
			if (datalen != 0)
				paddr = vtophys(vaddr);
		}
#if 0
		if (isp->isp_type & ISP_HA_FC) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_seg_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
		} else {
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_seg_count,
			    rq->req_dataseg[rq->req_seg_count].ds_count,
			    rq->req_dataseg[rq->req_seg_count].ds_base);
		}
#endif
		rq->req_seg_count++;
	}

	if (datalen == 0)
		return (CMD_QUEUED);

	paddr = vtophys(vaddr);
	while (datalen > 0) {
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
		if (*iptrp == optr) {
			printf("%s: Request Queue Overflow\n", isp->isp_name);
			XS_SETERR(xs, HBA_BOTCH);
			return (CMD_EAGAIN);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (seglim = 0; datalen != 0 && seglim < ISP_CDSEG; seglim++) {
			crq->req_dataseg[seglim].ds_base = paddr;
			ctrp = &crq->req_dataseg[seglim].ds_count;
			*(ctrp) = 0;
			nextpaddr = paddr;
			while (datalen != 0 && paddr == nextpaddr) {
				nextpaddr = (paddr & (~PAGE_MASK)) + PAGE_SIZE;
				size = nextpaddr - paddr;
				if (size > datalen)
					size = datalen;

				*(ctrp) += size;
				vaddr += size;
				datalen -= size;
				if (datalen != 0)
					paddr = vtophys(vaddr);
			}
#if 0
			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_header.rqs_entry_count-1,
			    seglim, crq->req_dataseg[seglim].ds_count,
			    crq->req_dataseg[seglim].ds_base);
#endif
			rq->req_seg_count++;
		}
	}

	return (CMD_QUEUED);
}
#endif

static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}

static void
isp_pci_dumpregs(isp)
	struct ispsoftc *isp;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	printf("%s: PCI Status Command/Status=%lx\n", pci->pci_isp.isp_name,
	    pci_conf_read(pci->pci_id, PCI_COMMAND_STATUS_REG));
}