1 /* $Id: isp_pci.c,v 1.12 1998/12/28 19:24:23 mjacob Exp $ */ 2 /* release_12_28_98_A+ */ 3 /* 4 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 5 * FreeBSD Version. 6 * 7 *--------------------------------------- 8 * Copyright (c) 1997, 1998 by Matthew Jacob 9 * NASA/Ames Research Center 10 * All rights reserved. 11 *--------------------------------------- 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice immediately at the beginning of the file, without modification, 18 * this list of conditions, and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in the 21 * documentation and/or other materials provided with the distribution. 22 * 3. The name of the author may not be used to endorse or promote products 23 * derived from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 29 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 */ 37 #include <dev/isp/isp_freebsd.h> 38 #include <dev/isp/asm_pci.h> 39 #include <sys/malloc.h> 40 #include <vm/vm.h> 41 #include <vm/pmap.h> 42 #include <vm/vm_extern.h> 43 44 45 #include <pci/pcireg.h> 46 #include <pci/pcivar.h> 47 48 #if __FreeBSD_version >= 300004 49 #include <machine/bus_memio.h> 50 #include <machine/bus_pio.h> 51 #include <machine/bus.h> 52 #endif 53 54 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int)); 55 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t)); 56 static int isp_pci_mbxdma __P((struct ispsoftc *)); 57 static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *, 58 ispreq_t *, u_int8_t *, u_int8_t)); 59 #if __FreeBSD_version >= 300004 60 static void 61 isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t)); 62 #else 63 #define isp_pci_dmateardown NULL 64 #endif 65 66 static void isp_pci_reset1 __P((struct ispsoftc *)); 67 static void isp_pci_dumpregs __P((struct ispsoftc *)); 68 69 static struct ispmdvec mdvec = { 70 isp_pci_rd_reg, 71 isp_pci_wr_reg, 72 isp_pci_mbxdma, 73 isp_pci_dmasetup, 74 isp_pci_dmateardown, 75 NULL, 76 isp_pci_reset1, 77 isp_pci_dumpregs, 78 ISP_RISC_CODE, 79 ISP_CODE_LENGTH, 80 ISP_CODE_ORG, 81 ISP_CODE_VERSION, 82 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64, 83 0 84 }; 85 86 static struct ispmdvec mdvec_2100 = { 87 isp_pci_rd_reg, 88 isp_pci_wr_reg, 89 isp_pci_mbxdma, 90 isp_pci_dmasetup, 91 isp_pci_dmateardown, 92 NULL, 93 isp_pci_reset1, 94 isp_pci_dumpregs, 95 ISP2100_RISC_CODE, 96 ISP2100_CODE_LENGTH, 97 ISP2100_CODE_ORG, 98 ISP2100_CODE_VERSION, 99 0, /* Irrelevant to the 2100 */ 100 0 101 }; 102 103 #ifndef PCIM_CMD_INVEN 104 #define PCIM_CMD_INVEN 0x10 105 #endif 106 #ifndef PCIM_CMD_BUSMASTEREN 107 #define PCIM_CMD_BUSMASTEREN 0x0004 108 #endif 109 110 #ifndef PCI_VENDOR_QLOGIC 111 #define PCI_VENDOR_QLOGIC 0x1077 112 #endif 113 114 #ifndef PCI_PRODUCT_QLOGIC_ISP1020 115 #define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 116 #endif 117 118 #define PCI_QLOGIC_ISP \ 119 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 120 121 #ifndef PCI_PRODUCT_QLOGIC_ISP2100 122 #define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 123 #endif 124 125 #define PCI_QLOGIC_ISP2100 \ 126 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 127 128 #define IO_MAP_REG 0x10 129 #define MEM_MAP_REG 0x14 130 131 132 static const char *isp_pci_probe __P((pcici_t tag, pcidi_t type)); 133 static void isp_pci_attach __P((pcici_t config_d, int unit)); 134 135 /* This distinguishing define is not right, but it does work */ 136 137 #if __FreeBSD_version < 300004 138 #define IO_SPACE_MAPPING 0 139 #define MEM_SPACE_MAPPING 1 140 typedef int bus_space_tag_t; 141 typedef u_long bus_space_handle_t; 142 #ifdef __alpha__ 143 #define bus_space_read_2(st, sh, offset) \ 144 (st == IO_SPACE_MAPPING)? \ 145 inw((pci_port_t)sh + offset) : readw((pci_port_t)sh + offset) 146 #define bus_space_write_2(st, sh, offset, val) \ 147 if (st == IO_SPACE_MAPPING) outw((pci_port_t)sh + offset, val); else \ 148 writew((pci_port_t)sh + offset, val) 149 #else 150 #define bus_space_read_2(st, sh, offset) \ 151 (st == IO_SPACE_MAPPING)? \ 152 inw((pci_port_t)sh + offset) : *((u_int16_t *)(uintptr_t)sh) 153 #define bus_space_write_2(st, sh, offset, val) \ 154 if (st == IO_SPACE_MAPPING) outw((pci_port_t)sh + offset, val); else \ 155 *((u_int16_t *)(uintptr_t)sh) = val 156 #endif 157 #else 158 #ifdef __alpha__ 159 #define IO_SPACE_MAPPING ALPHA_BUS_SPACE_IO 160 #define MEM_SPACE_MAPPING ALPHA_BUS_SPACE_MEM 161 #else 162 #define IO_SPACE_MAPPING I386_BUS_SPACE_IO 163 #define MEM_SPACE_MAPPING I386_BUS_SPACE_MEM 164 #endif 165 #endif 166 167 struct isp_pcisoftc { 168 struct ispsoftc pci_isp; 169 pcici_t pci_id; 170 bus_space_tag_t pci_st; 171 bus_space_handle_t pci_sh; 172 #if __FreeBSD_version >= 300004 173 bus_dma_tag_t parent_dmat; 174 bus_dma_tag_t cntrol_dmat; 175 bus_dmamap_t cntrol_dmap; 176 bus_dmamap_t dmaps[MAXISPREQUEST]; 177 #endif 178 union { 179 sdparam _x; 180 struct { 181 fcparam _a; 182 char _b[ISP2100_SCRLEN]; 183 } _y; 184 } _z; 185 }; 186 187 static u_long ispunit; 188 189 struct pci_device isp_pci_driver = { 190 "isp", 191 isp_pci_probe, 192 isp_pci_attach, 193 &ispunit, 194 NULL 195 }; 196 DATA_SET (pcidevice_set, isp_pci_driver); 197 198 199 static const char * 200 isp_pci_probe(pcici_t tag, pcidi_t type) 201 { 202 static int oneshot = 1; 203 char *x; 204 205 switch (type) { 206 case PCI_QLOGIC_ISP: 207 x = "Qlogic ISP 10X0 PCI SCSI Adapter"; 208 break; 209 case PCI_QLOGIC_ISP2100: 210 x = "Qlogic ISP 2100 PCI FC-AL Adapter"; 211 break; 212 default: 213 return (NULL); 214 } 215 if (oneshot) { 216 oneshot = 0; 217 printf("%s Version %d.%d, Core Version %d.%d\n", PVS, 218 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 219 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 220 } 221 return (x); 222 } 223 224 225 static void 226 isp_pci_attach(pcici_t config_id, int unit) 227 { 228 int mapped; 229 pci_port_t io_port; 230 u_int32_t data; 231 struct isp_pcisoftc *pcs; 232 struct ispsoftc *isp; 233 vm_offset_t vaddr, paddr; 234 ISP_LOCKVAL_DECL; 235 236 237 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT); 238 if (pcs == NULL) { 239 printf("isp%d: cannot allocate softc\n", unit); 240 return; 241 } 242 bzero(pcs, sizeof (struct isp_pcisoftc)); 243 244 vaddr = paddr = NULL; 245 mapped = 0; 246 data = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG); 247 if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) { 248 if (pci_map_mem(config_id, MEM_MAP_REG, &vaddr, &paddr)) { 249 pcs->pci_st = MEM_SPACE_MAPPING; 250 pcs->pci_sh = vaddr; 251 mapped++; 252 } 253 } 254 if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) { 255 if (pci_map_port(config_id, PCI_MAP_REG_START, &io_port)) { 256 pcs->pci_st = IO_SPACE_MAPPING; 257 pcs->pci_sh = io_port; 258 mapped++; 259 } 260 } 261 if (mapped == 0) { 262 printf("isp%d: unable to map any ports!\n", unit); 263 free(pcs, M_DEVBUF); 264 return; 265 } 266 printf("isp%d: using %s space register mapping\n", unit, 267 pcs->pci_st == IO_SPACE_MAPPING? "I/O" : "Memory"); 268 269 isp = &pcs->pci_isp; 270 #if __FreeBSD_version >= 300006 271 (void) snprintf(isp->isp_name, sizeof(isp->isp_name), "isp%d", unit); 272 #else 273 (void) sprintf(isp->isp_name, "isp%d", unit); 274 #endif 275 isp->isp_osinfo.unit = unit; 276 277 data = pci_conf_read(config_id, PCI_ID_REG); 278 if (data == PCI_QLOGIC_ISP) { 279 isp->isp_mdvec = &mdvec; 280 isp->isp_type = ISP_HA_SCSI_UNKNOWN; 281 isp->isp_param = &pcs->_z._x; 282 } else if (data == PCI_QLOGIC_ISP2100) { 283 isp->isp_mdvec = &mdvec_2100; 284 isp->isp_type = ISP_HA_FC_2100; 285 isp->isp_param = &pcs->_z._y._a; 286 287 ISP_LOCK(isp); 288 data = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG); 289 data |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 290 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, data); 291 #if 0 292 /* 293 * Wierd- we need to clear the lsb in offset 0x30 to take the 294 * chip out of reset state. 295 */ 296 data = pci_conf_read(config_id, 0x30); 297 data &= ~1; 298 pci_conf_write(config_id, 0x30, data); 299 #endif 300 ISP_UNLOCK(isp); 301 } else { 302 printf("%s: unknown dev (%x)- punting\n", isp->isp_name, data); 303 free(pcs, M_DEVBUF); 304 return; 305 } 306 307 #if __FreeBSD_version >= 300004 308 if (bus_dma_tag_create(NULL, 0, 0, BUS_SPACE_MAXADDR_32BIT, 309 BUS_SPACE_MAXADDR, NULL, NULL, 1<<24, 310 255, 1<<24, 0, &pcs->parent_dmat) != 0) { 311 printf("%s: could not create master dma tag\n", isp->isp_name); 312 free(pcs, M_DEVBUF); 313 return; 314 } 315 #endif 316 if (pci_map_int(config_id, (void (*)(void *))isp_intr, 317 (void *)isp, &IMASK) == 0) { 318 printf("%s: could not map interrupt\n", isp->isp_name); 319 free(pcs, M_DEVBUF); 320 return; 321 } 322 323 pcs->pci_id = config_id; 324 ISP_LOCK(isp); 325 isp_reset(isp); 326 if (isp->isp_state != ISP_RESETSTATE) { 327 ISP_UNLOCK(isp); 328 free(pcs, M_DEVBUF); 329 return; 330 } 331 isp_init(isp); 332 if (isp->isp_state != ISP_INITSTATE) { 333 isp_uninit(isp); 334 ISP_UNLOCK(isp); 335 free(pcs, M_DEVBUF); 336 return; 337 } 338 isp_attach(isp); 339 if (isp->isp_state != ISP_RUNSTATE) { 340 isp_uninit(isp); 341 free(pcs, M_DEVBUF); 342 } 343 ISP_UNLOCK(isp); 344 #ifdef __alpha__ 345 alpha_register_pci_scsi(config_id->bus, config_id->slot, isp->isp_sim); 346 #endif 347 } 348 349 #define PCI_BIU_REGS_OFF BIU_REGS_OFF 350 351 static u_int16_t 352 isp_pci_rd_reg(struct ispsoftc *isp, int regoff) 353 { 354 u_int16_t rv; 355 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 356 int offset, oldsxp = 0; 357 358 if ((regoff & BIU_BLOCK) != 0) { 359 offset = PCI_BIU_REGS_OFF; 360 } else if ((regoff & MBOX_BLOCK) != 0) { 361 if (isp->isp_type & ISP_HA_SCSI) 362 offset = PCI_MBOX_REGS_OFF; 363 else 364 offset = PCI_MBOX_REGS2100_OFF; 365 } else if ((regoff & SXP_BLOCK) != 0) { 366 offset = PCI_SXP_REGS_OFF; 367 /* 368 * We will assume that someone has paused the RISC processor. 369 */ 370 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1); 371 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP); 372 } else { 373 offset = PCI_RISC_REGS_OFF; 374 } 375 regoff &= 0xff; 376 offset += regoff; 377 rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset); 378 if ((regoff & SXP_BLOCK) != 0) { 379 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp); 380 } 381 return (rv); 382 } 383 384 static void 385 isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val) 386 { 387 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 388 int offset, oldsxp = 0; 389 if ((regoff & BIU_BLOCK) != 0) { 390 offset = PCI_BIU_REGS_OFF; 391 } else if ((regoff & MBOX_BLOCK) != 0) { 392 if (isp->isp_type & ISP_HA_SCSI) 393 offset = PCI_MBOX_REGS_OFF; 394 else 395 offset = PCI_MBOX_REGS2100_OFF; 396 } else if ((regoff & SXP_BLOCK) != 0) { 397 offset = PCI_SXP_REGS_OFF; 398 /* 399 * We will assume that someone has paused the RISC processor. 400 */ 401 oldsxp = isp_pci_rd_reg(isp, BIU_CONF1); 402 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp & ~BIU_PCI_CONF1_SXP); 403 } else { 404 offset = PCI_RISC_REGS_OFF; 405 } 406 regoff &= 0xff; 407 offset += regoff; 408 bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val); 409 if ((regoff & SXP_BLOCK) != 0) { 410 isp_pci_wr_reg(isp, BIU_CONF1, oldsxp); 411 } 412 } 413 414 #if __FreeBSD_version >= 300004 415 static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int)); 416 static void isp_map_result __P((void *, bus_dma_segment_t *, int, int)); 417 static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int)); 418 419 static void 420 isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) 421 { 422 struct ispsoftc *isp = (struct ispsoftc *) arg; 423 isp->isp_rquest_dma = segs->ds_addr; 424 } 425 426 static void 427 isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error) 428 { 429 struct ispsoftc *isp = (struct ispsoftc *) arg; 430 isp->isp_result_dma = segs->ds_addr; 431 } 432 433 static void 434 isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error) 435 { 436 struct ispsoftc *isp = (struct ispsoftc *) arg; 437 fcparam *fcp = isp->isp_param; 438 fcp->isp_scdma = segs->ds_addr; 439 } 440 441 static int 442 isp_pci_mbxdma(struct ispsoftc *isp) 443 { 444 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 445 caddr_t base; 446 u_int32_t len; 447 int i, error; 448 449 /* 450 * Allocate and map the request, result queues, plus FC scratch area. 451 */ 452 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN); 453 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN); 454 if (isp->isp_type & ISP_HA_FC) { 455 len += ISP2100_SCRLEN; 456 } 457 if (bus_dma_tag_create(pci->parent_dmat, 0, 0, BUS_SPACE_MAXADDR, 458 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, BUS_SPACE_MAXSIZE_32BIT, 459 0, &pci->cntrol_dmat) != 0) { 460 printf("%s: cannot create a dma tag for control spaces\n", 461 isp->isp_name); 462 return (1); 463 } 464 if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base, 465 BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) { 466 printf("%s: cannot allocate %d bytes of CCB memory\n", 467 isp->isp_name, len); 468 return (1); 469 } 470 471 isp->isp_rquest = base; 472 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest, 473 ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, pci, 0); 474 475 isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN); 476 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result, 477 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, pci, 0); 478 479 if (isp->isp_type & ISP_HA_FC) { 480 fcparam *fcp = (fcparam *) isp->isp_param; 481 fcp->isp_scratch = isp->isp_result + 482 ISP_QUEUE_SIZE(RESULT_QUEUE_LEN); 483 bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, 484 fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, pci, 0); 485 } 486 487 /* 488 * Use this opportunity to initialize/create data DMA maps. 489 */ 490 for (i = 0; i < MAXISPREQUEST; i++) { 491 error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]); 492 if (error) { 493 printf("%s: error %d creating mailbox DMA maps\n", 494 isp->isp_name, error); 495 return (1); 496 } 497 } 498 return (0); 499 } 500 501 static void dma2 __P((void *, bus_dma_segment_t *, int, int)); 502 typedef struct { 503 struct ispsoftc *isp; 504 ISP_SCSI_XFER_T *ccb; 505 ispreq_t *rq; 506 u_int8_t *iptrp; 507 u_int8_t optr; 508 u_int error; 509 } mush_t; 510 511 #define MUSHERR_NOQENTRIES -2 512 513 static void 514 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 515 { 516 mush_t *mp; 517 ISP_SCSI_XFER_T *ccb; 518 struct ispsoftc *isp; 519 struct isp_pcisoftc *pci; 520 bus_dmamap_t *dp; 521 bus_dma_segment_t *eseg; 522 ispreq_t *rq; 523 u_int8_t *iptrp; 524 u_int8_t optr; 525 ispcontreq_t *crq; 526 int drq, seglim, datalen; 527 528 mp = (mush_t *) arg; 529 if (error) { 530 mp->error = error; 531 return; 532 } 533 534 isp = mp->isp; 535 if (nseg < 1) { 536 printf("%s: zero or negative segment count\n", isp->isp_name); 537 mp->error = EFAULT; 538 return; 539 } 540 ccb = mp->ccb; 541 rq = mp->rq; 542 iptrp = mp->iptrp; 543 optr = mp->optr; 544 545 pci = (struct isp_pcisoftc *)isp; 546 dp = &pci->dmaps[rq->req_handle - 1]; 547 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 548 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD); 549 drq = REQFLAG_DATA_IN; 550 } else { 551 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE); 552 drq = REQFLAG_DATA_OUT; 553 } 554 555 datalen = XS_XFRLEN(ccb); 556 if (isp->isp_type & ISP_HA_FC) { 557 seglim = ISP_RQDSEG_T2; 558 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 559 ((ispreqt2_t *)rq)->req_flags |= drq; 560 } else { 561 seglim = ISP_RQDSEG; 562 rq->req_flags |= drq; 563 } 564 565 eseg = dm_segs + nseg; 566 567 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 568 if (isp->isp_type & ISP_HA_FC) { 569 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 570 rq2->req_dataseg[rq2->req_seg_count].ds_base = 571 dm_segs->ds_addr; 572 rq2->req_dataseg[rq2->req_seg_count].ds_count = 573 dm_segs->ds_len; 574 } else { 575 rq->req_dataseg[rq->req_seg_count].ds_base = 576 dm_segs->ds_addr; 577 rq->req_dataseg[rq->req_seg_count].ds_count = 578 dm_segs->ds_len; 579 } 580 datalen -= dm_segs->ds_len; 581 #if 0 582 if (isp->isp_type & ISP_HA_FC) { 583 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 584 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n", 585 isp->isp_name, rq->req_seg_count, 586 rq2->req_dataseg[rq2->req_seg_count].ds_count, 587 rq2->req_dataseg[rq2->req_seg_count].ds_base); 588 } else { 589 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n", 590 isp->isp_name, rq->req_seg_count, 591 rq->req_dataseg[rq->req_seg_count].ds_count, 592 rq->req_dataseg[rq->req_seg_count].ds_base); 593 } 594 #endif 595 rq->req_seg_count++; 596 dm_segs++; 597 } 598 599 while (datalen > 0 && dm_segs != eseg) { 600 crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp); 601 *iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN); 602 if (*iptrp == optr) { 603 #if 0 604 printf("%s: Request Queue Overflow++\n", isp->isp_name); 605 #endif 606 mp->error = MUSHERR_NOQENTRIES; 607 return; 608 } 609 rq->req_header.rqs_entry_count++; 610 bzero((void *)crq, sizeof (*crq)); 611 crq->req_header.rqs_entry_count = 1; 612 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 613 614 seglim = 0; 615 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 616 crq->req_dataseg[seglim].ds_base = 617 dm_segs->ds_addr; 618 crq->req_dataseg[seglim].ds_count = 619 dm_segs->ds_len; 620 #if 0 621 printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n", 622 isp->isp_name, rq->req_header.rqs_entry_count-1, 623 seglim, crq->req_dataseg[seglim].ds_count, 624 crq->req_dataseg[seglim].ds_base); 625 #endif 626 rq->req_seg_count++; 627 dm_segs++; 628 seglim++; 629 datalen -= dm_segs->ds_len; 630 } 631 } 632 } 633 634 static int 635 isp_pci_dmasetup(struct ispsoftc *isp, ISP_SCSI_XFER_T *ccb, ispreq_t *rq, 636 u_int8_t *iptrp, u_int8_t optr) 637 { 638 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 639 struct ccb_hdr *ccb_h; 640 struct ccb_scsiio *csio; 641 bus_dmamap_t *dp; 642 mush_t mush, *mp; 643 644 csio = (struct ccb_scsiio *) ccb; 645 ccb_h = &csio->ccb_h; 646 647 if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) { 648 rq->req_seg_count = 1; 649 return (CMD_QUEUED); 650 } 651 dp = &pci->dmaps[rq->req_handle - 1]; 652 653 /* 654 * Do a virtual grapevine step to collect info for 655 * the callback dma allocation that we have to use... 656 */ 657 mp = &mush; 658 mp->isp = isp; 659 mp->ccb = ccb; 660 mp->rq = rq; 661 mp->iptrp = iptrp; 662 mp->optr = optr; 663 mp->error = 0; 664 665 if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) { 666 if ((ccb_h->flags & CAM_DATA_PHYS) == 0) { 667 int error, s; 668 669 s = splsoftvm(); 670 error = bus_dmamap_load(pci->parent_dmat, *dp, 671 csio->data_ptr, csio->dxfer_len, dma2, mp, 0); 672 if (error == EINPROGRESS) { 673 bus_dmamap_unload(pci->parent_dmat, *dp); 674 mp->error = EINVAL; 675 printf("%s: deferred dma allocation not " 676 "supported\n", isp->isp_name); 677 } else if (error && mp->error == 0) { 678 mp->error = error; 679 } 680 splx(s); 681 } else { 682 /* Pointer to physical buffer */ 683 struct bus_dma_segment seg; 684 seg.ds_addr = (bus_addr_t)csio->data_ptr; 685 seg.ds_len = csio->dxfer_len; 686 dma2(mp, &seg, 1, 0); 687 } 688 } else { 689 struct bus_dma_segment *segs; 690 691 if ((ccb_h->flags & CAM_DATA_PHYS) != 0) { 692 printf("%s: Physical segment pointers unsupported", 693 isp->isp_name); 694 mp->error = EINVAL; 695 } else if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) { 696 printf("%s: Virtual segment addresses unsupported", 697 isp->isp_name); 698 mp->error = EINVAL; 699 } else { 700 /* Just use the segments provided */ 701 segs = (struct bus_dma_segment *) csio->data_ptr; 702 dma2(mp, segs, csio->sglist_cnt, 0); 703 } 704 } 705 if (mp->error) { 706 int retval = CMD_COMPLETE; 707 if (mp->error == MUSHERR_NOQENTRIES) { 708 retval = CMD_EAGAIN; 709 ccb_h->status = CAM_UNREC_HBA_ERROR; 710 } else if (mp->error == EFBIG) { 711 ccb_h->status = CAM_REQ_TOO_BIG; 712 } else if (mp->error == EINVAL) { 713 ccb_h->status = CAM_REQ_INVALID; 714 } else { 715 ccb_h->status = CAM_UNREC_HBA_ERROR; 716 } 717 return (retval); 718 } else { 719 return (CMD_QUEUED); 720 } 721 } 722 723 static void 724 isp_pci_dmateardown(struct ispsoftc *isp, ISP_SCSI_XFER_T *ccb, 725 u_int32_t handle) 726 { 727 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 728 bus_dmamap_t *dp = &pci->dmaps[handle]; 729 730 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 731 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD); 732 } else { 733 bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE); 734 } 735 bus_dmamap_unload(pci->parent_dmat, *dp); 736 } 737 738 #else /* __FreeBSD_version >= 300004 */ 739 740 741 static int 742 isp_pci_mbxdma(struct ispsoftc *isp) 743 { 744 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 745 u_int32_t len; 746 int rseg; 747 748 /* XXXX CHECK FOR ALIGNMENT */ 749 /* 750 * Allocate and map the request queue. 751 */ 752 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN); 753 isp->isp_rquest = malloc(len, M_DEVBUF, M_NOWAIT); 754 if (isp->isp_rquest == NULL) { 755 printf("%s: cannot malloc request queue\n", isp->isp_name); 756 return (1); 757 } 758 isp->isp_rquest_dma = vtophys(isp->isp_rquest); 759 760 #if 0 761 printf("RQUEST=0x%x (0x%x)...", isp->isp_rquest, isp->isp_rquest_dma); 762 #endif 763 764 /* 765 * Allocate and map the result queue. 766 */ 767 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN); 768 isp->isp_result = malloc(len, M_DEVBUF, M_NOWAIT); 769 if (isp->isp_result == NULL) { 770 free(isp->isp_rquest, M_DEVBUF); 771 printf("%s: cannot malloc result queue\n", isp->isp_name); 772 return (1); 773 } 774 isp->isp_result_dma = vtophys(isp->isp_result); 775 #if 0 776 printf("RESULT=0x%x (0x%x)\n", isp->isp_result, isp->isp_result_dma); 777 #endif 778 if (isp->isp_type & ISP_HA_FC) { 779 fcparam *fcp = isp->isp_param; 780 len = ISP2100_SCRLEN; 781 fcp->isp_scratch = (volatile caddr_t) &pci->_z._y._b; 782 fcp->isp_scdma = vtophys(fcp->isp_scratch); 783 } 784 return (0); 785 } 786 787 static int 788 isp_pci_dmasetup(struct ispsoftc *isp, ISP_SCSI_XFER_T *xs, 789 ispreq_t *rq, u_int8_t *iptrp, u_int8_t optr) 790 { 791 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 792 ispcontreq_t *crq; 793 vm_offset_t vaddr; 794 int drq, seglim; 795 u_int32_t paddr, nextpaddr, datalen, size, *ctrp; 796 797 if (xs->datalen == 0) { 798 rq->req_seg_count = 1; 799 return (CMD_QUEUED); 800 } 801 802 if (xs->flags & SCSI_DATA_IN) { 803 drq = REQFLAG_DATA_IN; 804 } else { 805 drq = REQFLAG_DATA_OUT; 806 } 807 808 if (isp->isp_type & ISP_HA_FC) { 809 seglim = ISP_RQDSEG_T2; 810 ((ispreqt2_t *)rq)->req_totalcnt = XS_XFRLEN(xs); 811 ((ispreqt2_t *)rq)->req_flags |= drq; 812 } else { 813 seglim = ISP_RQDSEG; 814 rq->req_flags |= drq; 815 } 816 817 datalen = XS_XFRLEN(xs); 818 vaddr = (vm_offset_t) xs->data; 819 paddr = vtophys(vaddr); 820 821 while (datalen != 0 && rq->req_seg_count < seglim) { 822 if (isp->isp_type & ISP_HA_FC) { 823 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 824 rq2->req_dataseg[rq2->req_seg_count].ds_base = paddr; 825 ctrp = &rq2->req_dataseg[rq2->req_seg_count].ds_count; 826 } else { 827 rq->req_dataseg[rq->req_seg_count].ds_base = paddr; 828 ctrp = &rq->req_dataseg[rq->req_seg_count].ds_count; 829 } 830 nextpaddr = paddr; 831 *(ctrp) = 0; 832 833 while (datalen != 0 && paddr == nextpaddr) { 834 nextpaddr = (paddr & (~PAGE_MASK)) + PAGE_SIZE; 835 size = nextpaddr - paddr; 836 if (size > datalen) 837 size = datalen; 838 839 *(ctrp) += size; 840 vaddr += size; 841 datalen -= size; 842 if (datalen != 0) 843 paddr = vtophys(vaddr); 844 845 } 846 #if 0 847 if (isp->isp_type & ISP_HA_FC) { 848 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 849 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n", 850 isp->isp_name, rq->req_seg_count, 851 rq2->req_dataseg[rq2->req_seg_count].ds_count, 852 rq2->req_dataseg[rq2->req_seg_count].ds_base); 853 } else { 854 printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n", 855 isp->isp_name, rq->req_seg_count, 856 rq->req_dataseg[rq->req_seg_count].ds_count, 857 rq->req_dataseg[rq->req_seg_count].ds_base); 858 } 859 #endif 860 rq->req_seg_count++; 861 } 862 863 864 865 if (datalen == 0) 866 return (CMD_QUEUED); 867 868 paddr = vtophys(vaddr); 869 while (datalen > 0) { 870 crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp); 871 *iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN); 872 if (*iptrp == optr) { 873 printf("%s: Request Queue Overflow\n", isp->isp_name); 874 XS_SETERR(xs, HBA_BOTCH); 875 return (CMD_EAGAIN); 876 } 877 rq->req_header.rqs_entry_count++; 878 bzero((void *)crq, sizeof (*crq)); 879 crq->req_header.rqs_entry_count = 1; 880 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 881 882 for (seglim = 0; datalen != 0 && seglim < ISP_CDSEG; seglim++) { 883 crq->req_dataseg[seglim].ds_base = paddr; 884 ctrp = &crq->req_dataseg[seglim].ds_count; 885 *(ctrp) = 0; 886 nextpaddr = paddr; 887 while (datalen != 0 && paddr == nextpaddr) { 888 nextpaddr = (paddr & (~PAGE_MASK)) + PAGE_SIZE; 889 size = nextpaddr - paddr; 890 if (size > datalen) 891 size = datalen; 892 893 *(ctrp) += size; 894 vaddr += size; 895 datalen -= size; 896 if (datalen != 0) 897 paddr = vtophys(vaddr); 898 } 899 #if 0 900 printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n", 901 isp->isp_name, rq->req_header.rqs_entry_count-1, 902 seglim, crq->req_dataseg[seglim].ds_count, 903 crq->req_dataseg[seglim].ds_base); 904 #endif 905 rq->req_seg_count++; 906 } 907 } 908 909 return (CMD_QUEUED); 910 } 911 #endif 912 913 static void 914 isp_pci_reset1(struct ispsoftc *isp) 915 { 916 /* Make sure the BIOS is disabled */ 917 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 918 } 919 920 static void 921 isp_pci_dumpregs(struct ispsoftc *isp) 922 { 923 struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp; 924 printf("%s: PCI Status Command/Status=%lx\n", pci->pci_isp.isp_name, 925 pci_conf_read(pci->pci_id, PCI_COMMAND_STATUS_REG)); 926 } 927