xref: /freebsd/sys/dev/isp/isp_pci.c (revision a1a4f1a0d87b594d3f17a97dc0127eec1417e6f6)
1 /* $FreeBSD$ */
2 /*
3  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4  * FreeBSD Version.
5  *
6  *---------------------------------------
7  * Copyright (c) 1997, 1998, 1999 by Matthew Jacob
8  * NASA/Ames Research Center
9  * All rights reserved.
10  *---------------------------------------
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice immediately at the beginning of the file, without modification,
17  *    this list of conditions, and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. The name of the author may not be used to endorse or promote products
22  *    derived from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
28  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 #include <dev/isp/isp_freebsd.h>
37 #include <dev/isp/asm_pci.h>
38 #include <sys/malloc.h>
39 #include <vm/vm.h>
40 #include <vm/pmap.h>
41 #include <vm/vm_extern.h>
42 
43 
44 #include <pci/pcireg.h>
45 #include <pci/pcivar.h>
46 
47 #include <machine/bus_memio.h>
48 #include <machine/bus_pio.h>
49 #include <machine/bus.h>
50 #include <machine/md_var.h>
51 
52 
53 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
54 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
55 #ifndef ISP_DISABLE_1080_SUPPORT
56 static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
57 static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
58 #endif
59 static int isp_pci_mbxdma __P((struct ispsoftc *));
60 static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
61 	ispreq_t *, u_int8_t *, u_int8_t));
62 static void isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
63 	u_int32_t));
64 
65 static void isp_pci_reset1 __P((struct ispsoftc *));
66 static void isp_pci_dumpregs __P((struct ispsoftc *));
67 
68 #ifndef ISP_DISABLE_1020_SUPPORT
69 static struct ispmdvec mdvec = {
70 	isp_pci_rd_reg,
71 	isp_pci_wr_reg,
72 	isp_pci_mbxdma,
73 	isp_pci_dmasetup,
74 	isp_pci_dmateardown,
75 	NULL,
76 	isp_pci_reset1,
77 	isp_pci_dumpregs,
78 	ISP_RISC_CODE,
79 	ISP_CODE_LENGTH,
80 	ISP_CODE_ORG,
81 	ISP_CODE_VERSION,
82 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
83 	0
84 };
85 #endif
86 
87 #ifndef ISP_DISABLE_1080_SUPPORT
88 static struct ispmdvec mdvec_1080 = {
89 	isp_pci_rd_reg_1080,
90 	isp_pci_wr_reg_1080,
91 	isp_pci_mbxdma,
92 	isp_pci_dmasetup,
93 	isp_pci_dmateardown,
94 	NULL,
95 	isp_pci_reset1,
96 	isp_pci_dumpregs,
97 	ISP1080_RISC_CODE,
98 	ISP1080_CODE_LENGTH,
99 	ISP1080_CODE_ORG,
100 	ISP1080_CODE_VERSION,
101 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
102 	0
103 };
104 #endif
105 
106 #ifndef ISP_DISABLE_2100_SUPPORT
107 static struct ispmdvec mdvec_2100 = {
108 	isp_pci_rd_reg,
109 	isp_pci_wr_reg,
110 	isp_pci_mbxdma,
111 	isp_pci_dmasetup,
112 	isp_pci_dmateardown,
113 	NULL,
114 	isp_pci_reset1,
115 	isp_pci_dumpregs,
116 	ISP2100_RISC_CODE,
117 	ISP2100_CODE_LENGTH,
118 	ISP2100_CODE_ORG,
119 	ISP2100_CODE_VERSION,
120 	0,			/* Irrelevant to the 2100 */
121 	0
122 };
123 #endif
124 
125 #ifndef	ISP_DISABLE_2200_SUPPORT
126 static struct ispmdvec mdvec_2200 = {
127 	isp_pci_rd_reg,
128 	isp_pci_wr_reg,
129 	isp_pci_mbxdma,
130 	isp_pci_dmasetup,
131 	isp_pci_dmateardown,
132 	NULL,
133 	isp_pci_reset1,
134 	isp_pci_dumpregs,
135 	ISP2200_RISC_CODE,
136 	ISP2200_CODE_LENGTH,
137 	ISP2100_CODE_ORG,
138 	ISP2200_CODE_VERSION,
139 	0,
140 	0
141 };
142 #endif
143 
144 #ifndef	SCSI_ISP_PREFER_MEM_MAP
145 #define	SCSI_ISP_PREFER_MEM_MAP	0
146 #endif
147 
148 #ifndef	PCIM_CMD_INVEN
149 #define	PCIM_CMD_INVEN			0x10
150 #endif
151 #ifndef	PCIM_CMD_BUSMASTEREN
152 #define	PCIM_CMD_BUSMASTEREN		0x0004
153 #endif
154 #ifndef	PCIM_CMD_PERRESPEN
155 #define	PCIM_CMD_PERRESPEN		0x0040
156 #endif
157 #ifndef	PCIM_CMD_SEREN
158 #define	PCIM_CMD_SEREN			0x0100
159 #endif
160 
161 #ifndef	PCIR_COMMAND
162 #define	PCIR_COMMAND			0x04
163 #endif
164 
165 #ifndef	PCIR_CACHELNSZ
166 #define	PCIR_CACHELNSZ			0x0c
167 #endif
168 
169 #ifndef	PCIR_LATTIMER
170 #define	PCIR_LATTIMER			0x0d
171 #endif
172 
173 #ifndef	PCIR_ROMADDR
174 #define	PCIR_ROMADDR			0x30
175 #endif
176 
177 #ifndef	PCI_VENDOR_QLOGIC
178 #define	PCI_VENDOR_QLOGIC	0x1077
179 #endif
180 
181 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
182 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
183 #endif
184 
185 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
186 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
187 #endif
188 
189 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
190 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
191 #endif
192 
193 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
194 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
195 #endif
196 
197 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
198 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
199 #endif
200 
201 #define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
202 
203 #define	PCI_QLOGIC_ISP1080	\
204 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
205 
206 #define	PCI_QLOGIC_ISP1240	\
207 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
208 
209 #define	PCI_QLOGIC_ISP2100	\
210 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
211 
212 #define	PCI_QLOGIC_ISP2200	\
213 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
214 
215 #define	IO_MAP_REG	0x10
216 #define	MEM_MAP_REG	0x14
217 
218 #define	PCI_DFLT_LTNCY	0x40
219 #define	PCI_DFLT_LNSZ	0x10
220 
221 static const char *isp_pci_probe __P((pcici_t tag, pcidi_t type));
222 static void isp_pci_attach __P((pcici_t config_d, int unit));
223 
224 /* This distinguishing define is not right, but it does work */
225 #ifdef __alpha__
226 #define IO_SPACE_MAPPING	ALPHA_BUS_SPACE_IO
227 #define MEM_SPACE_MAPPING	ALPHA_BUS_SPACE_MEM
228 #else
229 #define IO_SPACE_MAPPING	I386_BUS_SPACE_IO
230 #define MEM_SPACE_MAPPING	I386_BUS_SPACE_MEM
231 #endif
232 
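/*
 * PCI-specific softc.  The generic ispsoftc must be the first member so
 * the core driver can cast between the two.  The remaining fields carry
 * the PCI tag, the bus space tag/handle used for register access, the
 * per-register-block offsets into the mapped range, and the busdma tags
 * and maps used for the control queues and per-command data transfers.
 */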
233 struct isp_pcisoftc {
234 	struct ispsoftc			pci_isp;
235 	pcici_t				pci_id;
236 	bus_space_tag_t			pci_st;
237 	bus_space_handle_t		pci_sh;
238 	int16_t				pci_poff[_NREG_BLKS];
239 	bus_dma_tag_t			parent_dmat;
240 	bus_dma_tag_t			cntrol_dmat;
241 	bus_dmamap_t			cntrol_dmap;
242 	bus_dmamap_t			dmaps[MAXISPREQUEST];
243 };
244 
245 static u_long ispunit;
246 
247 static struct pci_device isp_pci_driver = {
248 	"isp",
249 	isp_pci_probe,
250 	isp_pci_attach,
251 	&ispunit,
252 	NULL
253 };
254 COMPAT_PCI_DRIVER (isp_pci, isp_pci_driver);
255 
256 
257 static const char *
258 isp_pci_probe(pcici_t tag, pcidi_t type)
259 {
260 	static int oneshot = 1;
261 	char *x;
262 
263 	switch (type) {
264 #ifndef	ISP_DISABLE_1020_SUPPORT
265 	case PCI_QLOGIC_ISP:
266 		x = "Qlogic ISP 1020/1040 PCI SCSI Adapter";
267 		break;
268 #endif
269 #ifndef	ISP_DISABLE_1080_SUPPORT
270 	case PCI_QLOGIC_ISP1080:
271 		x = "Qlogic ISP 1080 PCI SCSI Adapter";
272 		break;
273 	case PCI_QLOGIC_ISP1240:
274 		x = "Qlogic ISP 1240 PCI SCSI Adapter";
275 		break;
276 #endif
277 #ifndef	ISP_DISABLE_2100_SUPPORT
278 	case PCI_QLOGIC_ISP2100:
279 		x = "Qlogic ISP 2100 PCI FC-AL Adapter";
280 		break;
281 #endif
282 #ifndef	ISP_DISABLE_2200_SUPPORT
283 	case PCI_QLOGIC_ISP2200:
284 		x = "Qlogic ISP 2200 PCI FC-AL Adapter";
285 		break;
286 #endif
287 	default:
288 		return (NULL);
289 	}
290 	if (oneshot) {
291 		oneshot = 0;
292 		printf("%s Version %d.%d, Core Version %d.%d\n", PVS,
293 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
294 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
295 	}
296 	return (x);
297 }
298 
299 static void
300 isp_pci_attach(pcici_t cfid, int unit)
301 {
302 	int mapped, prefer_mem_map, bitmap;
303 	pci_port_t io_port;
304 	u_int32_t data, linesz, psize, basetype;
305 	struct isp_pcisoftc *pcs;
306 	struct ispsoftc *isp;
307 	vm_offset_t vaddr, paddr;
308 	struct ispmdvec *mdvp;
309 	bus_size_t lim;
310 	ISP_LOCKVAL_DECL;
311 
312 	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
313 	if (pcs == NULL) {
314 		printf("isp%d: cannot allocate softc\n", unit);
315 		return;
316 	}
317 	bzero(pcs, sizeof (struct isp_pcisoftc));
318 
319 	/*
320 	 * Figure out if we're supposed to skip this one.
321 	 */
322 	if (getenv_int("isp_disable", &bitmap)) {
323 		if (bitmap & (1 << unit)) {
324 			printf("isp%d: not configuring\n", unit);
			free(pcs, M_DEVBUF);
325 			return;
326 		}
327 	}
328 
329 	/*
330 	 * Figure out which to try first: memory mapping or I/O mapping?
331 	 */
332 #if	SCSI_ISP_PREFER_MEM_MAP == 1
333 	prefer_mem_map = 1;
334 #else
335 	prefer_mem_map = 0;
336 #endif
337 	bitmap = 0;
338 	if (getenv_int("isp_mem_map", &bitmap)) {
339 		if (bitmap & (1 << unit))
340 			prefer_mem_map = 1;
341 	}
342 	bitmap = 0;
343 	if (getenv_int("isp_io_map", &bitmap)) {
344 		if (bitmap & (1 << unit))
345 			prefer_mem_map = 0;
346 	}
347 
348 	vaddr = paddr = NULL;
349 	mapped = 0;
350 	linesz = PCI_DFLT_LNSZ;
351 	/*
352 	 * Note that pci_conf_read is a 32 bit word aligned function.
353 	 */
354 	data = pci_conf_read(cfid, PCIR_COMMAND);
355 	if (prefer_mem_map) {
356 		if (data & PCI_COMMAND_MEM_ENABLE) {
357 			if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
358 				pcs->pci_st = MEM_SPACE_MAPPING;
359 				pcs->pci_sh = vaddr;
360 				mapped++;
361 			}
362 		}
363 		if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) {
364 			if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
365 				pcs->pci_st = IO_SPACE_MAPPING;
366 				pcs->pci_sh = io_port;
367 				mapped++;
368 			}
369 		}
370 	} else {
371 		if (data & PCI_COMMAND_IO_ENABLE) {
372 			if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
373 				pcs->pci_st = IO_SPACE_MAPPING;
374 				pcs->pci_sh = io_port;
375 				mapped++;
376 			}
377 		}
378 		if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) {
379 			if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
380 				pcs->pci_st = MEM_SPACE_MAPPING;
381 				pcs->pci_sh = vaddr;
382 				mapped++;
383 			}
384 		}
385 	}
386 	if (mapped == 0) {
387 		printf("isp%d: unable to map any ports!\n", unit);
388 		free(pcs, M_DEVBUF);
389 		return;
390 	}
391 	if (bootverbose)
392 		printf("isp%d: using %s space register mapping\n", unit,
393 		    pcs->pci_st == IO_SPACE_MAPPING? "I/O" : "Memory");
394 
395 	data = pci_conf_read(cfid, PCI_ID_REG);
396 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
397 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
398 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
399 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
400 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
401 	/*
402 	 * Defaults; also quiets GCC uninitialized-variable warnings.
403 	 */
404 	mdvp = &mdvec;
405 	basetype = ISP_HA_SCSI_UNKNOWN;
406 	psize = sizeof (sdparam);
407 	lim = BUS_SPACE_MAXSIZE_32BIT;
408 #ifndef	ISP_DISABLE_1020_SUPPORT
409 	if (data == PCI_QLOGIC_ISP) {
410 		mdvp = &mdvec;
411 		basetype = ISP_HA_SCSI_UNKNOWN;
412 		psize = sizeof (sdparam);
413 		lim = BUS_SPACE_MAXSIZE_24BIT;
414 	}
415 #endif
416 #ifndef	ISP_DISABLE_1080_SUPPORT
417 	if (data == PCI_QLOGIC_ISP1080) {
418 		mdvp = &mdvec_1080;
419 		basetype = ISP_HA_SCSI_1080;
420 		psize = sizeof (sdparam);
421 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
422 		    ISP1080_DMA_REGS_OFF;
423 	}
424 	if (data == PCI_QLOGIC_ISP1240) {
425 		mdvp = &mdvec_1080;
426 		basetype = ISP_HA_SCSI_12X0;
427 		psize = 2 * sizeof (sdparam);
428 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
429 		    ISP1080_DMA_REGS_OFF;
430 	}
431 #endif
432 #ifndef	ISP_DISABLE_2100_SUPPORT
433 	if (data == PCI_QLOGIC_ISP2100) {
434 		mdvp = &mdvec_2100;
435 		basetype = ISP_HA_FC_2100;
436 		psize = sizeof (fcparam);
437 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
438 		    PCI_MBOX_REGS2100_OFF;
439 		data = pci_conf_read(cfid, PCI_CLASS_REG);
440 		if ((data & 0xff) < 3) {
441 			/*
442 			 * XXX: Need to get the actual revision
443 			 * XXX: number of the 2100 FB. At any rate,
444 			 * XXX: lower cache line size for early revision
445 			 * XXX: boards.
446 			 */
447 			linesz = 1;
448 		}
449 	}
450 #endif
451 #ifndef	ISP_DISABLE_2200_SUPPORT
452 	if (data == PCI_QLOGIC_ISP2200) {
453 		mdvp = &mdvec_2200;
454 		basetype = ISP_HA_FC_2200;
455 		psize = sizeof (fcparam);
456 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
457 		    PCI_MBOX_REGS2100_OFF;
458 	}
459 #endif
460 	isp = &pcs->pci_isp;
461 	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT);
462 	if (isp->isp_param == NULL) {
463 		printf("isp%d: cannot allocate parameter data\n", unit);
		free(pcs, M_DEVBUF);
464 		return;
465 	}
466 	bzero(isp->isp_param, psize);
467 	isp->isp_mdvec = mdvp;
468 	isp->isp_type = basetype;
469 	(void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit);
470 	isp->isp_osinfo.unit = unit;
471 
472 	ISP_LOCK(isp);
473 
474 	/*
475 	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
476 	 * are set.
477 	 */
478 	data = pci_cfgread(cfid, PCIR_COMMAND, 2);
479 	data |=	PCIM_CMD_SEREN		|
480 		PCIM_CMD_PERRESPEN	|
481 		PCIM_CMD_BUSMASTEREN	|
482 		PCIM_CMD_INVEN;
483 	pci_cfgwrite(cfid, PCIR_COMMAND, data, 2);
484 
485 	/*
486 	 * Make sure the Cache Line Size register is set sensibly.
487 	 */
488 	data = pci_cfgread(cfid, PCIR_CACHELNSZ, 1);
489 	if (data != linesz) {
490 		data = PCI_DFLT_LNSZ;
491 		printf("%s: set PCI line size to %d\n", isp->isp_name, data);
492 		pci_cfgwrite(cfid, PCIR_CACHELNSZ, data, 1);
493 	}
494 
495 	/*
496 	 * Make sure the Latency Timer is sane.
497 	 */
498 	data = pci_cfgread(cfid, PCIR_LATTIMER, 1);
499 	if (data < PCI_DFLT_LTNCY) {
500 		data = PCI_DFLT_LTNCY;
501 		printf("%s: set PCI latency to %d\n", isp->isp_name, data);
502 		pci_cfgwrite(cfid, PCIR_LATTIMER, data, 1);
503 	}
504 
505 	/*
506 	 * Make sure we've disabled the ROM.
507 	 */
508 	data = pci_cfgread(cfid, PCIR_ROMADDR, 4);
509 	data &= ~1;
510 	pci_cfgwrite(cfid, PCIR_ROMADDR, data, 4);
511 	ISP_UNLOCK(isp);
512 
513 	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
514 	    BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
515 	    255, lim, 0, &pcs->parent_dmat) != 0) {
516 		printf("%s: could not create master dma tag\n", isp->isp_name);
517 		free(pcs, M_DEVBUF);
518 		return;
519 	}
520 	if (pci_map_int(cfid, (void (*)(void *))isp_intr,
521 	    (void *)isp, &IMASK) == 0) {
522 		printf("%s: could not map interrupt\n", isp->isp_name);
523 		free(pcs, M_DEVBUF);
524 		return;
525 	}
526 
527 	pcs->pci_id = cfid;
528 #ifdef	SCSI_ISP_NO_FWLOAD_MASK
529 	if (SCSI_ISP_NO_FWLOAD_MASK && (SCSI_ISP_NO_FWLOAD_MASK & (1 << unit)))
530 		isp->isp_confopts |= ISP_CFG_NORELOAD;
531 #endif
532 	if (getenv_int("isp_no_fwload", &bitmap)) {
533 		if (bitmap & (1 << unit))
534 			isp->isp_confopts |= ISP_CFG_NORELOAD;
535 	}
536 	if (getenv_int("isp_fwload", &bitmap)) {
537 		if (bitmap & (1 << unit))
538 			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
539 	}
540 
541 #ifdef	SCSI_ISP_NO_NVRAM_MASK
542 	if (SCSI_ISP_NO_NVRAM_MASK && (SCSI_ISP_NO_NVRAM_MASK & (1 << unit))) {
543 		printf("%s: ignoring NVRAM\n", isp->isp_name);
544 		isp->isp_confopts |= ISP_CFG_NONVRAM;
545 	}
546 #endif
547 	if (getenv_int("isp_no_nvram", &bitmap)) {
548 		if (bitmap & (1 << unit))
549 			isp->isp_confopts |= ISP_CFG_NONVRAM;
550 	}
551 	if (getenv_int("isp_nvram", &bitmap)) {
552 		if (bitmap & (1 << unit))
553 			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
554 	}
555 
556 #ifdef	SCSI_ISP_FCDUPLEX
557 	if (IS_FC(isp)) {
558 		if (SCSI_ISP_FCDUPLEX && (SCSI_ISP_FCDUPLEX & (1 << unit))) {
559 			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
560 		}
561 	}
562 #endif
563 	if (getenv_int("isp_fcduplex", &bitmap)) {
564 		if (bitmap & (1 << unit))
565 			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
566 	}
567 	if (getenv_int("isp_no_fcduplex", &bitmap)) {
568 		if (bitmap & (1 << unit))
569 			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
570 	}
571 
572 	if (getenv_int("isp_seed", &isp->isp_osinfo.seed)) {
573 		isp->isp_osinfo.seed <<= 8;
574 		isp->isp_osinfo.seed += (unit + 1);
575 	} else {
576 		/*
577 		 * poor man's attempt at pseudo randomness.
578 		 */
579 		long i = (intptr_t) isp;
580 
581 		i >>= 5;
582 		i &= 0x7;
583 
584 		/*
585 		 * This isn't very random, but it's the best we can do for
586 		 * the real edge case of cards that don't have WWNs.
587 		 */
588 		isp->isp_osinfo.seed += ((int) cfid->bus) << 16;
589 		isp->isp_osinfo.seed += ((int) cfid->slot) << 8;
590 		isp->isp_osinfo.seed += ((int) cfid->func);
591 		while (version[i])
592 			isp->isp_osinfo.seed += (int) version[i++];
593 		isp->isp_osinfo.seed <<= 8;
594 		isp->isp_osinfo.seed += (unit + 1);
595 	}
596 
597 	ISP_LOCK(isp);
598 	isp_reset(isp);
599 	if (isp->isp_state != ISP_RESETSTATE) {
600 		(void) pci_unmap_int(cfid);
601 		ISP_UNLOCK(isp);
602 		free(pcs, M_DEVBUF);
603 		return;
604 	}
605 	isp_init(isp);
606 	if (isp->isp_state != ISP_INITSTATE) {
607 		/* If we're a Fibre Channel Card, we allow deferred attach */
608 		if (IS_SCSI(isp)) {
609 			isp_uninit(isp);
610 			(void) pci_unmap_int(cfid); /* Does nothing */
611 			ISP_UNLOCK(isp);
612 			free(pcs, M_DEVBUF);
613 			return;
614 		}
615 	}
616 	isp_attach(isp);
617 	if (isp->isp_state != ISP_RUNSTATE) {
618 		/* If we're a Fibre Channel Card, we allow deferred attach */
619 		if (IS_SCSI(isp)) {
620 			isp_uninit(isp);
621 			(void) pci_unmap_int(cfid); /* Does nothing */
622 			ISP_UNLOCK(isp);
623 			free(pcs, M_DEVBUF);
624 			return;
625 		}
626 	}
627 	ISP_UNLOCK(isp);
628 #ifdef __alpha__
629 	/*
630 	 * THIS SHOULD NOT HAVE TO BE HERE
631 	 */
632 	alpha_register_pci_scsi(cfid->bus, cfid->slot, isp->isp_sim);
633 #endif
634 }
635 
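/*
 * Register accessors.  The low byte of regoff is the offset within a
 * register block; the block is translated to a PCI offset through the
 * pci_poff[] table.  SXP registers are only visible while the SXP select
 * bit is set in BIU_CONF1, so that bit is toggled around the access (the
 * RISC processor is assumed to be paused by the caller).
 */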
636 static u_int16_t
637 isp_pci_rd_reg(isp, regoff)
638 	struct ispsoftc *isp;
639 	int regoff;
640 {
641 	u_int16_t rv;
642 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
643 	int offset, oldconf = 0;
644 
645 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
646 		/*
647 		 * We will assume that someone has paused the RISC processor.
648 		 */
649 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
650 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
651 	}
652 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
653 	offset += (regoff & 0xff);
654 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
655 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
656 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
657 	}
658 	return (rv);
659 }
660 
661 static void
662 isp_pci_wr_reg(isp, regoff, val)
663 	struct ispsoftc *isp;
664 	int regoff;
665 	u_int16_t val;
666 {
667 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
668 	int offset, oldconf = 0;
669 
670 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
671 		/*
672 		 * We will assume that someone has paused the RISC processor.
673 		 */
674 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
675 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
676 	}
677 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
678 	offset += (regoff & 0xff);
679 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
680 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
681 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
682 	}
683 }
684 
685 #ifndef	ISP_DISABLE_1080_SUPPORT
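/*
 * The 1080/1240 parts use different BIU_CONF1 select bits and also need
 * the DMA register block selected explicitly, hence these chip-specific
 * variants of the register accessors.
 */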
686 static u_int16_t
687 isp_pci_rd_reg_1080(isp, regoff)
688 	struct ispsoftc *isp;
689 	int regoff;
690 {
691 	u_int16_t rv;
692 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
693 	int offset, oc = 0;
694 
695 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
696 		/*
697 		 * We will assume that someone has paused the RISC processor.
698 		 */
699 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
700 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
701 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
702 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
703 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
704 	}
705 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
706 	offset += (regoff & 0xff);
707 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
708 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
709 	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
710 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
711 	}
712 	return (rv);
713 }
714 
715 static void
716 isp_pci_wr_reg_1080(isp, regoff, val)
717 	struct ispsoftc *isp;
718 	int regoff;
719 	u_int16_t val;
720 {
721 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
722 	int offset, oc = 0;
723 
724 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
725 		/*
726 		 * We will assume that someone has paused the RISC processor.
727 		 */
728 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
729 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
730 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
731 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
732 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
733 	}
734 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
735 	offset += (regoff & 0xff);
736 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
737 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
738 	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
739 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
740 	}
741 }
742 #endif
743 
744 
745 static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
746 static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
747 static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));
748 
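/*
 * Argument block handed to the bus_dmamap_load() callbacks below so that
 * they can record the loaded bus address (or an error) in the softc.
 */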
749 struct imush {
750 	struct ispsoftc *isp;
751 	int error;
752 };
753 
754 static void
755 isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
756 {
757 	struct imush *imushp = (struct imush *) arg;
758 	if (error) {
759 		imushp->error = error;
760 	} else {
761 		imushp->isp->isp_rquest_dma = segs->ds_addr;
762 	}
763 }
764 
765 static void
766 isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
767 {
768 	struct imush *imushp = (struct imush *) arg;
769 	if (error) {
770 		imushp->error = error;
771 	} else {
772 		imushp->isp->isp_result_dma = segs->ds_addr;
773 	}
774 }
775 
776 static void
777 isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
778 {
779 	struct imush *imushp = (struct imush *) arg;
780 	if (error) {
781 		imushp->error = error;
782 	} else {
783 		fcparam *fcp = imushp->isp->isp_param;
784 		fcp->isp_scdma = segs->ds_addr;
785 	}
786 }
787 
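/*
 * Allocate one busdma region (a single segment) that holds the request
 * queue, the result queue and, for Fibre Channel cards, the scratch
 * area, then create a data DMA map for each request queue slot.
 */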
788 static int
789 isp_pci_mbxdma(struct ispsoftc *isp)
790 {
791 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
792 	caddr_t base;
793 	u_int32_t len;
794 	int i, error;
795 	bus_size_t lim;
796 	struct imush im;
797 
798 
799 	if (IS_FC(isp) || IS_1080(isp) || IS_12X0(isp))
800 		lim = BUS_SPACE_MAXADDR + 1;
801 	else
802 		lim = BUS_SPACE_MAXADDR_24BIT + 1;
803 
804 	/*
805 	 * Allocate and map the request, result queues, plus FC scratch area.
806 	 */
807 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
808 	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
809 	if (IS_FC(isp)) {
810 		len += ISP2100_SCRLEN;
811 	}
812 	if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
813 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
814 	    BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
815 		printf("%s: cannot create a dma tag for control spaces\n",
816 		    isp->isp_name);
817 		return (1);
818 	}
819 	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
820 	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
821 		printf("%s: cannot allocate %d bytes of CCB memory\n",
822 		    isp->isp_name, len);
823 		return (1);
824 	}
825 
826 	isp->isp_rquest = base;
827 	im.isp = isp;
828 	im.error = 0;
829 	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
830 	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, &im, 0);
831 	if (im.error) {
832 		printf("%s: error %d loading dma map for DMA request queue\n",
833 		    isp->isp_name, im.error);
834 		return (1);
835 	}
836 	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
837 	im.error = 0;
838 	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
839 	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, &im, 0);
840 	if (im.error) {
841 		printf("%s: error %d loading dma map for DMA result queue\n",
842 		    isp->isp_name, im.error);
843 		return (1);
844 	}
845 
846 	/*
847 	 * Use this opportunity to initialize/create data DMA maps.
848 	 */
849 	for (i = 0; i < MAXISPREQUEST; i++) {
850 		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
851 		if (error) {
852 			printf("%s: error %d creating mailbox DMA maps\n",
853 			    isp->isp_name, error);
854 			return (1);
855 		}
856 	}
857 	if (IS_FC(isp)) {
858 		fcparam *fcp = (fcparam *) isp->isp_param;
859 		fcp->isp_scratch = base +
860 			ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN) +
861 			ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
862 		im.error = 0;
863 		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
864 		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
865 		if (im.error) {
866 			printf("%s: error %d loading FC scratch area\n",
867 			    isp->isp_name, im.error);
868 			return (1);
869 		}
870 	}
871 	return (0);
872 }
873 
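/*
 * dma2() is the bus_dmamap_load() callback for data transfers; mush_t
 * bundles what it needs (softc, CCB, request entry, queue pointers) and
 * gives it a place to report errors back to isp_pci_dmasetup().
 */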
874 static void dma2 __P((void *, bus_dma_segment_t *, int, int));
875 typedef struct {
876 	struct ispsoftc *isp;
877 	ISP_SCSI_XFER_T *ccb;
878 	ispreq_t *rq;
879 	u_int8_t *iptrp;
880 	u_int8_t optr;
881 	u_int error;
882 } mush_t;
883 
884 #define	MUSHERR_NOQENTRIES	-2
885 
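/*
 * Fill in the request entry's data segment list from the segments that
 * busdma produced, spilling any overflow into continuation entries
 * allocated from the request queue.
 */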
886 static void
887 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
888 {
889 	mush_t *mp;
890 	ISP_SCSI_XFER_T *ccb;
891 	struct ispsoftc *isp;
892 	struct isp_pcisoftc *pci;
893 	bus_dmamap_t *dp;
894 	bus_dma_segment_t *eseg;
895 	ispreq_t *rq;
896 	u_int8_t *iptrp;
897 	u_int8_t optr;
898 	ispcontreq_t *crq;
899 	int drq, seglim, datalen;
900 
901 	mp = (mush_t *) arg;
902 	if (error) {
903 		mp->error = error;
904 		return;
905 	}
906 
907 	isp = mp->isp;
908 	if (nseg < 1) {
909 		printf("%s: zero or negative segment count\n", isp->isp_name);
910 		mp->error = EFAULT;
911 		return;
912 	}
913 	ccb = mp->ccb;
914 	rq = mp->rq;
915 	iptrp = mp->iptrp;
916 	optr = mp->optr;
917 
918 	pci = (struct isp_pcisoftc *)isp;
919 	dp = &pci->dmaps[rq->req_handle - 1];
920 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
921 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
922 		drq = REQFLAG_DATA_IN;
923 	} else {
924 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
925 		drq = REQFLAG_DATA_OUT;
926 	}
927 
928 	datalen = XS_XFRLEN(ccb);
929 	if (IS_FC(isp)) {
930 		seglim = ISP_RQDSEG_T2;
931 		((ispreqt2_t *)rq)->req_totalcnt = datalen;
932 		((ispreqt2_t *)rq)->req_flags |= drq;
933 	} else {
934 		seglim = ISP_RQDSEG;
935 		rq->req_flags |= drq;
936 	}
937 
938 	eseg = dm_segs + nseg;
939 
940 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
941 		if (IS_FC(isp)) {
942 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
943 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
944 			    dm_segs->ds_addr;
945 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
946 			    dm_segs->ds_len;
947 		} else {
948 			rq->req_dataseg[rq->req_seg_count].ds_base =
949 				dm_segs->ds_addr;
950 			rq->req_dataseg[rq->req_seg_count].ds_count =
951 				dm_segs->ds_len;
952 		}
953 		datalen -= dm_segs->ds_len;
954 #if	0
955 		if (IS_FC(isp)) {
956 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
957 			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
958 			    isp->isp_name, rq->req_seg_count,
959 			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
960 			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
961 		} else {
962 			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
963 			    isp->isp_name, rq->req_seg_count,
964 			    rq->req_dataseg[rq->req_seg_count].ds_count,
965 			    rq->req_dataseg[rq->req_seg_count].ds_base);
966 		}
967 #endif
968 		rq->req_seg_count++;
969 		dm_segs++;
970 	}
971 
972 	while (datalen > 0 && dm_segs != eseg) {
973 		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
974 		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
975 		if (*iptrp == optr) {
976 #if	0
977 			printf("%s: Request Queue Overflow++\n", isp->isp_name);
978 #endif
979 			mp->error = MUSHERR_NOQENTRIES;
980 			return;
981 		}
982 		rq->req_header.rqs_entry_count++;
983 		bzero((void *)crq, sizeof (*crq));
984 		crq->req_header.rqs_entry_count = 1;
985 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
986 
987 		seglim = 0;
988 		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
989 			crq->req_dataseg[seglim].ds_base =
990 			    dm_segs->ds_addr;
991 			crq->req_dataseg[seglim].ds_count =
992 			    dm_segs->ds_len;
993 #if	0
994 			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
995 			    isp->isp_name, rq->req_header.rqs_entry_count-1,
996 			    seglim, crq->req_dataseg[seglim].ds_count,
997 			    crq->req_dataseg[seglim].ds_base);
998 #endif
999 			rq->req_seg_count++;
1000 			seglim++;
1001 			datalen -= dm_segs->ds_len;
1002 			dm_segs++;
1003 		}
1004 	}
1005 }
1006 
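/*
 * Map a CCB's data for DMA.  Commands that move no data are queued as
 * is; otherwise the buffer (virtual, physical, or a physical S/G list)
 * is run through dma2(), either as a bus_dmamap_load() callback or by
 * calling it directly, and any error is translated into a CAM status.
 */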
1007 static int
1008 isp_pci_dmasetup(struct ispsoftc *isp, ISP_SCSI_XFER_T *ccb, ispreq_t *rq,
1009 	u_int8_t *iptrp, u_int8_t optr)
1010 {
1011 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1012 	struct ccb_hdr *ccb_h;
1013 	struct ccb_scsiio *csio;
1014 	bus_dmamap_t *dp = NULL;
1015 	mush_t mush, *mp;
1016 
1017 	csio = (struct ccb_scsiio *) ccb;
1018 	ccb_h = &csio->ccb_h;
1019 
1020 	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
1021 		rq->req_seg_count = 1;
1022 		return (CMD_QUEUED);
1023 	}
1024 
1025 	/*
1026 	 * Do a virtual grapevine step to collect info for
1027 	 * the callback dma allocation that we have to use...
1028 	 */
1029 	mp = &mush;
1030 	mp->isp = isp;
1031 	mp->ccb = ccb;
1032 	mp->rq = rq;
1033 	mp->iptrp = iptrp;
1034 	mp->optr = optr;
1035 	mp->error = 0;
1036 
1037 	if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
1038 		if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
1039 			int error, s;
1040 
1041 			dp = &pci->dmaps[rq->req_handle - 1];
1042 			s = splsoftvm();
1043 			error = bus_dmamap_load(pci->parent_dmat, *dp,
1044 			    csio->data_ptr, csio->dxfer_len, dma2, mp, 0);
1045 			if (error == EINPROGRESS) {
1046 				bus_dmamap_unload(pci->parent_dmat, *dp);
1047 				mp->error = EINVAL;
1048 				printf("%s: deferred dma allocation not "
1049 				    "supported\n", isp->isp_name);
1050 			} else if (error && mp->error == 0) {
1051 #ifdef	DIAGNOSTIC
1052 				printf("%s: error %d in dma mapping code\n",
1053 				    isp->isp_name, error);
1054 #endif
1055 				mp->error = error;
1056 			}
1057 			splx(s);
1058 		} else {
1059 			/* Pointer to physical buffer */
1060 			struct bus_dma_segment seg;
1061 			seg.ds_addr = (bus_addr_t)csio->data_ptr;
1062 			seg.ds_len = csio->dxfer_len;
1063 			dma2(mp, &seg, 1, 0);
1064 		}
1065 	} else {
1066 		struct bus_dma_segment *segs;
1067 
1068 		if ((ccb_h->flags & CAM_DATA_PHYS) != 0) {
1069 			printf("%s: Physical segment pointers unsupported\n",
1070 				isp->isp_name);
1071 			mp->error = EINVAL;
1072 		} else if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) {
1073 			printf("%s: Virtual segment addresses unsupported\n",
1074 				isp->isp_name);
1075 			mp->error = EINVAL;
1076 		} else {
1077 			/* Just use the segments provided */
1078 			segs = (struct bus_dma_segment *) csio->data_ptr;
1079 			dma2(mp, segs, csio->sglist_cnt, 0);
1080 		}
1081 	}
1082 	if (mp->error) {
1083 		int retval = CMD_COMPLETE;
1084 		if (mp->error == MUSHERR_NOQENTRIES) {
1085 			retval = CMD_EAGAIN;
1086 		} else if (mp->error == EFBIG) {
1087 			XS_SETERR(csio, CAM_REQ_TOO_BIG);
1088 		} else if (mp->error == EINVAL) {
1089 			XS_SETERR(csio, CAM_REQ_INVALID);
1090 		} else {
1091 			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
1092 		}
1093 		return (retval);
1094 	} else {
1095 		/*
1096 		 * Check to see if we weren't cancelled while sleeping on
1097 		 * getting DMA resources...
1098 		 */
1099 		if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1100 			if (dp) {
1101 				bus_dmamap_unload(pci->parent_dmat, *dp);
1102 			}
1103 			return (CMD_COMPLETE);
1104 		}
1105 		return (CMD_QUEUED);
1106 	}
1107 }
1108 
1109 static void
1110 isp_pci_dmateardown(struct ispsoftc *isp, ISP_SCSI_XFER_T *ccb,
1111 	u_int32_t handle)
1112 {
1113 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1114 	bus_dmamap_t *dp = &pci->dmaps[handle];
1115 
1116 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1117 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
1118 	} else {
1119 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
1120 	}
1121 	bus_dmamap_unload(pci->parent_dmat, *dp);
1122 }
1123 
1124 
1125 static void
1126 isp_pci_reset1(struct ispsoftc *isp)
1127 {
1128 	/* Make sure the BIOS is disabled */
1129 	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1130 }
1131 
1132 static void
1133 isp_pci_dumpregs(struct ispsoftc *isp)
1134 {
1135 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1136 	printf("%s: PCI Command/Status Register=0x%lx\n", pci->pci_isp.isp_name,
1137 	    (u_long) pci_conf_read(pci->pci_id, PCIR_COMMAND));
1138 }
1139