xref: /freebsd/sys/dev/isp/isp_pci.c (revision 4cf49a43559ed9fdad601bdcccd2c55963008675)
1 /* $FreeBSD$ */
2 /*
3  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
4  * FreeBSD Version.
5  *
6  *---------------------------------------
7  * Copyright (c) 1997, 1998, 1999 by Matthew Jacob
8  * NASA/Ames Research Center
9  * All rights reserved.
10  *---------------------------------------
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice immediately at the beginning of the file, without modification,
17  *    this list of conditions, and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. The name of the author may not be used to endorse or promote products
22  *    derived from this software without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
28  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 #include <dev/isp/isp_freebsd.h>
37 #include <dev/isp/asm_pci.h>
38 #include <sys/malloc.h>
39 #include <vm/vm.h>
40 #include <vm/pmap.h>
41 
42 
43 #include <pci/pcireg.h>
44 #include <pci/pcivar.h>
45 
46 #include <machine/bus_memio.h>
47 #include <machine/bus_pio.h>
48 #include <machine/bus.h>
49 #include <machine/md_var.h>
50 
51 static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
52 static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
53 #ifndef ISP_DISABLE_1080_SUPPORT
54 static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
55 static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
56 #endif
57 static int isp_pci_mbxdma __P((struct ispsoftc *));
58 static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
59 	ispreq_t *, u_int8_t *, u_int8_t));
60 static void
61 isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t));
62 
63 static void isp_pci_reset1 __P((struct ispsoftc *));
64 static void isp_pci_dumpregs __P((struct ispsoftc *));
65 
66 #ifndef ISP_DISABLE_1020_SUPPORT
67 static struct ispmdvec mdvec = {
68 	isp_pci_rd_reg,
69 	isp_pci_wr_reg,
70 	isp_pci_mbxdma,
71 	isp_pci_dmasetup,
72 	isp_pci_dmateardown,
73 	NULL,
74 	isp_pci_reset1,
75 	isp_pci_dumpregs,
76 	ISP_RISC_CODE,
77 	ISP_CODE_LENGTH,
78 	ISP_CODE_ORG,
79 	0,
80 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
81 	0
82 };
83 #endif
84 
85 #ifndef ISP_DISABLE_1080_SUPPORT
86 static struct ispmdvec mdvec_1080 = {
87 	isp_pci_rd_reg_1080,
88 	isp_pci_wr_reg_1080,
89 	isp_pci_mbxdma,
90 	isp_pci_dmasetup,
91 	isp_pci_dmateardown,
92 	NULL,
93 	isp_pci_reset1,
94 	isp_pci_dumpregs,
95 	ISP1080_RISC_CODE,
96 	ISP1080_CODE_LENGTH,
97 	ISP1080_CODE_ORG,
98 	0,
99 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
100 	0
101 };
102 #endif
103 
104 #ifndef ISP_DISABLE_2100_SUPPORT
105 static struct ispmdvec mdvec_2100 = {
106 	isp_pci_rd_reg,
107 	isp_pci_wr_reg,
108 	isp_pci_mbxdma,
109 	isp_pci_dmasetup,
110 	isp_pci_dmateardown,
111 	NULL,
112 	isp_pci_reset1,
113 	isp_pci_dumpregs,
114 	ISP2100_RISC_CODE,
115 	ISP2100_CODE_LENGTH,
116 	ISP2100_CODE_ORG,
117 	0,
118 	0,
119 	0
120 };
121 #endif
122 
123 #ifndef	ISP_DISABLE_2200_SUPPORT
124 static struct ispmdvec mdvec_2200 = {
125 	isp_pci_rd_reg,
126 	isp_pci_wr_reg,
127 	isp_pci_mbxdma,
128 	isp_pci_dmasetup,
129 	isp_pci_dmateardown,
130 	NULL,
131 	isp_pci_reset1,
132 	isp_pci_dumpregs,
133 	ISP2200_RISC_CODE,
134 	ISP2200_CODE_LENGTH,
135 	ISP2100_CODE_ORG,
136 	0,
137 	0,
138 	0
139 };
140 #endif
141 
142 #ifndef	SCSI_ISP_PREFER_MEM_MAP
143 #define	SCSI_ISP_PREFER_MEM_MAP	0
144 #endif
145 
146 #ifndef	PCIM_CMD_INVEN
147 #define	PCIM_CMD_INVEN			0x10
148 #endif
149 #ifndef	PCIM_CMD_BUSMASTEREN
150 #define	PCIM_CMD_BUSMASTEREN		0x0004
151 #endif
152 #ifndef	PCIM_CMD_PERRESPEN
153 #define	PCIM_CMD_PERRESPEN		0x0040
154 #endif
155 #ifndef	PCIM_CMD_SEREN
156 #define	PCIM_CMD_SEREN			0x0100
157 #endif
158 
159 #ifndef	PCIR_COMMAND
160 #define	PCIR_COMMAND			0x04
161 #endif
162 
163 #ifndef	PCIR_CACHELNSZ
164 #define	PCIR_CACHELNSZ			0x0c
165 #endif
166 
167 #ifndef	PCIR_LATTIMER
168 #define	PCIR_LATTIMER			0x0d
169 #endif
170 
171 #ifndef	PCIR_ROMADDR
172 #define	PCIR_ROMADDR			0x30
173 #endif
174 
175 #ifndef	PCI_VENDOR_QLOGIC
176 #define	PCI_VENDOR_QLOGIC	0x1077
177 #endif
178 
179 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
180 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
181 #endif
182 
183 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
184 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
185 #endif
186 
187 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
188 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
189 #endif
190 
191 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
192 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
193 #endif
194 
195 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
196 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
197 #endif
198 
199 #define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
200 
201 #define	PCI_QLOGIC_ISP1080	\
202 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
203 
204 #define	PCI_QLOGIC_ISP1240	\
205 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
206 
207 #define	PCI_QLOGIC_ISP2100	\
208 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
209 
210 #define	PCI_QLOGIC_ISP2200	\
211 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
212 
213 #define	IO_MAP_REG	0x10
214 #define	MEM_MAP_REG	0x14
215 
216 #define	PCI_DFLT_LTNCY	0x40
217 #define	PCI_DFLT_LNSZ	0x10
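/*
 * Default PCI latency timer (0x40 clocks) and cache line size (0x10 dwords,
 * i.e. 64 bytes), written to the device below if the existing values look
 * unset or too small.
 */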
218 
219 static const char *isp_pci_probe __P((pcici_t tag, pcidi_t type));
220 static void isp_pci_attach __P((pcici_t config_d, int unit));
221 
222 /* This distinguishing define is not right, but it does work */
223 #ifdef __alpha__
224 #define IO_SPACE_MAPPING	ALPHA_BUS_SPACE_IO
225 #define MEM_SPACE_MAPPING	ALPHA_BUS_SPACE_MEM
226 #else
227 #define IO_SPACE_MAPPING	I386_BUS_SPACE_IO
228 #define MEM_SPACE_MAPPING	I386_BUS_SPACE_MEM
229 #endif
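/*
 * These constants are used below as the bus_space tag for register access:
 * port I/O versus memory-mapped I/O on the supported platforms.
 */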
230 
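/*
 * Per-instance softc.  pci_poff[] gives the offset of each register block
 * within the mapped BAR (the chip-specific cases in attach patch individual
 * entries); cntrol_dmat/cntrol_dmap cover the request/result queue memory,
 * and dmaps[] holds one DMA map per outstanding command.
 */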
231 struct isp_pcisoftc {
232 	struct ispsoftc			pci_isp;
233 	pcici_t				pci_id;
234 	bus_space_tag_t			pci_st;
235 	bus_space_handle_t		pci_sh;
236 	int16_t				pci_poff[_NREG_BLKS];
237 	bus_dma_tag_t			parent_dmat;
238 	bus_dma_tag_t			cntrol_dmat;
239 	bus_dmamap_t			cntrol_dmap;
240 	bus_dmamap_t			*dmaps;
241 };
242 
243 static u_long ispunit;
244 
245 static struct pci_device isp_pci_driver = {
246 	"isp",
247 	isp_pci_probe,
248 	isp_pci_attach,
249 	&ispunit,
250 	NULL
251 };
252 COMPAT_PCI_DRIVER (isp_pci, isp_pci_driver);
253 
254 
255 static const char *
256 isp_pci_probe(pcici_t tag, pcidi_t type)
257 {
258 	static int oneshot = 1;
259 	char *x;
260 
261 	switch (type) {
262 #ifndef	ISP_DISABLE_1020_SUPPORT
263 	case PCI_QLOGIC_ISP:
264 		x = "Qlogic ISP 1020/1040 PCI SCSI Adapter";
265 		break;
266 #endif
267 #ifndef	ISP_DISABLE_1080_SUPPORT
268 	case PCI_QLOGIC_ISP1080:
269 		x = "Qlogic ISP 1080 PCI SCSI Adapter";
270 		break;
271 	case PCI_QLOGIC_ISP1240:
272 		x = "Qlogic ISP 1240 PCI SCSI Adapter";
273 		break;
274 #endif
275 #ifndef	ISP_DISABLE_2100_SUPPORT
276 	case PCI_QLOGIC_ISP2100:
277 		x = "Qlogic ISP 2100 PCI FC-AL Adapter";
278 		break;
279 #endif
280 #ifndef	ISP_DISABLE_2200_SUPPORT
281 	case PCI_QLOGIC_ISP2200:
282 		x = "Qlogic ISP 2200 PCI FC-AL Adapter";
283 		break;
284 #endif
285 	default:
286 		return (NULL);
287 	}
288 	if (oneshot) {
289 		oneshot = 0;
290 		CFGPRINTF("Qlogic ISP Driver, FreeBSD Version %d.%d, "
291 		    "Core Version %d.%d\n",
292 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
293 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
294 	}
295 	return (x);
296 }
297 
298 static void
299 isp_pci_attach(pcici_t cfid, int unit)
300 {
301 	int mapped, prefer_mem_map, bitmap;
302 	pci_port_t io_port;
303 	u_int32_t data, linesz, psize, basetype;
304 	struct isp_pcisoftc *pcs;
305 	struct ispsoftc *isp;
306 	vm_offset_t vaddr, paddr;
307 	struct ispmdvec *mdvp;
308 	bus_size_t lim;
309 	ISP_LOCKVAL_DECL;
310 
311 	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
312 	if (pcs == NULL) {
313 		printf("isp%d: cannot allocate softc\n", unit);
314 		return;
315 	}
316 	bzero(pcs, sizeof (struct isp_pcisoftc));
317 
318 	/*
319 	 * Figure out if we're supposed to skip this one.
320 	 */
321 	if (getenv_int("isp_disable", &bitmap)) {
322 		if (bitmap & (1 << unit)) {
323 			printf("isp%d: not configuring\n", unit);
			free(pcs, M_DEVBUF);
324 			return;
325 		}
326 	}
327 
328 	/*
329 	 * Figure out which we should try first - memory mapping or i/o mapping?
330 	 */
331 #if	SCSI_ISP_PREFER_MEM_MAP == 1
332 	prefer_mem_map = 1;
333 #else
334 	prefer_mem_map = 0;
335 #endif
336 	bitmap = 0;
337 	if (getenv_int("isp_mem_map", &bitmap)) {
338 		if (bitmap & (1 << unit))
339 			prefer_mem_map = 1;
340 	}
341 	bitmap = 0;
342 	if (getenv_int("isp_io_map", &bitmap)) {
343 		if (bitmap & (1 << unit))
344 			prefer_mem_map = 0;
345 	}
346 
347 	vaddr = paddr = NULL;
348 	mapped = 0;
349 	linesz = PCI_DFLT_LNSZ;
350 	/*
351 	 * Note that pci_conf_read is a 32 bit word aligned function.
352 	 */
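	/*
	 * PCIR_COMMAND is dword 0x04: the command register occupies the low
	 * 16 bits (status the high 16), and only the command enable bits are
	 * examined below.
	 */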
353 	data = pci_conf_read(cfid, PCIR_COMMAND);
354 	if (prefer_mem_map) {
355 		if (data & PCI_COMMAND_MEM_ENABLE) {
356 			if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
357 				pcs->pci_st = MEM_SPACE_MAPPING;
358 				pcs->pci_sh = vaddr;
359 				mapped++;
360 			}
361 		}
362 		if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) {
363 			if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
364 				pcs->pci_st = IO_SPACE_MAPPING;
365 				pcs->pci_sh = io_port;
366 				mapped++;
367 			}
368 		}
369 	} else {
370 		if (data & PCI_COMMAND_IO_ENABLE) {
371 			if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
372 				pcs->pci_st = IO_SPACE_MAPPING;
373 				pcs->pci_sh = io_port;
374 				mapped++;
375 			}
376 		}
377 		if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) {
378 			if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
379 				pcs->pci_st = MEM_SPACE_MAPPING;
380 				pcs->pci_sh = vaddr;
381 				mapped++;
382 			}
383 		}
384 	}
385 	if (mapped == 0) {
386 		printf("isp%d: unable to map any ports!\n", unit);
387 		free(pcs, M_DEVBUF);
388 		return;
389 	}
390 	if (bootverbose)
391 		printf("isp%d: using %s space register mapping\n", unit,
392 		    pcs->pci_st == IO_SPACE_MAPPING? "I/O" : "Memory");
393 
394 	data = pci_conf_read(cfid, PCI_ID_REG);
395 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
396 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
397 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
398 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
399 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
400 	/*
401 	 * Defaults, mostly to quiet GCC uninitialized-variable warnings;
 	 * the chip-specific cases below override them.
402 	 */
403 	mdvp = &mdvec;
404 	basetype = ISP_HA_SCSI_UNKNOWN;
405 	psize = sizeof (sdparam);
406 	lim = BUS_SPACE_MAXSIZE_32BIT;
407 #ifndef	ISP_DISABLE_1020_SUPPORT
408 	if (data == PCI_QLOGIC_ISP) {
409 		mdvp = &mdvec;
410 		basetype = ISP_HA_SCSI_UNKNOWN;
411 		psize = sizeof (sdparam);
412 		lim = BUS_SPACE_MAXSIZE_24BIT;
413 	}
414 #endif
415 #ifndef	ISP_DISABLE_1080_SUPPORT
416 	if (data == PCI_QLOGIC_ISP1080) {
417 		mdvp = &mdvec_1080;
418 		basetype = ISP_HA_SCSI_1080;
419 		psize = sizeof (sdparam);
420 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
421 		    ISP1080_DMA_REGS_OFF;
422 	}
423 	if (data == PCI_QLOGIC_ISP1240) {
424 		mdvp = &mdvec_1080;
425 		basetype = ISP_HA_SCSI_12X0;
426 		psize = 2 * sizeof (sdparam);
427 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
428 		    ISP1080_DMA_REGS_OFF;
429 	}
430 #endif
431 #ifndef	ISP_DISABLE_2100_SUPPORT
432 	if (data == PCI_QLOGIC_ISP2100) {
433 		mdvp = &mdvec_2100;
434 		basetype = ISP_HA_FC_2100;
435 		psize = sizeof (fcparam);
436 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
437 		    PCI_MBOX_REGS2100_OFF;
438 		data = pci_conf_read(cfid, PCI_CLASS_REG);
439 		if ((data & 0xff) < 3) {
440 			/*
441 			 * XXX: Need to get the actual revision
442 			 * XXX: number of the 2100 FB. At any rate,
443 			 * XXX: lower cache line size for early revision
444 			 * XXX: boards.
445 			 */
446 			linesz = 1;
447 		}
448 	}
449 #endif
450 #ifndef	ISP_DISABLE_2200_SUPPORT
451 	if (data == PCI_QLOGIC_ISP2200) {
452 		mdvp = &mdvec_2200;
453 		basetype = ISP_HA_FC_2200;
454 		psize = sizeof (fcparam);
455 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
456 		    PCI_MBOX_REGS2100_OFF;
457 	}
458 #endif
459 	isp = &pcs->pci_isp;
460 	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT);
461 	if (isp->isp_param == NULL) {
462 		printf("isp%d: cannot allocate parameter data\n", unit);
		free(pcs, M_DEVBUF);
463 		return;
464 	}
465 	bzero(isp->isp_param, psize);
466 	isp->isp_mdvec = mdvp;
467 	isp->isp_type = basetype;
468 	(void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit);
469 	isp->isp_osinfo.unit = unit;
470 
471 	ISP_LOCK(isp);
472 
473 	/*
474 	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
475 	 * are set.
476 	 */
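	/*
	 * SEREN enables SERR# signalling, PERRESPEN the response to parity
	 * errors, BUSMASTEREN bus mastering (needed for DMA), and INVEN the
	 * use of Memory Write and Invalidate cycles.
	 */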
477 	data = pci_cfgread(cfid, PCIR_COMMAND, 2);
478 	data |=	PCIM_CMD_SEREN		|
479 		PCIM_CMD_PERRESPEN	|
480 		PCIM_CMD_BUSMASTEREN	|
481 		PCIM_CMD_INVEN;
482 	pci_cfgwrite(cfid, PCIR_COMMAND, data, 2);
483 
484 	/*
485 	 * Make sure the Cache Line Size register is set sensibly.
486 	 */
487 	data = pci_cfgread(cfid, PCIR_CACHELNSZ, 1);
488 	if (data != linesz) {
489 		data = PCI_DFLT_LNSZ;
490 		CFGPRINTF("%s: set PCI line size to %d\n", isp->isp_name, data);
491 		pci_cfgwrite(cfid, PCIR_CACHELNSZ, data, 1);
492 	}
493 
494 	/*
495 	 * Make sure the Latency Timer is sane.
496 	 */
497 	data = pci_cfgread(cfid, PCIR_LATTIMER, 1);
498 	if (data < PCI_DFLT_LTNCY) {
499 		data = PCI_DFLT_LTNCY;
500 		CFGPRINTF("%s: set PCI latency to %d\n", isp->isp_name, data);
501 		pci_cfgwrite(cfid, PCIR_LATTIMER, data, 1);
502 	}
503 
504 	/*
505 	 * Make sure we've disabled the ROM.
506 	 */
507 	data = pci_cfgread(cfid, PCIR_ROMADDR, 4);
508 	data &= ~1;
509 	pci_cfgwrite(cfid, PCIR_ROMADDR, data, 4);
510 	ISP_UNLOCK(isp);
511 
512 	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
513 	    BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
514 	    255, lim, 0, &pcs->parent_dmat) != 0) {
515 		printf("%s: could not create master dma tag\n", isp->isp_name);
516 		free(pcs, M_DEVBUF);
517 		return;
518 	}
519 	if (pci_map_int(cfid, (void (*)(void *))isp_intr,
520 	    (void *)isp, &IMASK) == 0) {
521 		printf("%s: could not map interrupt\n", isp->isp_name);
522 		free(pcs, M_DEVBUF);
523 		return;
524 	}
525 
526 	pcs->pci_id = cfid;
527 #ifdef	SCSI_ISP_NO_FWLOAD_MASK
528 	if (SCSI_ISP_NO_FWLOAD_MASK && (SCSI_ISP_NO_FWLOAD_MASK & (1 << unit)))
529 		isp->isp_confopts |= ISP_CFG_NORELOAD;
530 #endif
531 	if (getenv_int("isp_no_fwload", &bitmap)) {
532 		if (bitmap & (1 << unit))
533 			isp->isp_confopts |= ISP_CFG_NORELOAD;
534 	}
535 	if (getenv_int("isp_fwload", &bitmap)) {
536 		if (bitmap & (1 << unit))
537 			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
538 	}
539 
540 #ifdef	SCSI_ISP_NO_NVRAM_MASK
541 	if (SCSI_ISP_NO_NVRAM_MASK && (SCSI_ISP_NO_NVRAM_MASK & (1 << unit))) {
542 		printf("%s: ignoring NVRAM\n", isp->isp_name);
543 		isp->isp_confopts |= ISP_CFG_NONVRAM;
544 	}
545 #endif
546 	if (getenv_int("isp_no_nvram", &bitmap)) {
547 		if (bitmap & (1 << unit))
548 			isp->isp_confopts |= ISP_CFG_NONVRAM;
549 	}
550 	if (getenv_int("isp_nvram", &bitmap)) {
551 		if (bitmap & (1 << unit))
552 			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
553 	}
554 
555 #ifdef	SCSI_ISP_FCDUPLEX
556 	if (IS_FC(isp)) {
557 		if (SCSI_ISP_FCDUPLEX && (SCSI_ISP_FCDUPLEX & (1 << unit))) {
558 			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
559 		}
560 	}
561 #endif
562 	if (getenv_int("isp_fcduplex", &bitmap)) {
563 		if (bitmap & (1 << unit))
564 			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
565 	}
566 	if (getenv_int("isp_no_fcduplex", &bitmap)) {
567 		if (bitmap & (1 << unit))
568 			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
569 	}
570 
571 	if (getenv_int("isp_seed", &isp->isp_osinfo.seed)) {
572 		isp->isp_osinfo.seed <<= 8;
573 		isp->isp_osinfo.seed += (unit + 1);
574 	} else {
575 		/*
576 		 * poor man's attempt at pseudo randomness.
577 		 */
578 		long i = (intptr_t) isp;
579 
580 		i >>= 5;
581 		i &= 0x7;
582 
583 		/*
584 		 * This isn't very random, but it's the best we can do for
585 		 * the real edge case of cards that don't have WWNs.
586 		 */
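		/*
		 * The seed is presumably used later to synthesize a soft
		 * node/port WWN for FC cards whose NVRAM does not provide
		 * one, so it only needs to be reasonably unique per card.
		 */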
587 		isp->isp_osinfo.seed += ((int) cfid->bus) << 16;
588 		isp->isp_osinfo.seed += ((int) cfid->slot) << 8;
589 		isp->isp_osinfo.seed += ((int) cfid->func);
590 		while (version[i])
591 			isp->isp_osinfo.seed += (int) version[i++];
592 		isp->isp_osinfo.seed <<= 8;
593 		isp->isp_osinfo.seed += (unit + 1);
594 	}
595 	(void) getenv_int("isp_debug", &isp_debug);
596 	ISP_LOCK(isp);
597 	isp_reset(isp);
598 	if (isp->isp_state != ISP_RESETSTATE) {
599 		(void) pci_unmap_int(cfid);
600 		ISP_UNLOCK(isp);
601 		free(pcs, M_DEVBUF);
602 		return;
603 	}
604 	isp_init(isp);
605 	if (isp->isp_state != ISP_INITSTATE) {
606 		/* If we're a Fibre Channel Card, we allow deferred attach */
607 		if (IS_SCSI(isp)) {
608 			isp_uninit(isp);
609 			(void) pci_unmap_int(cfid); /* Does nothing */
610 			ISP_UNLOCK(isp);
611 			free(pcs, M_DEVBUF);
612 			return;
613 		}
614 	}
615 	isp_attach(isp);
616 	if (isp->isp_state != ISP_RUNSTATE) {
617 		/* If we're a Fibre Channel Card, we allow deferred attach */
618 		if (IS_SCSI(isp)) {
619 			isp_uninit(isp);
620 			(void) pci_unmap_int(cfid); /* Does nothing */
621 			ISP_UNLOCK(isp);
622 			free(pcs, M_DEVBUF);
623 			return;
624 		}
625 	}
626 	ISP_UNLOCK(isp);
627 #ifdef __alpha__
628 	/*
629 	 * THIS SHOULD NOT HAVE TO BE HERE
630 	 */
631 	alpha_register_pci_scsi(cfid->bus, cfid->slot, isp->isp_sim);
632 #endif
633 }
634 
635 static u_int16_t
636 isp_pci_rd_reg(isp, regoff)
637 	struct ispsoftc *isp;
638 	int regoff;
639 {
640 	u_int16_t rv;
641 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
642 	int offset, oldconf = 0;
643 
644 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
645 		/*
646 		 * We will assume that someone has paused the RISC processor.
647 		 */
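		/*
		 * The SXP registers are banked behind BIU_CONF1; select them
		 * with BIU_PCI_CONF1_SXP and restore the old value when done.
		 */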
648 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
649 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
650 	}
651 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
652 	offset += (regoff & 0xff);
653 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
654 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
655 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
656 	}
657 	return (rv);
658 }
659 
660 static void
661 isp_pci_wr_reg(isp, regoff, val)
662 	struct ispsoftc *isp;
663 	int regoff;
664 	u_int16_t val;
665 {
666 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
667 	int offset, oldconf = 0;
668 
669 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
670 		/*
671 		 * We will assume that someone has paused the RISC processor.
672 		 */
673 		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
674 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
675 	}
676 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
677 	offset += (regoff & 0xff);
678 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
679 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
680 		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
681 	}
682 }
683 
684 #ifndef	ISP_DISABLE_1080_SUPPORT
685 static u_int16_t
686 isp_pci_rd_reg_1080(isp, regoff)
687 	struct ispsoftc *isp;
688 	int regoff;
689 {
690 	u_int16_t rv;
691 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
692 	int offset, oc = 0;
693 
694 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
695 		/*
696 		 * We will assume that someone has paused the RISC processor.
697 		 */
698 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
699 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
700 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
701 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
702 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
703 	}
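	/*
	 * On the 1080/1240 the DMA register block is banked behind BIU_CONF1
	 * as well, selected with BIU_PCI1080_CONF1_DMA.
	 */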
704 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
705 	offset += (regoff & 0xff);
706 	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
707 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
708 	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
709 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
710 	}
711 	return (rv);
712 }
713 
714 static void
715 isp_pci_wr_reg_1080(isp, regoff, val)
716 	struct ispsoftc *isp;
717 	int regoff;
718 	u_int16_t val;
719 {
720 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
721 	int offset, oc = 0;
722 
723 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
724 		/*
725 		 * We will assume that someone has paused the RISC processor.
726 		 */
727 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
728 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
729 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
730 		oc = isp_pci_rd_reg(isp, BIU_CONF1);
731 		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
732 	}
733 	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
734 	offset += (regoff & 0xff);
735 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
736 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
737 	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
738 		isp_pci_wr_reg(isp, BIU_CONF1, oc);
739 	}
740 }
741 #endif
742 
743 
744 static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
745 static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
746 static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));
747 
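/*
 * imush carries the softc and an error slot into the bus_dmamap_load()
 * callbacks below, which record the bus addresses of the control areas.
 */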
748 struct imush {
749 	struct ispsoftc *isp;
750 	int error;
751 };
752 
753 static void
754 isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
755 {
756 	struct imush *imushp = (struct imush *) arg;
757 	if (error) {
758 		imushp->error = error;
759 	} else {
760 		imushp->isp->isp_rquest_dma = segs->ds_addr;
761 	}
762 }
763 
764 static void
765 isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
766 {
767 	struct imush *imushp = (struct imush *) arg;
768 	if (error) {
769 		imushp->error = error;
770 	} else {
771 		imushp->isp->isp_result_dma = segs->ds_addr;
772 	}
773 }
774 
775 static void
776 isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
777 {
778 	struct imush *imushp = (struct imush *) arg;
779 	if (error) {
780 		imushp->error = error;
781 	} else {
782 		fcparam *fcp = imushp->isp->isp_param;
783 		fcp->isp_scdma = segs->ds_addr;
784 	}
785 }
786 
787 static int
788 isp_pci_mbxdma(struct ispsoftc *isp)
789 {
790 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
791 	caddr_t base;
792 	u_int32_t len;
793 	int i, error;
794 	bus_size_t lim;
795 	struct imush im;
796 
797 
798 	/*
799 	 * Already been here? If so, leave...
800 	 */
801 	if (isp->isp_rquest) {
802 		return (0);
803 	}
804 
805 	len = sizeof (ISP_SCSI_XFER_T **) * isp->isp_maxcmds;
806 	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
807 	if (isp->isp_xflist == NULL) {
808 		printf("%s: can't alloc xflist array\n", isp->isp_name);
809 		return (1);
810 	}
811 	bzero(isp->isp_xflist, len);
812 	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
813 	pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF,  M_WAITOK);
814 	if (pci->dmaps == NULL) {
815 		printf("%s: can't alloc dma maps\n", isp->isp_name);
816 		free(isp->isp_xflist, M_DEVBUF);
817 		return (1);
818 	}
819 
820 	if (IS_FC(isp) || IS_1080(isp) || IS_12X0(isp))
821 		lim = BUS_SPACE_MAXADDR + 1;
822 	else
823 		lim = BUS_SPACE_MAXADDR_24BIT + 1;
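	/*
	 * lim becomes the DMA boundary for the control structures below:
	 * queue memory for the older parts apparently must not cross a 16MB
	 * boundary, while BUS_SPACE_MAXADDR + 1 wraps to 0 (no restriction)
	 * for the 1080/1240 and FC chips.
	 */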
824 
825 	/*
826 	 * Allocate and map the request, result queues, plus FC scratch area.
827 	 */
828 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
829 	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
830 	if (IS_FC(isp)) {
831 		len += ISP2100_SCRLEN;
832 	}
833 	if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
834 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
835 	    BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
836 		printf("%s: cannot create a dma tag for control spaces\n",
837 		    isp->isp_name);
838 		free(isp->isp_xflist, M_DEVBUF);
839 		free(pci->dmaps, M_DEVBUF);
840 		return (1);
841 	}
842 	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
843 	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
844 		printf("%s: cannot allocate %d bytes of CCB memory\n",
845 		    isp->isp_name, len);
846 		free(isp->isp_xflist, M_DEVBUF);
847 		free(pci->dmaps, M_DEVBUF);
848 		return (1);
849 	}
850 
851 	isp->isp_rquest = base;
852 	im.isp = isp;
853 	im.error = 0;
854 	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
855 	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, &im, 0);
856 	if (im.error) {
857 		printf("%s: error %d loading dma map for DMA request queue\n",
858 		    isp->isp_name, im.error);
859 		free(isp->isp_xflist, M_DEVBUF);
860 		free(pci->dmaps, M_DEVBUF);
861 		isp->isp_rquest = NULL;
862 		return (1);
863 	}
864 	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
865 	im.error = 0;
866 	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
867 	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, &im, 0);
868 	if (im.error) {
869 		printf("%s: error %d loading dma map for DMA result queue\n",
870 		    isp->isp_name, im.error);
871 		free(isp->isp_xflist, M_DEVBUF);
872 		free(pci->dmaps, M_DEVBUF);
873 		isp->isp_rquest = NULL;
874 		return (1);
875 	}
876 
877 	for (i = 0; i < isp->isp_maxcmds; i++) {
878 		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
879 		if (error) {
880 			printf("%s: error %d creating per-cmd DMA maps\n",
881 			    isp->isp_name, error);
882 			free(isp->isp_xflist, M_DEVBUF);
883 			free(pci->dmaps, M_DEVBUF);
884 			isp->isp_rquest = NULL;
885 			return (1);
886 		}
887 	}
888 
889 	if (IS_FC(isp)) {
890 		fcparam *fcp = (fcparam *) isp->isp_param;
891 		fcp->isp_scratch = base +
892 			ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN) +
893 			ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
894 		im.error = 0;
895 		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
896 		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
897 		if (im.error) {
898 			printf("%s: error %d loading FC scratch area\n",
899 			    isp->isp_name, im.error);
900 			free(isp->isp_xflist, M_DEVBUF);
901 			free(pci->dmaps, M_DEVBUF);
902 			isp->isp_rquest = NULL;
903 			return (1);
904 		}
905 	}
906 	return (0);
907 }
908 
909 static void dma2 __P((void *, bus_dma_segment_t *, int, int));
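/*
 * mush_t ferries per-command state from isp_pci_dmasetup() into the dma2()
 * callback; MUSHERR_NOQENTRIES in the error slot means the request queue
 * ran out of room for continuation entries.
 */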
910 typedef struct {
911 	struct ispsoftc *isp;
912 	ISP_SCSI_XFER_T *ccb;
913 	ispreq_t *rq;
914 	u_int8_t *iptrp;
915 	u_int8_t optr;
916 	u_int error;
917 } mush_t;
918 
919 #define	MUSHERR_NOQENTRIES	-2
920 
921 static void
922 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
923 {
924 	mush_t *mp;
925 	ISP_SCSI_XFER_T *ccb;
926 	struct ispsoftc *isp;
927 	struct isp_pcisoftc *pci;
928 	bus_dmamap_t *dp;
929 	bus_dma_segment_t *eseg;
930 	ispreq_t *rq;
931 	u_int8_t *iptrp;
932 	u_int8_t optr;
933 	ispcontreq_t *crq;
934 	int drq, seglim, datalen;
935 
936 	mp = (mush_t *) arg;
937 	if (error) {
938 		mp->error = error;
939 		return;
940 	}
941 
942 	isp = mp->isp;
943 	if (nseg < 1) {
944 		printf("%s: zero or negative segment count\n", isp->isp_name);
945 		mp->error = EFAULT;
946 		return;
947 	}
948 	ccb = mp->ccb;
949 	rq = mp->rq;
950 	iptrp = mp->iptrp;
951 	optr = mp->optr;
952 	pci = (struct isp_pcisoftc *)isp;
953 	dp = &pci->dmaps[rq->req_handle - 1];
954 
955 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
956 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
957 		drq = REQFLAG_DATA_IN;
958 	} else {
959 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
960 		drq = REQFLAG_DATA_OUT;
961 	}
962 
963 	datalen = XS_XFRLEN(ccb);
964 	if (IS_FC(isp)) {
965 		seglim = ISP_RQDSEG_T2;
966 		((ispreqt2_t *)rq)->req_totalcnt = datalen;
967 		((ispreqt2_t *)rq)->req_flags |= drq;
968 	} else {
969 		seglim = ISP_RQDSEG;
970 		rq->req_flags |= drq;
971 	}
972 
973 	eseg = dm_segs + nseg;
974 
975 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
976 		if (IS_FC(isp)) {
977 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
978 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
979 			    dm_segs->ds_addr;
980 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
981 			    dm_segs->ds_len;
982 		} else {
983 			rq->req_dataseg[rq->req_seg_count].ds_base =
984 				dm_segs->ds_addr;
985 			rq->req_dataseg[rq->req_seg_count].ds_count =
986 				dm_segs->ds_len;
987 		}
988 		datalen -= dm_segs->ds_len;
989 #if	0
990 		if (IS_FC(isp)) {
991 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
992 			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
993 			    isp->isp_name, rq->req_seg_count,
994 			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
995 			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
996 		} else {
997 			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
998 			    isp->isp_name, rq->req_seg_count,
999 			    rq->req_dataseg[rq->req_seg_count].ds_count,
1000 			    rq->req_dataseg[rq->req_seg_count].ds_base);
1001 		}
1002 #endif
1003 		rq->req_seg_count++;
1004 		dm_segs++;
1005 	}
1006 
1007 	while (datalen > 0 && dm_segs != eseg) {
1008 		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
1009 		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
1010 		if (*iptrp == optr) {
1011 #if	0
1012 			printf("%s: Request Queue Overflow++\n", isp->isp_name);
1013 #endif
1014 			mp->error = MUSHERR_NOQENTRIES;
1015 			return;
1016 		}
1017 		rq->req_header.rqs_entry_count++;
1018 		bzero((void *)crq, sizeof (*crq));
1019 		crq->req_header.rqs_entry_count = 1;
1020 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1021 
1022 		seglim = 0;
1023 		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
1024 			crq->req_dataseg[seglim].ds_base =
1025 			    dm_segs->ds_addr;
1026 			crq->req_dataseg[seglim].ds_count =
1027 			    dm_segs->ds_len;
1028 #if	0
1029 			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
1030 			    isp->isp_name, rq->req_header.rqs_entry_count-1,
1031 			    seglim, crq->req_dataseg[seglim].ds_count,
1032 			    crq->req_dataseg[seglim].ds_base);
1033 #endif
1034 			datalen -= dm_segs->ds_len;
1035 			rq->req_seg_count++;
1036 			dm_segs++;
1037 			seglim++;
1038 		}
1039 	}
1040 }
1041 
1042 static int
1043 isp_pci_dmasetup(struct ispsoftc *isp, ISP_SCSI_XFER_T *ccb, ispreq_t *rq,
1044 	u_int8_t *iptrp, u_int8_t optr)
1045 {
1046 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1047 	struct ccb_hdr *ccb_h;
1048 	struct ccb_scsiio *csio;
1049 	bus_dmamap_t *dp = NULL;
1050 	mush_t mush, *mp;
1051 
1052 	csio = (struct ccb_scsiio *) ccb;
1053 	ccb_h = &csio->ccb_h;
1054 
1055 	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
1056 		rq->req_seg_count = 1;
1057 		return (CMD_QUEUED);
1058 	}
1059 
1060 	/*
1061 	 * Do a virtual grapevine step to collect info for
1062 	 * the callback dma allocation that we have to use...
1063 	 */
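	/*
	 * The mush_t filled in here is handed to bus_dmamap_load(); its
	 * callback (dma2) writes the segment descriptors directly into the
	 * request queue entry and any continuation entries it allocates.
	 */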
1064 	mp = &mush;
1065 	mp->isp = isp;
1066 	mp->ccb = ccb;
1067 	mp->rq = rq;
1068 	mp->iptrp = iptrp;
1069 	mp->optr = optr;
1070 	mp->error = 0;
1071 
1072 	if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
1073 		if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
1074 			int error, s;
1075 			dp = &pci->dmaps[rq->req_handle - 1];
1076 			s = splsoftvm();
1077 			error = bus_dmamap_load(pci->parent_dmat, *dp,
1078 			    csio->data_ptr, csio->dxfer_len, dma2, mp, 0);
1079 			if (error == EINPROGRESS) {
1080 				bus_dmamap_unload(pci->parent_dmat, *dp);
1081 				mp->error = EINVAL;
1082 				printf("%s: deferred dma allocation not "
1083 				    "supported\n", isp->isp_name);
1084 			} else if (error && mp->error == 0) {
1085 #ifdef	DIAGNOSTIC
1086 				printf("%s: error %d in dma mapping code\n",
1087 				    isp->isp_name, error);
1088 #endif
1089 				mp->error = error;
1090 			}
1091 			splx(s);
1092 		} else {
1093 			/* Pointer to physical buffer */
1094 			struct bus_dma_segment seg;
1095 			seg.ds_addr = (bus_addr_t)csio->data_ptr;
1096 			seg.ds_len = csio->dxfer_len;
1097 			dma2(mp, &seg, 1, 0);
1098 		}
1099 	} else {
1100 		struct bus_dma_segment *segs;
1101 
1102 		if ((ccb_h->flags & CAM_DATA_PHYS) != 0) {
1103 			printf("%s: Physical segment pointers unsupported\n",
1104 				isp->isp_name);
1105 			mp->error = EINVAL;
1106 		} else if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) {
1107 			printf("%s: Virtual segment addresses unsupported\n",
1108 				isp->isp_name);
1109 			mp->error = EINVAL;
1110 		} else {
1111 			/* Just use the segments provided */
1112 			segs = (struct bus_dma_segment *) csio->data_ptr;
1113 			dma2(mp, segs, csio->sglist_cnt, 0);
1114 		}
1115 	}
1116 	if (mp->error) {
1117 		int retval = CMD_COMPLETE;
1118 		if (mp->error == MUSHERR_NOQENTRIES) {
1119 			retval = CMD_EAGAIN;
1120 		} else if (mp->error == EFBIG) {
1121 			XS_SETERR(csio, CAM_REQ_TOO_BIG);
1122 		} else if (mp->error == EINVAL) {
1123 			XS_SETERR(csio, CAM_REQ_INVALID);
1124 		} else {
1125 			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
1126 		}
1127 		return (retval);
1128 	} else {
1129 		/*
1130 		 * Check to see if we weren't cancelled while sleeping on
1131 		 * getting DMA resources...
1132 		 */
1133 		if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1134 			if (dp) {
1135 				bus_dmamap_unload(pci->parent_dmat, *dp);
1136 			}
1137 			return (CMD_COMPLETE);
1138 		}
1139 		return (CMD_QUEUED);
1140 	}
1141 }
1142 
1143 static void
1144 isp_pci_dmateardown(struct ispsoftc *isp, ISP_SCSI_XFER_T *xs, u_int32_t handle)
1145 {
1146 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1147 	bus_dmamap_t *dp = &pci->dmaps[handle - 1];
1148 	KASSERT((handle > 0 && handle <= isp->isp_maxcmds),
1149 	    ("bad handle in isp_pci_dmateardown"));
1150 	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1151 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
1152 	} else {
1153 		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
1154 	}
1155 	bus_dmamap_unload(pci->parent_dmat, *dp);
1156 }
1157 
1158 
1159 static void
1160 isp_pci_reset1(struct ispsoftc *isp)
1161 {
1162 	/* Make sure the BIOS is disabled */
1163 	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
1164 }
1165 
1166 static void
1167 isp_pci_dumpregs(struct ispsoftc *isp)
1168 {
1169 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
1170 	printf("%s: PCI Command/Status = 0x%lx\n", pci->pci_isp.isp_name,
1171 	    (u_long) pci_conf_read(pci->pci_id, PCIR_COMMAND));
1172 }
1173