/* $FreeBSD$ */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 *---------------------------------------
 * Copyright (c) 1997, 1998, 1999 by Matthew Jacob
 * NASA/Ames Research Center
 * All rights reserved.
 *---------------------------------------
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <dev/isp/isp_freebsd.h>
#include <dev/isp/asm_pci.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/pmap.h>


#include <pci/pcireg.h>
#include <pci/pcivar.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/md_var.h>

static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
#ifndef ISP_DISABLE_1080_SUPPORT
static u_int16_t isp_pci_rd_reg_1080 __P((struct ispsoftc *, int));
static void isp_pci_wr_reg_1080 __P((struct ispsoftc *, int, u_int16_t));
#endif
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, ISP_SCSI_XFER_T *,
	ispreq_t *, u_int8_t *, u_int8_t));
static void
isp_pci_dmateardown __P((struct ispsoftc *, ISP_SCSI_XFER_T *, u_int32_t));

static void isp_pci_reset1 __P((struct ispsoftc *));
static void isp_pci_dumpregs __P((struct ispsoftc *));

#ifndef	ISP_CODE_ORG
#define	ISP_CODE_ORG		0x1000
#endif
#ifndef	ISP_1040_RISC_CODE
#define	ISP_1040_RISC_CODE	NULL
#endif
#ifndef	ISP_1080_RISC_CODE
#define	ISP_1080_RISC_CODE	NULL
#endif
#ifndef	ISP_2100_RISC_CODE
#define	ISP_2100_RISC_CODE	NULL
#endif
#ifndef	ISP_2200_RISC_CODE
#define	ISP_2200_RISC_CODE	NULL
#endif

#ifndef ISP_DISABLE_1020_SUPPORT
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1040_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef ISP_DISABLE_1080_SUPPORT
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_1080_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64,
	0
};
#endif

#ifndef ISP_DISABLE_2100_SUPPORT
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2100_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif

#ifndef	ISP_DISABLE_2200_SUPPORT
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	ISP_2200_RISC_CODE,
	0,
	ISP_CODE_ORG,
	0,
	0,
	0
};
#endif

#ifndef	SCSI_ISP_PREFER_MEM_MAP
#define	SCSI_ISP_PREFER_MEM_MAP	0
#endif

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC	0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#define	PCI_QLOGIC_ISP	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

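/*
 * These composite IDs are (device id << 16) | vendor id, which is the
 * layout returned by a 32 bit read of PCI configuration space offset 0
 * (Vendor ID in the low half, Device ID in the high half); both
 * isp_pci_probe() and isp_pci_attach() compare against them directly.
 */
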
#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static const char *isp_pci_probe __P((pcici_t tag, pcidi_t type));
static void isp_pci_attach __P((pcici_t config_d, int unit));

/* This distinguishing define is not right, but it does work */
#ifdef __alpha__
#define IO_SPACE_MAPPING	ALPHA_BUS_SPACE_IO
#define MEM_SPACE_MAPPING	ALPHA_BUS_SPACE_MEM
#else
#define IO_SPACE_MAPPING	I386_BUS_SPACE_IO
#define MEM_SPACE_MAPPING	I386_BUS_SPACE_MEM
#endif

struct isp_pcisoftc {
	struct ispsoftc			pci_isp;
	pcici_t				pci_id;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			parent_dmat;
	bus_dma_tag_t			cntrol_dmat;
	bus_dmamap_t			cntrol_dmap;
	bus_dmamap_t			*dmaps;
};
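
/*
 * The register accessors and DMA routines below cast the struct ispsoftc
 * pointer they are handed back to a struct isp_pcisoftc; this relies on
 * pci_isp being the first member of this structure.
 */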

static u_long ispunit;

static struct pci_device isp_pci_driver = {
	"isp",
	isp_pci_probe,
	isp_pci_attach,
	&ispunit,
	NULL
};
COMPAT_PCI_DRIVER (isp_pci, isp_pci_driver);


static const char *
isp_pci_probe(pcici_t tag, pcidi_t type)
{
	static int oneshot = 1;
	char *x;

	switch (type) {
#ifndef	ISP_DISABLE_1020_SUPPORT
	case PCI_QLOGIC_ISP:
		x = "Qlogic ISP 1020/1040 PCI SCSI Adapter";
		break;
#endif
#ifndef	ISP_DISABLE_1080_SUPPORT
	case PCI_QLOGIC_ISP1080:
		x = "Qlogic ISP 1080 PCI SCSI Adapter";
		break;
	case PCI_QLOGIC_ISP1240:
		x = "Qlogic ISP 1240 PCI SCSI Adapter";
		break;
#endif
#ifndef	ISP_DISABLE_2100_SUPPORT
	case PCI_QLOGIC_ISP2100:
		x = "Qlogic ISP 2100 PCI FC-AL Adapter";
		break;
#endif
#ifndef	ISP_DISABLE_2200_SUPPORT
	case PCI_QLOGIC_ISP2200:
		x = "Qlogic ISP 2200 PCI FC-AL Adapter";
		break;
#endif
	default:
		return (NULL);
	}
	if (oneshot) {
		oneshot = 0;
		CFGPRINTF("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
	}
	return (x);
}

static void
isp_pci_attach(pcici_t cfid, int unit)
{
	int mapped, prefer_mem_map, bitmap;
	pci_port_t io_port;
	u_int32_t data, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	struct ispsoftc *isp;
	vm_offset_t vaddr, paddr;
	struct ispmdvec *mdvp;
	bus_size_t lim;
	ISP_LOCKVAL_DECL;

	pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT);
	if (pcs == NULL) {
		printf("isp%d: cannot allocate softc\n", unit);
		return;
	}
	bzero(pcs, sizeof (struct isp_pcisoftc));

	/*
	 * Figure out if we're supposed to skip this one.
	 */
	if (getenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			printf("isp%d: not configuring\n", unit);
			free(pcs, M_DEVBUF);
			return;
		}
	}

	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 */
#if	SCSI_ISP_PREFER_MEM_MAP == 1
	prefer_mem_map = 1;
#else
	prefer_mem_map = 0;
#endif
	bitmap = 0;
	if (getenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit))
			prefer_mem_map = 1;
	}
	bitmap = 0;
	if (getenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit))
			prefer_mem_map = 0;
	}
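	/*
	 * Note: isp_disable, isp_mem_map, isp_io_map and the similar values
	 * read further below are per-unit bitmaps taken from the kernel
	 * environment (typically set from the boot loader), so bit
	 * (1 << unit) selects this particular adapter.
	 */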

	vaddr = paddr = NULL;
	mapped = 0;
	linesz = PCI_DFLT_LNSZ;
	/*
	 * Note that pci_conf_read is a 32 bit word aligned function.
	 */
	data = pci_conf_read(cfid, PCIR_COMMAND);
	if (prefer_mem_map) {
		if (data & PCI_COMMAND_MEM_ENABLE) {
			if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
				pcs->pci_st = MEM_SPACE_MAPPING;
				pcs->pci_sh = vaddr;
				mapped++;
			}
		}
		if (mapped == 0 && (data & PCI_COMMAND_IO_ENABLE)) {
			if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
				pcs->pci_st = IO_SPACE_MAPPING;
				pcs->pci_sh = io_port;
				mapped++;
			}
		}
	} else {
		if (data & PCI_COMMAND_IO_ENABLE) {
			if (pci_map_port(cfid, PCI_MAP_REG_START, &io_port)) {
				pcs->pci_st = IO_SPACE_MAPPING;
				pcs->pci_sh = io_port;
				mapped++;
			}
		}
		if (mapped == 0 && (data & PCI_COMMAND_MEM_ENABLE)) {
			if (pci_map_mem(cfid, MEM_MAP_REG, &vaddr, &paddr)) {
				pcs->pci_st = MEM_SPACE_MAPPING;
				pcs->pci_sh = vaddr;
				mapped++;
			}
		}
	}
	if (mapped == 0) {
		printf("isp%d: unable to map any ports!\n", unit);
		free(pcs, M_DEVBUF);
		return;
	}
	if (bootverbose)
		printf("isp%d: using %s space register mapping\n", unit,
		    pcs->pci_st == IO_SPACE_MAPPING? "I/O" : "Memory");

	data = pci_conf_read(cfid, PCI_ID_REG);
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	/*
	 * Set reasonable defaults first (this also keeps GCC from
	 * complaining about possibly uninitialized variables).
	 */
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	lim = BUS_SPACE_MAXSIZE_32BIT;
#ifndef	ISP_DISABLE_1020_SUPPORT
	if (data == PCI_QLOGIC_ISP) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
		lim = BUS_SPACE_MAXSIZE_24BIT;
	}
#endif
#ifndef	ISP_DISABLE_1080_SUPPORT
	if (data == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (data == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_12X0;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
#endif
#ifndef	ISP_DISABLE_2100_SUPPORT
	if (data == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		data = pci_conf_read(cfid, PCI_CLASS_REG);
		if ((data & 0xff) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
#endif
#ifndef	ISP_DISABLE_2200_SUPPORT
	if (data == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
#endif
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT);
	if (isp->isp_param == NULL) {
		printf("isp%d: cannot allocate parameter data\n", unit);
		free(pcs, M_DEVBUF);
		return;
	}
	bzero(isp->isp_param, psize);
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	(void) snprintf(isp->isp_name, sizeof (isp->isp_name), "isp%d", unit);
	isp->isp_osinfo.unit = unit;

	ISP_LOCK(isp);

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	data = pci_cfgread(cfid, PCIR_COMMAND, 2);
	data |=	PCIM_CMD_SEREN		|
		PCIM_CMD_PERRESPEN	|
		PCIM_CMD_BUSMASTEREN	|
		PCIM_CMD_INVEN;
	pci_cfgwrite(cfid, PCIR_COMMAND, data, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_cfgread(cfid, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		CFGPRINTF("%s: set PCI line size to %d\n", isp->isp_name, data);
		pci_cfgwrite(cfid, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_cfgread(cfid, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		CFGPRINTF("%s: set PCI latency to %d\n", isp->isp_name, data);
		pci_cfgwrite(cfid, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_cfgread(cfid, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_cfgwrite(cfid, PCIR_ROMADDR, data, 4);
	ISP_UNLOCK(isp);

	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, lim + 1,
	    255, lim, 0, &pcs->parent_dmat) != 0) {
		printf("%s: could not create master dma tag\n", isp->isp_name);
		free(pcs, M_DEVBUF);
		return;
	}
	if (pci_map_int(cfid, (void (*)(void *))isp_intr,
	    (void *)isp, &IMASK) == 0) {
		printf("%s: could not map interrupt\n", isp->isp_name);
		free(pcs, M_DEVBUF);
		return;
	}

	pcs->pci_id = cfid;
#ifdef	SCSI_ISP_NO_FWLOAD_MASK
	if (SCSI_ISP_NO_FWLOAD_MASK && (SCSI_ISP_NO_FWLOAD_MASK & (1 << unit)))
		isp->isp_confopts |= ISP_CFG_NORELOAD;
#endif
	if (getenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}

#ifdef	SCSI_ISP_NO_NVRAM_MASK
	if (SCSI_ISP_NO_NVRAM_MASK && (SCSI_ISP_NO_NVRAM_MASK & (1 << unit))) {
		printf("%s: ignoring NVRAM\n", isp->isp_name);
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
#endif
	if (getenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}

#ifdef	SCSI_ISP_FCDUPLEX
	if (IS_FC(isp)) {
		if (SCSI_ISP_FCDUPLEX && (SCSI_ISP_FCDUPLEX & (1 << unit))) {
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
		}
	}
#endif
	if (getenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}

	if (getenv_int("isp_seed", &isp->isp_osinfo.seed)) {
		isp->isp_osinfo.seed <<= 8;
		isp->isp_osinfo.seed += (unit + 1);
	} else {
		/*
		 * poor man's attempt at pseudo randomness.
		 */
		long i = (intptr_t) isp;

		i >>= 5;
		i &= 0x7;

		/*
		 * This isn't very random, but it's the best we can do for
		 * the real edge case of cards that don't have WWNs.
		 */
		isp->isp_osinfo.seed += ((int) cfid->bus) << 16;
		isp->isp_osinfo.seed += ((int) cfid->slot) << 8;
		isp->isp_osinfo.seed += ((int) cfid->func);
		while (version[i])
			isp->isp_osinfo.seed += (int) version[i++];
		isp->isp_osinfo.seed <<= 8;
		isp->isp_osinfo.seed += (unit + 1);
	}
	(void) getenv_int("isp_debug", &isp_debug);
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		(void) pci_unmap_int(cfid);
		ISP_UNLOCK(isp);
		free(pcs, M_DEVBUF);
		return;
	}
	isp_init(isp);
	if (isp->isp_state != ISP_INITSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			(void) pci_unmap_int(cfid); /* Does nothing */
			ISP_UNLOCK(isp);
			free(pcs, M_DEVBUF);
			return;
		}
	}
	isp_attach(isp);
	if (isp->isp_state != ISP_RUNSTATE) {
		/* If we're a Fibre Channel Card, we allow deferred attach */
		if (IS_SCSI(isp)) {
			isp_uninit(isp);
			(void) pci_unmap_int(cfid); /* Does nothing */
			ISP_UNLOCK(isp);
			free(pcs, M_DEVBUF);
			return;
		}
	}
	ISP_UNLOCK(isp);
}

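/*
 * Register accessors. A regoff value encodes which register block is being
 * addressed (selected via _BLK_REG_MASK/_BLK_REG_SHFT) plus a byte offset
 * within that block (the low 8 bits), so a read works out to (illustrative
 * sketch only):
 *
 *	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
 *	offset += (regoff & 0xff);
 *	value = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
 *
 * SXP (and, on the 1080/1240, DMA) registers are only visible after the
 * appropriate bank select bit has been set in BIU_CONF1, which is why the
 * accessors below save and restore that register around such accesses.
 */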
static u_int16_t
isp_pci_rd_reg(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf | BIU_PCI_CONF1_SXP);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		isp_pci_wr_reg(isp, BIU_CONF1, oldconf);
	}
}

#ifndef	ISP_DISABLE_1080_SUPPORT
static u_int16_t
isp_pci_rd_reg_1080(isp, regoff)
	struct ispsoftc *isp;
	int regoff;
{
	u_int16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	rv = bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(isp, regoff, val)
	struct ispsoftc *isp;
	int regoff;
	u_int16_t val;
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int offset, oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_SXP);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = isp_pci_rd_reg(isp, BIU_CONF1);
		isp_pci_wr_reg(isp, BIU_CONF1, oc | BIU_PCI1080_CONF1_DMA);
	}
	offset = pcs->pci_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT];
	offset += (regoff & 0xff);
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    ((regoff & _BLK_REG_MASK) == DMA_BLOCK)) {
		isp_pci_wr_reg(isp, BIU_CONF1, oc);
	}
}
#endif


static void isp_map_rquest __P((void *, bus_dma_segment_t *, int, int));
static void isp_map_result __P((void *, bus_dma_segment_t *, int, int));
static void isp_map_fcscrt __P((void *, bus_dma_segment_t *, int, int));

struct imush {
	struct ispsoftc *isp;
	int error;
};

static void
isp_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		imushp->isp->isp_rquest_dma = segs->ds_addr;
	}
}

static void
isp_map_result(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		imushp->isp->isp_result_dma = segs->ds_addr;
	}
}

static void
isp_map_fcscrt(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		fcparam *fcp = imushp->isp->isp_param;
		fcp->isp_scdma = segs->ds_addr;
	}
}

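/*
 * Allocate and dma-map the control space shared with the card. The single
 * contiguous bus_dmamem_alloc() region obtained below is carved up as
 *
 *	isp_rquest:	ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN) bytes	(request queue)
 *	isp_result:	ISP_QUEUE_SIZE(RESULT_QUEUE_LEN) bytes	(result queue)
 *	isp_scratch:	ISP2100_SCRLEN bytes			(FC cards only)
 *
 * and each piece is loaded separately so that the isp_map_* callbacks above
 * can record its bus address for the chip.
 */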
static int
isp_pci_mbxdma(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	caddr_t base;
	u_int32_t len;
	int i, error;
	bus_size_t lim;
	struct imush im;


	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	len = sizeof (ISP_SCSI_XFER_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(len, M_DEVBUF, M_WAITOK);
	if (isp->isp_xflist == NULL) {
		printf("%s: can't alloc xflist array\n", isp->isp_name);
		return (1);
	}
	bzero(isp->isp_xflist, len);
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pci->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pci->dmaps == NULL) {
		printf("%s: can't alloc dma maps\n", isp->isp_name);
		free(isp->isp_xflist, M_DEVBUF);
		return (1);
	}

	if (IS_FC(isp) || IS_1080(isp) || IS_12X0(isp))
		lim = BUS_SPACE_MAXADDR + 1;
	else
		lim = BUS_SPACE_MAXADDR_24BIT + 1;

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}
	if (bus_dma_tag_create(pci->parent_dmat, PAGE_SIZE, lim,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &pci->cntrol_dmat) != 0) {
		printf("%s: cannot create a dma tag for control spaces\n",
		    isp->isp_name);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}
	if (bus_dmamem_alloc(pci->cntrol_dmat, (void **)&base,
	    BUS_DMA_NOWAIT, &pci->cntrol_dmap) != 0) {
		printf("%s: cannot allocate %d bytes of CCB memory\n",
		    isp->isp_name, len);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		return (1);
	}

	isp->isp_rquest = base;
	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_rquest,
	    ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN), isp_map_rquest, &im, 0);
	if (im.error) {
		printf("%s: error %d loading dma map for DMA request queue\n",
		    isp->isp_name, im.error);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		isp->isp_rquest = NULL;
		return (1);
	}
	isp->isp_result = base + ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
	im.error = 0;
	bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap, isp->isp_result,
	    ISP_QUEUE_SIZE(RESULT_QUEUE_LEN), isp_map_result, &im, 0);
	if (im.error) {
		printf("%s: error %d loading dma map for DMA result queue\n",
		    isp->isp_name, im.error);
		free(isp->isp_xflist, M_DEVBUF);
		free(pci->dmaps, M_DEVBUF);
		isp->isp_rquest = NULL;
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pci->parent_dmat, 0, &pci->dmaps[i]);
		if (error) {
			printf("%s: error %d creating per-cmd DMA maps\n",
			    isp->isp_name, error);
			free(isp->isp_xflist, M_DEVBUF);
			free(pci->dmaps, M_DEVBUF);
			isp->isp_rquest = NULL;
			return (1);
		}
	}

	if (IS_FC(isp)) {
		fcparam *fcp = (fcparam *) isp->isp_param;
		fcp->isp_scratch = base +
			ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN) +
			ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
		im.error = 0;
		bus_dmamap_load(pci->cntrol_dmat, pci->cntrol_dmap,
		    fcp->isp_scratch, ISP2100_SCRLEN, isp_map_fcscrt, &im, 0);
		if (im.error) {
			printf("%s: error %d loading FC scratch area\n",
			    isp->isp_name, im.error);
			free(isp->isp_xflist, M_DEVBUF);
			free(pci->dmaps, M_DEVBUF);
			isp->isp_rquest = NULL;
			return (1);
		}
	}
	return (0);
}

static void dma2 __P((void *, bus_dma_segment_t *, int, int));
typedef struct {
	struct ispsoftc *isp;
	ISP_SCSI_XFER_T *ccb;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
	u_int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

static void
dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	ISP_SCSI_XFER_T *ccb;
	struct ispsoftc *isp;
	struct isp_pcisoftc *pci;
	bus_dmamap_t *dp;
	bus_dma_segment_t *eseg;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
	ispcontreq_t *crq;
	int drq, seglim, datalen;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	if (nseg < 1) {
		printf("%s: zero or negative segment count\n", isp->isp_name);
		mp->error = EFAULT;
		return;
	}
	ccb = mp->ccb;
	rq = mp->rq;
	iptrp = mp->iptrp;
	optr = mp->optr;
	pci = (struct isp_pcisoftc *)isp;
	dp = &pci->dmaps[rq->req_handle - 1];

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREREAD);
		drq = REQFLAG_DATA_IN;
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_PREWRITE);
		drq = REQFLAG_DATA_OUT;
	}

	datalen = XS_XFRLEN(ccb);
	if (IS_FC(isp)) {
		seglim = ISP_RQDSEG_T2;
		((ispreqt2_t *)rq)->req_totalcnt = datalen;
		((ispreqt2_t *)rq)->req_flags |= drq;
	} else {
		seglim = ISP_RQDSEG;
		rq->req_flags |= drq;
	}

	eseg = dm_segs + nseg;

	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			rq2->req_dataseg[rq2->req_seg_count].ds_base =
			    dm_segs->ds_addr;
			rq2->req_dataseg[rq2->req_seg_count].ds_count =
			    dm_segs->ds_len;
		} else {
			rq->req_dataseg[rq->req_seg_count].ds_base =
				dm_segs->ds_addr;
			rq->req_dataseg[rq->req_seg_count].ds_count =
				dm_segs->ds_len;
		}
		datalen -= dm_segs->ds_len;
#if	0
		if (IS_FC(isp)) {
			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_seg_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_count,
			    rq2->req_dataseg[rq2->req_seg_count].ds_base);
		} else {
			printf("%s: seg0[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_seg_count,
			    rq->req_dataseg[rq->req_seg_count].ds_count,
			    rq->req_dataseg[rq->req_seg_count].ds_base);
		}
#endif
		rq->req_seg_count++;
		dm_segs++;
	}

	while (datalen > 0 && dm_segs != eseg) {
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, *iptrp);
		*iptrp = ISP_NXT_QENTRY(*iptrp, RQUEST_QUEUE_LEN);
		if (*iptrp == optr) {
#if	0
			printf("%s: Request Queue Overflow++\n", isp->isp_name);
#endif
			mp->error = MUSHERR_NOQENTRIES;
			return;
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		seglim = 0;
		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
			crq->req_dataseg[seglim].ds_base =
			    dm_segs->ds_addr;
			crq->req_dataseg[seglim].ds_count =
			    dm_segs->ds_len;
#if	0
			printf("%s: seg%d[%d] cnt 0x%x paddr 0x%08x\n",
			    isp->isp_name, rq->req_header.rqs_entry_count-1,
			    seglim, crq->req_dataseg[seglim].ds_count,
			    crq->req_dataseg[seglim].ds_base);
#endif
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
		}
	}
}
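
/*
 * dma2() above packs at most ISP_RQDSEG (ISP_RQDSEG_T2 for Fibre Channel)
 * segments into the primary request entry; any remaining segments spill
 * into RQSTYPE_DATASEG continuation entries pulled from the request queue,
 * and MUSHERR_NOQENTRIES is reported if doing so would run the input
 * pointer into the output pointer.
 */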

static int
isp_pci_dmasetup(struct ispsoftc *isp, ISP_SCSI_XFER_T *ccb, ispreq_t *rq,
	u_int8_t *iptrp, u_int8_t optr)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	struct ccb_hdr *ccb_h;
	struct ccb_scsiio *csio;
	bus_dmamap_t *dp = NULL;
	mush_t mush, *mp;

	csio = (struct ccb_scsiio *) ccb;
	ccb_h = &csio->ccb_h;

	if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_NONE) {
		rq->req_seg_count = 1;
		return (CMD_QUEUED);
	}

	/*
	 * Do a virtual grapevine step to collect info for
	 * the callback dma allocation that we have to use...
	 */
	mp = &mush;
	mp->isp = isp;
	mp->ccb = ccb;
	mp->rq = rq;
	mp->iptrp = iptrp;
	mp->optr = optr;
	mp->error = 0;

	if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
		if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
			int error, s;
			dp = &pci->dmaps[rq->req_handle - 1];
			s = splsoftvm();
			error = bus_dmamap_load(pci->parent_dmat, *dp,
			    csio->data_ptr, csio->dxfer_len, dma2, mp, 0);
			if (error == EINPROGRESS) {
				bus_dmamap_unload(pci->parent_dmat, *dp);
				mp->error = EINVAL;
				printf("%s: deferred dma allocation not "
				    "supported\n", isp->isp_name);
			} else if (error && mp->error == 0) {
#ifdef	DIAGNOSTIC
				printf("%s: error %d in dma mapping code\n",
				    isp->isp_name, error);
#endif
				mp->error = error;
			}
			splx(s);
		} else {
			/* Pointer to physical buffer */
			struct bus_dma_segment seg;
			seg.ds_addr = (bus_addr_t)csio->data_ptr;
			seg.ds_len = csio->dxfer_len;
			dma2(mp, &seg, 1, 0);
		}
	} else {
		struct bus_dma_segment *segs;

		if ((ccb_h->flags & CAM_DATA_PHYS) != 0) {
			printf("%s: Physical segment pointers unsupported\n",
				isp->isp_name);
			mp->error = EINVAL;
		} else if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0) {
			printf("%s: Virtual segment addresses unsupported\n",
				isp->isp_name);
			mp->error = EINVAL;
		} else {
			/* Just use the segments provided */
			segs = (struct bus_dma_segment *) csio->data_ptr;
			dma2(mp, segs, csio->sglist_cnt, 0);
		}
	}
	if (mp->error) {
		int retval = CMD_COMPLETE;
		if (mp->error == MUSHERR_NOQENTRIES) {
			retval = CMD_EAGAIN;
		} else if (mp->error == EFBIG) {
			XS_SETERR(csio, CAM_REQ_TOO_BIG);
		} else if (mp->error == EINVAL) {
			XS_SETERR(csio, CAM_REQ_INVALID);
		} else {
			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
		}
		return (retval);
	} else {
		/*
		 * Check to see if we weren't cancelled while sleeping on
		 * getting DMA resources...
		 */
		if ((ccb_h->status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			if (dp) {
				bus_dmamap_unload(pci->parent_dmat, *dp);
			}
			return (CMD_COMPLETE);
		}
		return (CMD_QUEUED);
	}
}
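
/*
 * Summary of isp_pci_dmasetup() return values: CMD_QUEUED when the request
 * (and any continuation entries) has been set up, CMD_EAGAIN when the
 * request queue was too full to hold the continuation entries (the caller
 * may retry later), and CMD_COMPLETE with a CAM error already set in the
 * CCB for hard failures.
 */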

static void
isp_pci_dmateardown(struct ispsoftc *isp, ISP_SCSI_XFER_T *xs, u_int32_t handle)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t *dp = &pci->dmaps[handle - 1];
	KASSERT((handle > 0 && handle <= isp->isp_maxcmds),
1159 	    ("bad handle in isp_pci_dmateardonw"));
	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(pci->parent_dmat, *dp, BUS_DMASYNC_POSTWRITE);
	}
	bus_dmamap_unload(pci->parent_dmat, *dp);
}


static void
isp_pci_reset1(struct ispsoftc *isp)
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}

static void
isp_pci_dumpregs(struct ispsoftc *isp)
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	printf("%s: PCI Status Command/Status=%lx\n", pci->pci_isp.isp_name,
	    pci_conf_read(pci->pci_id, PCIR_COMMAND));
}
1183