xref: /freebsd/sys/dev/isp/isp_pci.c (revision a5921bc3653e2e286715e6fe8d473ec0d02da38c)
1 /*-
2  * Copyright (c) 1997-2008 by Matthew Jacob
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice immediately at the beginning of the file, without modification,
10  *    this list of conditions, and the following disclaimer.
11  * 2. The name of the author may not be used to endorse or promote products
12  *    derived from this software without specific prior written permission.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
18  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26 /*
27  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
28  * FreeBSD Version.
29  */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/linker.h>
38 #include <sys/firmware.h>
39 #include <sys/bus.h>
40 #include <sys/stdint.h>
41 #include <dev/pci/pcireg.h>
42 #include <dev/pci/pcivar.h>
43 #include <machine/bus.h>
44 #include <machine/resource.h>
45 #include <sys/rman.h>
46 #include <sys/malloc.h>
47 #include <sys/uio.h>
48 
49 #ifdef __sparc64__
50 #include <dev/ofw/openfirm.h>
51 #include <machine/ofw_machdep.h>
52 #endif
53 
54 #include <dev/isp/isp_freebsd.h>
55 
56 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
57 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
58 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
59 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
60 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
61 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
62 static uint32_t isp_pci_rd_reg_2600(ispsoftc_t *, int);
63 static void isp_pci_wr_reg_2600(ispsoftc_t *, int, uint32_t);
64 static int isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
65 static int isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
66 static int isp_pci_rd_isr_2400(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
67 static int isp_pci_mbxdma(ispsoftc_t *);
68 static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);
69 
70 
71 static void isp_pci_reset0(ispsoftc_t *);
72 static void isp_pci_reset1(ispsoftc_t *);
73 static void isp_pci_dumpregs(ispsoftc_t *, const char *);
74 
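/*
 * Per-chip-family operation vectors: interrupt status read, register
 * accessors, mailbox/queue DMA setup, per-command DMA setup and teardown,
 * reset hooks, register dump, firmware image pointer and default BIU
 * configuration flags.
 */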
75 static struct ispmdvec mdvec = {
76 	isp_pci_rd_isr,
77 	isp_pci_rd_reg,
78 	isp_pci_wr_reg,
79 	isp_pci_mbxdma,
80 	isp_pci_dmasetup,
81 	isp_common_dmateardown,
82 	isp_pci_reset0,
83 	isp_pci_reset1,
84 	isp_pci_dumpregs,
85 	NULL,
86 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
87 };
88 
89 static struct ispmdvec mdvec_1080 = {
90 	isp_pci_rd_isr,
91 	isp_pci_rd_reg_1080,
92 	isp_pci_wr_reg_1080,
93 	isp_pci_mbxdma,
94 	isp_pci_dmasetup,
95 	isp_common_dmateardown,
96 	isp_pci_reset0,
97 	isp_pci_reset1,
98 	isp_pci_dumpregs,
99 	NULL,
100 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
101 };
102 
103 static struct ispmdvec mdvec_12160 = {
104 	isp_pci_rd_isr,
105 	isp_pci_rd_reg_1080,
106 	isp_pci_wr_reg_1080,
107 	isp_pci_mbxdma,
108 	isp_pci_dmasetup,
109 	isp_common_dmateardown,
110 	isp_pci_reset0,
111 	isp_pci_reset1,
112 	isp_pci_dumpregs,
113 	NULL,
114 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
115 };
116 
117 static struct ispmdvec mdvec_2100 = {
118 	isp_pci_rd_isr,
119 	isp_pci_rd_reg,
120 	isp_pci_wr_reg,
121 	isp_pci_mbxdma,
122 	isp_pci_dmasetup,
123 	isp_common_dmateardown,
124 	isp_pci_reset0,
125 	isp_pci_reset1,
126 	isp_pci_dumpregs
127 };
128 
129 static struct ispmdvec mdvec_2200 = {
130 	isp_pci_rd_isr,
131 	isp_pci_rd_reg,
132 	isp_pci_wr_reg,
133 	isp_pci_mbxdma,
134 	isp_pci_dmasetup,
135 	isp_common_dmateardown,
136 	isp_pci_reset0,
137 	isp_pci_reset1,
138 	isp_pci_dumpregs
139 };
140 
141 static struct ispmdvec mdvec_2300 = {
142 	isp_pci_rd_isr_2300,
143 	isp_pci_rd_reg,
144 	isp_pci_wr_reg,
145 	isp_pci_mbxdma,
146 	isp_pci_dmasetup,
147 	isp_common_dmateardown,
148 	isp_pci_reset0,
149 	isp_pci_reset1,
150 	isp_pci_dumpregs
151 };
152 
153 static struct ispmdvec mdvec_2400 = {
154 	isp_pci_rd_isr_2400,
155 	isp_pci_rd_reg_2400,
156 	isp_pci_wr_reg_2400,
157 	isp_pci_mbxdma,
158 	isp_pci_dmasetup,
159 	isp_common_dmateardown,
160 	isp_pci_reset0,
161 	isp_pci_reset1,
162 	NULL
163 };
164 
165 static struct ispmdvec mdvec_2500 = {
166 	isp_pci_rd_isr_2400,
167 	isp_pci_rd_reg_2400,
168 	isp_pci_wr_reg_2400,
169 	isp_pci_mbxdma,
170 	isp_pci_dmasetup,
171 	isp_common_dmateardown,
172 	isp_pci_reset0,
173 	isp_pci_reset1,
174 	NULL
175 };
176 
177 static struct ispmdvec mdvec_2600 = {
178 	isp_pci_rd_isr_2400,
179 	isp_pci_rd_reg_2600,
180 	isp_pci_wr_reg_2600,
181 	isp_pci_mbxdma,
182 	isp_pci_dmasetup,
183 	isp_common_dmateardown,
184 	isp_pci_reset0,
185 	isp_pci_reset1,
186 	NULL
187 };
188 
189 #ifndef	PCIM_CMD_INVEN
190 #define	PCIM_CMD_INVEN			0x10
191 #endif
192 #ifndef	PCIM_CMD_BUSMASTEREN
193 #define	PCIM_CMD_BUSMASTEREN		0x0004
194 #endif
195 #ifndef	PCIM_CMD_PERRESPEN
196 #define	PCIM_CMD_PERRESPEN		0x0040
197 #endif
198 #ifndef	PCIM_CMD_SEREN
199 #define	PCIM_CMD_SEREN			0x0100
200 #endif
201 #ifndef	PCIM_CMD_INTX_DISABLE
202 #define	PCIM_CMD_INTX_DISABLE		0x0400
203 #endif
204 
205 #ifndef	PCIR_COMMAND
206 #define	PCIR_COMMAND			0x04
207 #endif
208 
209 #ifndef	PCIR_CACHELNSZ
210 #define	PCIR_CACHELNSZ			0x0c
211 #endif
212 
213 #ifndef	PCIR_LATTIMER
214 #define	PCIR_LATTIMER			0x0d
215 #endif
216 
217 #ifndef	PCIR_ROMADDR
218 #define	PCIR_ROMADDR			0x30
219 #endif
220 
221 #ifndef	PCI_VENDOR_QLOGIC
222 #define	PCI_VENDOR_QLOGIC		0x1077
223 #endif
224 
225 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
226 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
227 #endif
228 
229 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
230 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
231 #endif
232 
233 #ifndef	PCI_PRODUCT_QLOGIC_ISP10160
234 #define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
235 #endif
236 
237 #ifndef	PCI_PRODUCT_QLOGIC_ISP12160
238 #define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
239 #endif
240 
241 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
242 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
243 #endif
244 
245 #ifndef	PCI_PRODUCT_QLOGIC_ISP1280
246 #define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
247 #endif
248 
249 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
250 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
251 #endif
252 
253 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
254 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
255 #endif
256 
257 #ifndef	PCI_PRODUCT_QLOGIC_ISP2300
258 #define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
259 #endif
260 
261 #ifndef	PCI_PRODUCT_QLOGIC_ISP2312
262 #define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
263 #endif
264 
265 #ifndef	PCI_PRODUCT_QLOGIC_ISP2322
266 #define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
267 #endif
268 
269 #ifndef	PCI_PRODUCT_QLOGIC_ISP2422
270 #define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
271 #endif
272 
273 #ifndef	PCI_PRODUCT_QLOGIC_ISP2432
274 #define	PCI_PRODUCT_QLOGIC_ISP2432	0x2432
275 #endif
276 
277 #ifndef	PCI_PRODUCT_QLOGIC_ISP2532
278 #define	PCI_PRODUCT_QLOGIC_ISP2532	0x2532
279 #endif
280 
281 #ifndef	PCI_PRODUCT_QLOGIC_ISP6312
282 #define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
283 #endif
284 
285 #ifndef	PCI_PRODUCT_QLOGIC_ISP6322
286 #define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
287 #endif
288 
289 #ifndef	PCI_PRODUCT_QLOGIC_ISP5432
290 #define	PCI_PRODUCT_QLOGIC_ISP5432	0x5432
291 #endif
292 
293 #ifndef	PCI_PRODUCT_QLOGIC_ISP2031
294 #define	PCI_PRODUCT_QLOGIC_ISP2031	0x2031
295 #endif
296 
297 #ifndef	PCI_PRODUCT_QLOGIC_ISP8031
298 #define	PCI_PRODUCT_QLOGIC_ISP8031	0x8031
299 #endif
300 
301 #define	PCI_QLOGIC_ISP5432	\
302 	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)
303 
304 #define	PCI_QLOGIC_ISP1020	\
305 	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
306 
307 #define	PCI_QLOGIC_ISP1080	\
308 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
309 
310 #define	PCI_QLOGIC_ISP10160	\
311 	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
312 
313 #define	PCI_QLOGIC_ISP12160	\
314 	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
315 
316 #define	PCI_QLOGIC_ISP1240	\
317 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
318 
319 #define	PCI_QLOGIC_ISP1280	\
320 	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
321 
322 #define	PCI_QLOGIC_ISP2100	\
323 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
324 
325 #define	PCI_QLOGIC_ISP2200	\
326 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
327 
328 #define	PCI_QLOGIC_ISP2300	\
329 	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
330 
331 #define	PCI_QLOGIC_ISP2312	\
332 	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
333 
334 #define	PCI_QLOGIC_ISP2322	\
335 	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
336 
337 #define	PCI_QLOGIC_ISP2422	\
338 	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
339 
340 #define	PCI_QLOGIC_ISP2432	\
341 	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)
342 
343 #define	PCI_QLOGIC_ISP2532	\
344 	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)
345 
346 #define	PCI_QLOGIC_ISP6312	\
347 	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
348 
349 #define	PCI_QLOGIC_ISP6322	\
350 	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
351 
352 #define	PCI_QLOGIC_ISP2031	\
353 	((PCI_PRODUCT_QLOGIC_ISP2031 << 16) | PCI_VENDOR_QLOGIC)
354 
355 #define	PCI_QLOGIC_ISP8031	\
356 	((PCI_PRODUCT_QLOGIC_ISP8031 << 16) | PCI_VENDOR_QLOGIC)
357 
358 /*
359  * Odd case for some AMI RAID cards... We need to *not* attach to these.
360  */
361 #define	AMI_RAID_SUBVENDOR_ID	0x101e
362 
363 #define	PCI_DFLT_LTNCY	0x40
364 #define	PCI_DFLT_LNSZ	0x10
365 
366 static int isp_pci_probe (device_t);
367 static int isp_pci_attach (device_t);
368 static int isp_pci_detach (device_t);
369 
370 
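/*
 * Per-instance PCI softc.  The rtp/rgd (and rtp1/rgd1, rtp2/rgd2) pairs
 * record the resource type and rid of each mapped register BAR so the
 * detach and error paths can release them; pci_poff[] holds the per-chip
 * register block offsets consumed by IspVirt2Off().
 */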
371 #define	ISP_PCD(isp)	((struct isp_pcisoftc *)isp)->pci_dev
372 struct isp_pcisoftc {
373 	ispsoftc_t			pci_isp;
374 	device_t			pci_dev;
375 	struct resource *		regs;
376 	struct resource *		regs1;
377 	struct resource *		regs2;
378 	void *				irq;
379 	int				iqd;
380 	int				rtp;
381 	int				rgd;
382 	int				rtp1;
383 	int				rgd1;
384 	int				rtp2;
385 	int				rgd2;
386 	void *				ih;
387 	int16_t				pci_poff[_NREG_BLKS];
388 	bus_dma_tag_t			dmat;
389 	int				msicount;
390 };
391 
392 
393 static device_method_t isp_pci_methods[] = {
394 	/* Device interface */
395 	DEVMETHOD(device_probe,		isp_pci_probe),
396 	DEVMETHOD(device_attach,	isp_pci_attach),
397 	DEVMETHOD(device_detach,	isp_pci_detach),
398 	{ 0, 0 }
399 };
400 
401 static driver_t isp_pci_driver = {
402 	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
403 };
404 static devclass_t isp_devclass;
405 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
406 MODULE_DEPEND(isp, cam, 1, 1, 1);
407 MODULE_DEPEND(isp, firmware, 1, 1, 1);
408 static int isp_nvports = 0;
409 
410 static int
411 isp_pci_probe(device_t dev)
412 {
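	/*
	 * Match on the combined (device << 16) | vendor ID, the same
	 * encoding used by the PCI_QLOGIC_* constants above.
	 */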
413 	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
414 	case PCI_QLOGIC_ISP1020:
415 		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
416 		break;
417 	case PCI_QLOGIC_ISP1080:
418 		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
419 		break;
420 	case PCI_QLOGIC_ISP1240:
421 		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
422 		break;
423 	case PCI_QLOGIC_ISP1280:
424 		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
425 		break;
426 	case PCI_QLOGIC_ISP10160:
427 		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
428 		break;
429 	case PCI_QLOGIC_ISP12160:
430 		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
431 			return (ENXIO);
432 		}
433 		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
434 		break;
435 	case PCI_QLOGIC_ISP2100:
436 		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
437 		break;
438 	case PCI_QLOGIC_ISP2200:
439 		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
440 		break;
441 	case PCI_QLOGIC_ISP2300:
442 		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
443 		break;
444 	case PCI_QLOGIC_ISP2312:
445 		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
446 		break;
447 	case PCI_QLOGIC_ISP2322:
448 		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
449 		break;
450 	case PCI_QLOGIC_ISP2422:
451 		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
452 		break;
453 	case PCI_QLOGIC_ISP2432:
454 		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
455 		break;
456 	case PCI_QLOGIC_ISP2532:
457 		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
458 		break;
459 	case PCI_QLOGIC_ISP5432:
460 		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
461 		break;
462 	case PCI_QLOGIC_ISP6312:
463 		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
464 		break;
465 	case PCI_QLOGIC_ISP6322:
466 		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
467 		break;
468 	case PCI_QLOGIC_ISP2031:
469 		device_set_desc(dev, "Qlogic ISP 2031 PCI FC-AL Adapter");
470 		break;
471 	case PCI_QLOGIC_ISP8031:
472 		device_set_desc(dev, "Qlogic ISP 8031 PCI FCoE Adapter");
473 		break;
474 	default:
475 		return (ENXIO);
476 	}
477 	if (isp_announced == 0 && bootverbose) {
478 		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
479 		    "Core Version %d.%d\n",
480 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
481 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
482 		isp_announced++;
483 	}
484 	/*
485 	 * XXXX: Here is where we might load the f/w module
486 	 * XXXX: (or increase a reference count to it).
487 	 */
488 	return (BUS_PROBE_DEFAULT);
489 }
490 
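/*
 * Parse the instance-wide loader hints (hint.isp.<unit>.*): firmware
 * reload and NVRAM overrides, debug level, number of virtual ports and
 * the quickboot time.
 */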
491 static void
492 isp_get_generic_options(device_t dev, ispsoftc_t *isp)
493 {
494 	int tval;
495 
496 	tval = 0;
497 	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
498 		isp->isp_confopts |= ISP_CFG_NORELOAD;
499 	}
500 	tval = 0;
501 	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
502 		isp->isp_confopts |= ISP_CFG_NONVRAM;
503 	}
504 	tval = 0;
505 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
506 	if (tval) {
507 		isp->isp_dblev = tval;
508 	} else {
509 		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
510 	}
511 	if (bootverbose) {
512 		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
513 	}
514 	tval = -1;
515 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
516 	if (tval > 0 && tval <= 254) {
517 		isp_nvports = tval;
518 	}
519 	tval = 7;
520 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
521 	isp_quickboot_time = tval;
522 }
523 
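/*
 * Parse per-channel hints.  Channel 0 uses plain hint names
 * (hint.isp.<unit>.iid, .role, ...); higher channels get a "chanN."
 * prefix (hint.isp.<unit>.chan1.role and so on).
 */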
524 static void
525 isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
526 {
527 	const char *sptr;
528 	int tval = 0;
529 	char prefix[12], name[16];
530 
531 	if (chan == 0)
532 		prefix[0] = 0;
533 	else
534 		snprintf(prefix, sizeof(prefix), "chan%d.", chan);
535 	snprintf(name, sizeof(name), "%siid", prefix);
536 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
537 	    name, &tval)) {
538 		if (IS_FC(isp)) {
539 			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
540 		} else {
541 #ifdef __sparc64__
542 			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
543 #else
544 			ISP_SPI_PC(isp, chan)->iid = 7;
545 #endif
546 		}
547 	} else {
548 		if (IS_FC(isp)) {
549 			ISP_FC_PC(isp, chan)->default_id = tval - chan;
550 		} else {
551 			ISP_SPI_PC(isp, chan)->iid = tval;
552 		}
553 		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
554 	}
555 
556 	if (IS_SCSI(isp))
557 		return;
558 
559 	tval = -1;
560 	snprintf(name, sizeof(name), "%srole", prefix);
561 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
562 	    name, &tval) == 0) {
563 		switch (tval) {
564 		case ISP_ROLE_NONE:
565 		case ISP_ROLE_INITIATOR:
566 		case ISP_ROLE_TARGET:
567 		case ISP_ROLE_BOTH:
568 			device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
569 			break;
570 		default:
571 			tval = -1;
572 			break;
573 		}
574 	}
575 	if (tval == -1) {
576 		tval = ISP_DEFAULT_ROLES;
577 	}
578 	ISP_FC_PC(isp, chan)->def_role = tval;
579 
580 	tval = 0;
581 	snprintf(name, sizeof(name), "%sfullduplex", prefix);
582 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
583 	    name, &tval) == 0 && tval != 0) {
584 		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
585 	}
586 	sptr = 0;
587 	snprintf(name, sizeof(name), "%stopology", prefix);
588 	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
589 	    name, (const char **) &sptr) == 0 && sptr != 0) {
590 		if (strcmp(sptr, "lport") == 0) {
591 			isp->isp_confopts |= ISP_CFG_LPORT;
592 		} else if (strcmp(sptr, "nport") == 0) {
593 			isp->isp_confopts |= ISP_CFG_NPORT;
594 		} else if (strcmp(sptr, "lport-only") == 0) {
595 			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
596 		} else if (strcmp(sptr, "nport-only") == 0) {
597 			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
598 		}
599 	}
600 
601 	tval = 0;
602 	snprintf(name, sizeof(name), "%snofctape", prefix);
603 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
604 	    name, &tval);
605 	if (tval) {
606 		isp->isp_confopts |= ISP_CFG_NOFCTAPE;
607 	}
608 
609 	tval = 0;
610 	snprintf(name, sizeof(name), "%sfctape", prefix);
611 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
612 	    name, &tval);
613 	if (tval) {
614 		isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
615 		isp->isp_confopts |= ISP_CFG_FCTAPE;
616 	}
617 
618 
619 	/*
620 	 * Because the resource_*_value functions can neither return
621 	 * 64 bit integer values, nor can they be directly coerced
622 	 * to interpret the right hand side of the assignment as
623 	 * you want them to interpret it, we have to force WWN
624 	 * hint replacement to specify WWN strings with a leading
625 	 * 'w' (e.g. w50000000aaaa0001). Sigh.
626 	 */
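	/*
	 * For example (illustrative values only), a loader hint such as
	 *	hint.isp.0.portwwn="w50000000aaaa0001"
	 * (or hint.isp.0.chan1.portwwn=... for a virtual port channel)
	 * overrides the default port WWN for that channel.
	 */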
627 	sptr = 0;
628 	snprintf(name, sizeof(name), "%sportwwn", prefix);
629 	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
630 	    name, (const char **) &sptr);
631 	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
632 		char *eptr = 0;
633 		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
634 		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
635 			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
636 			ISP_FC_PC(isp, chan)->def_wwpn = 0;
637 		}
638 	}
639 
640 	sptr = 0;
641 	snprintf(name, sizeof(name), "%snodewwn", prefix);
642 	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
643 	    name, (const char **) &sptr);
644 	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
645 		char *eptr = 0;
646 		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
647 		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
648 			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
649 			ISP_FC_PC(isp, chan)->def_wwnn = 0;
650 		}
651 	}
652 
653 	tval = -1;
654 	snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
655 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
656 	    name, &tval);
657 	if (tval >= 0 && tval < 0xffff) {
658 		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
659 	} else {
660 		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
661 	}
662 
663 	tval = -1;
664 	snprintf(name, sizeof(name), "%sgone_device_time", prefix);
665 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
666 	    name, &tval);
667 	if (tval >= 0 && tval < 0xffff) {
668 		ISP_FC_PC(isp, chan)->gone_device_time = tval;
669 	} else {
670 		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
671 	}
672 }
673 
674 static int
675 isp_pci_attach(device_t dev)
676 {
677 	int i, locksetup = 0;
678 	uint32_t data, cmd, linesz, did;
679 	struct isp_pcisoftc *pcs;
680 	ispsoftc_t *isp;
681 	size_t psize, xsize;
682 	char fwname[32];
683 
684 	pcs = device_get_softc(dev);
685 	if (pcs == NULL) {
686 		device_printf(dev, "cannot get softc\n");
687 		return (ENOMEM);
688 	}
689 	memset(pcs, 0, sizeof (*pcs));
690 
691 	pcs->pci_dev = dev;
692 	isp = &pcs->pci_isp;
693 	isp->isp_dev = dev;
694 	isp->isp_nchan = 1;
695 	if (sizeof (bus_addr_t) > 4)
696 		isp->isp_osinfo.sixtyfourbit = 1;
697 
698 	/*
699 	 * Get Generic Options
700 	 */
701 	isp_nvports = 0;
702 	isp_get_generic_options(dev, isp);
703 
704 	linesz = PCI_DFLT_LNSZ;
705 	pcs->irq = pcs->regs = pcs->regs2 = NULL;
706 	pcs->rgd = pcs->rtp = pcs->iqd = 0;
707 
708 	pcs->pci_dev = dev;
709 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
710 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
711 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
712 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
713 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
714 
715 	switch (pci_get_devid(dev)) {
716 	case PCI_QLOGIC_ISP1020:
717 		did = 0x1040;
718 		isp->isp_mdvec = &mdvec;
719 		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
720 		break;
721 	case PCI_QLOGIC_ISP1080:
722 		did = 0x1080;
723 		isp->isp_mdvec = &mdvec_1080;
724 		isp->isp_type = ISP_HA_SCSI_1080;
725 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
726 		break;
727 	case PCI_QLOGIC_ISP1240:
728 		did = 0x1080;
729 		isp->isp_mdvec = &mdvec_1080;
730 		isp->isp_type = ISP_HA_SCSI_1240;
731 		isp->isp_nchan = 2;
732 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
733 		break;
734 	case PCI_QLOGIC_ISP1280:
735 		did = 0x1080;
736 		isp->isp_mdvec = &mdvec_1080;
737 		isp->isp_type = ISP_HA_SCSI_1280;
738 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
739 		break;
740 	case PCI_QLOGIC_ISP10160:
741 		did = 0x12160;
742 		isp->isp_mdvec = &mdvec_12160;
743 		isp->isp_type = ISP_HA_SCSI_10160;
744 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
745 		break;
746 	case PCI_QLOGIC_ISP12160:
747 		did = 0x12160;
748 		isp->isp_nchan = 2;
749 		isp->isp_mdvec = &mdvec_12160;
750 		isp->isp_type = ISP_HA_SCSI_12160;
751 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
752 		break;
753 	case PCI_QLOGIC_ISP2100:
754 		did = 0x2100;
755 		isp->isp_mdvec = &mdvec_2100;
756 		isp->isp_type = ISP_HA_FC_2100;
757 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
758 		if (pci_get_revid(dev) < 3) {
759 			/*
760 			 * XXX: Need to get the actual revision
761 			 * XXX: number of the 2100 FB. At any rate,
762 			 * XXX: lower cache line size for early revision
763 			 * XXX: boards.
764 			 */
765 			linesz = 1;
766 		}
767 		break;
768 	case PCI_QLOGIC_ISP2200:
769 		did = 0x2200;
770 		isp->isp_mdvec = &mdvec_2200;
771 		isp->isp_type = ISP_HA_FC_2200;
772 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
773 		break;
774 	case PCI_QLOGIC_ISP2300:
775 		did = 0x2300;
776 		isp->isp_mdvec = &mdvec_2300;
777 		isp->isp_type = ISP_HA_FC_2300;
778 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
779 		break;
780 	case PCI_QLOGIC_ISP2312:
781 	case PCI_QLOGIC_ISP6312:
782 		did = 0x2300;
783 		isp->isp_mdvec = &mdvec_2300;
784 		isp->isp_type = ISP_HA_FC_2312;
785 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
786 		break;
787 	case PCI_QLOGIC_ISP2322:
788 	case PCI_QLOGIC_ISP6322:
789 		did = 0x2322;
790 		isp->isp_mdvec = &mdvec_2300;
791 		isp->isp_type = ISP_HA_FC_2322;
792 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
793 		break;
794 	case PCI_QLOGIC_ISP2422:
795 	case PCI_QLOGIC_ISP2432:
796 		did = 0x2400;
797 		isp->isp_nchan += isp_nvports;
798 		isp->isp_mdvec = &mdvec_2400;
799 		isp->isp_type = ISP_HA_FC_2400;
800 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
801 		break;
802 	case PCI_QLOGIC_ISP2532:
803 		did = 0x2500;
804 		isp->isp_nchan += isp_nvports;
805 		isp->isp_mdvec = &mdvec_2500;
806 		isp->isp_type = ISP_HA_FC_2500;
807 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
808 		break;
809 	case PCI_QLOGIC_ISP5432:
810 		did = 0x2500;
811 		isp->isp_mdvec = &mdvec_2500;
812 		isp->isp_type = ISP_HA_FC_2500;
813 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
814 		break;
815 	case PCI_QLOGIC_ISP2031:
816 	case PCI_QLOGIC_ISP8031:
817 		did = 0x2600;
818 		isp->isp_nchan += isp_nvports;
819 		isp->isp_mdvec = &mdvec_2600;
820 		isp->isp_type = ISP_HA_FC_2600;
821 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
822 		break;
823 	default:
824 		device_printf(dev, "unknown device type\n");
825 		goto bad;
826 		break;
827 	}
828 	isp->isp_revision = pci_get_revid(dev);
829 
830 	if (IS_26XX(isp)) {
831 		pcs->rtp = SYS_RES_MEMORY;
832 		pcs->rgd = PCIR_BAR(0);
833 		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
834 		    RF_ACTIVE);
835 		pcs->rtp1 = SYS_RES_MEMORY;
836 		pcs->rgd1 = PCIR_BAR(2);
837 		pcs->regs1 = bus_alloc_resource_any(dev, pcs->rtp1, &pcs->rgd1,
838 		    RF_ACTIVE);
839 		pcs->rtp2 = SYS_RES_MEMORY;
840 		pcs->rgd2 = PCIR_BAR(4);
841 		pcs->regs2 = bus_alloc_resource_any(dev, pcs->rtp2, &pcs->rgd2,
842 		    RF_ACTIVE);
843 	} else {
844 		pcs->rtp = SYS_RES_MEMORY;
845 		pcs->rgd = PCIR_BAR(1);
846 		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
847 		    RF_ACTIVE);
848 		if (pcs->regs == NULL) {
849 			pcs->rtp = SYS_RES_IOPORT;
850 			pcs->rgd = PCIR_BAR(0);
851 			pcs->regs = bus_alloc_resource_any(dev, pcs->rtp,
852 			    &pcs->rgd, RF_ACTIVE);
853 		}
854 	}
855 	if (pcs->regs == NULL) {
856 		device_printf(dev, "Unable to map any ports\n");
857 		goto bad;
858 	}
859 	if (bootverbose) {
860 		device_printf(dev, "Using %s space register mapping\n",
861 		    (pcs->rtp == SYS_RES_IOPORT)? "I/O" : "Memory");
862 	}
863 	isp->isp_regs = pcs->regs;
864 	isp->isp_regs2 = pcs->regs2;
865 
866 	if (IS_FC(isp)) {
867 		psize = sizeof (fcparam);
868 		xsize = sizeof (struct isp_fc);
869 	} else {
870 		psize = sizeof (sdparam);
871 		xsize = sizeof (struct isp_spi);
872 	}
873 	psize *= isp->isp_nchan;
874 	xsize *= isp->isp_nchan;
875 	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
876 	if (isp->isp_param == NULL) {
877 		device_printf(dev, "cannot allocate parameter data\n");
878 		goto bad;
879 	}
880 	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
881 	if (isp->isp_osinfo.pc.ptr == NULL) {
882 		device_printf(dev, "cannot allocate parameter data\n");
883 		goto bad;
884 	}
885 
886 	/*
887 	 * Now that we know who we are (roughly) get/set specific options
888 	 */
889 	for (i = 0; i < isp->isp_nchan; i++) {
890 		isp_get_specific_options(dev, i, isp);
891 	}
892 
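	/*
	 * Try to find a loadable firmware image for this chip family.  The
	 * image name is derived from the device id (e.g. "isp_2400" for a
	 * 2422/2432); such images are normally provided by the ispfw(4)
	 * modules.  If none is found, the firmware already resident on the
	 * card is used.
	 */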
893 	isp->isp_osinfo.fw = NULL;
894 	if (isp->isp_osinfo.fw == NULL) {
895 		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
896 		isp->isp_osinfo.fw = firmware_get(fwname);
897 	}
898 	if (isp->isp_osinfo.fw != NULL) {
899 		isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
900 		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
901 	}
902 
903 	/*
904 	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
905 	 */
906 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
907 	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
908 	if (IS_2300(isp)) {	/* per QLogic errata */
909 		cmd &= ~PCIM_CMD_INVEN;
910 	}
911 	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
912 		cmd &= ~PCIM_CMD_INTX_DISABLE;
913 	}
914 	if (IS_24XX(isp)) {
915 		cmd &= ~PCIM_CMD_INTX_DISABLE;
916 	}
917 	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
918 
919 	/*
920 	 * Make sure the Cache Line Size register is set sensibly.
921 	 */
922 	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
923 	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
924 		isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
925 		data = linesz;
926 		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
927 	}
928 
929 	/*
930 	 * Make sure the Latency Timer is sane.
931 	 */
932 	data = pci_read_config(dev, PCIR_LATTIMER, 1);
933 	if (data < PCI_DFLT_LTNCY) {
934 		data = PCI_DFLT_LTNCY;
935 		isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
936 		pci_write_config(dev, PCIR_LATTIMER, data, 1);
937 	}
938 
939 	/*
940 	 * Make sure we've disabled the ROM.
941 	 */
942 	data = pci_read_config(dev, PCIR_ROMADDR, 4);
943 	data &= ~1;
944 	pci_write_config(dev, PCIR_ROMADDR, data, 4);
945 
946 	if (IS_26XX(isp)) {
947 		/* 26XX chips support only MSI-X, so try that first. */
948 		pcs->msicount = imin(pci_msix_count(dev), 1);
949 		if (pcs->msicount > 0 &&
950 		    (i = pci_alloc_msix(dev, &pcs->msicount)) == 0) {
951 			pcs->iqd = 1;
952 		} else {
953 			pcs->msicount = 0;
954 		}
955 	}
956 	if (pcs->msicount == 0 && (IS_24XX(isp) || IS_2322(isp))) {
957 		/*
958 		 * Older chips support both MSI and MSI-X, but I have a
959 		 * feeling that older firmware may not support MSI-X, and
960 		 * we have no way to check the firmware flag here.
961 		 */
962 		pcs->msicount = imin(pci_msi_count(dev), 1);
963 		if (pcs->msicount > 0 &&
964 		    pci_alloc_msi(dev, &pcs->msicount) == 0) {
965 			pcs->iqd = 1;
966 		} else {
967 			pcs->msicount = 0;
968 		}
969 	}
970 	pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE);
971 	if (pcs->irq == NULL) {
972 		device_printf(dev, "could not allocate interrupt\n");
973 		goto bad;
974 	}
975 
976 	/* Make sure the lock is set up. */
977 	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
978 	locksetup++;
979 
980 	if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) {
981 		device_printf(dev, "could not setup interrupt\n");
982 		goto bad;
983 	}
984 
985 	/*
986 	 * Last minute checks...
987 	 */
988 	if (IS_23XX(isp) || IS_24XX(isp)) {
989 		isp->isp_port = pci_get_function(dev);
990 	}
991 
992 	/*
993 	 * Make sure we're in reset state.
994 	 */
995 	ISP_LOCK(isp);
996 	if (isp_reinit(isp, 1) != 0) {
997 		ISP_UNLOCK(isp);
998 		goto bad;
999 	}
1000 	ISP_UNLOCK(isp);
1001 	if (isp_attach(isp)) {
1002 		ISP_LOCK(isp);
1003 		isp_uninit(isp);
1004 		ISP_UNLOCK(isp);
1005 		goto bad;
1006 	}
1007 	return (0);
1008 
1009 bad:
1010 	if (pcs->ih) {
1011 		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
1012 	}
1013 	if (locksetup) {
1014 		mtx_destroy(&isp->isp_osinfo.lock);
1015 	}
1016 	if (pcs->irq) {
1017 		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
1018 	}
1019 	if (pcs->msicount) {
1020 		pci_release_msi(dev);
1021 	}
1022 	if (pcs->regs)
1023 		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
1024 	if (pcs->regs1)
1025 		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
1026 	if (pcs->regs2)
1027 		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
1028 	if (pcs->pci_isp.isp_param) {
1029 		free(pcs->pci_isp.isp_param, M_DEVBUF);
1030 		pcs->pci_isp.isp_param = NULL;
1031 	}
1032 	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
1033 		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
1034 		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
1035 	}
1036 	return (ENXIO);
1037 }
1038 
1039 static int
1040 isp_pci_detach(device_t dev)
1041 {
1042 	struct isp_pcisoftc *pcs;
1043 	ispsoftc_t *isp;
1044 	int status;
1045 
1046 	pcs = device_get_softc(dev);
1047 	if (pcs == NULL) {
1048 		return (ENXIO);
1049 	}
1050 	isp = (ispsoftc_t *) pcs;
1051 	status = isp_detach(isp);
1052 	if (status)
1053 		return (status);
1054 	ISP_LOCK(isp);
1055 	isp_uninit(isp);
1056 	if (pcs->ih) {
1057 		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
1058 	}
1059 	ISP_UNLOCK(isp);
1060 	mtx_destroy(&isp->isp_osinfo.lock);
1061 	(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
1062 	if (pcs->msicount) {
1063 		pci_release_msi(dev);
1064 	}
1065 	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
1066 	if (pcs->regs1)
1067 		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
1068 	if (pcs->regs2)
1069 		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
1070 	/*
1071 	 * XXX: THERE IS A LOT OF LEAKAGE HERE
1072 	 */
1073 	if (pcs->pci_isp.isp_param) {
1074 		free(pcs->pci_isp.isp_param, M_DEVBUF);
1075 		pcs->pci_isp.isp_param = NULL;
1076 	}
1077 	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
1078 		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
1079 		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
1080 	}
1081 	return (0);
1082 }
1083 
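/*
 * IspVirt2Off() turns a virtualized register handle (block selector in the
 * upper bits, register offset in the low 12 bits) into a byte offset within
 * the mapped register window, using the per-chip block offsets recorded in
 * pci_poff[] at attach time.
 */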
1084 #define	IspVirt2Off(a, x)	\
1085 	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
1086 	_BLK_REG_SHFT] + ((x) & 0xfff))
1087 
1088 #define	BXR2(isp, off)		bus_read_2((isp)->isp_regs, (off))
1089 #define	BXW2(isp, off, v)	bus_write_2((isp)->isp_regs, (off), (v))
1090 #define	BXR4(isp, off)		bus_read_4((isp)->isp_regs, (off))
1091 #define	BXW4(isp, off, v)	bus_write_4((isp)->isp_regs, (off), (v))
1092 #define	B2R4(isp, off)		bus_read_4((isp)->isp_regs2, (off))
1093 #define	B2W4(isp, off, v)	bus_write_4((isp)->isp_regs2, (off), (v))
1094 
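/*
 * Debounced 16-bit register read for the 2100: re-read until two
 * consecutive reads agree (giving up after 1000 tries).  A non-zero
 * return means the value never stabilized.
 */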
1095 static ISP_INLINE int
1096 isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
1097 {
1098 	uint32_t val0, val1;
1099 	int i = 0;
1100 
1101 	do {
1102 		val0 = BXR2(isp, IspVirt2Off(isp, off));
1103 		val1 = BXR2(isp, IspVirt2Off(isp, off));
1104 	} while (val0 != val1 && ++i < 1000);
1105 	if (val0 != val1) {
1106 		return (1);
1107 	}
1108 	*rp = val0;
1109 	return (0);
1110 }
1111 
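/*
 * Read and classify the interrupt status for the original SCSI/2100/2200
 * register layout: return 0 if nothing is pending, otherwise hand back the
 * masked ISR value, the mailbox semaphore state and, when the semaphore is
 * set, the contents of OUTMAILBOX0.
 */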
1112 static int
1113 isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
1114 {
1115 	uint16_t isr, sema;
1116 
1117 	if (IS_2100(isp)) {
1118 		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
1119 		    return (0);
1120 		}
1121 		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
1122 		    return (0);
1123 		}
1124 	} else {
1125 		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
1126 		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
1127 	}
1128 	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
1129 	isr &= INT_PENDING_MASK(isp);
1130 	sema &= BIU_SEMA_LOCK;
1131 	if (isr == 0 && sema == 0) {
1132 		return (0);
1133 	}
1134 	*isrp = isr;
1135 	if ((*semap = sema) != 0) {
1136 		if (IS_2100(isp)) {
1137 			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, info)) {
1138 				return (0);
1139 			}
1140 		} else {
1141 			*info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
1142 		}
1143 	}
1144 	return (1);
1145 }
1146 
1147 static int
1148 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
1149 {
1150 	uint32_t hccr, r2hisr;
1151 
1152 	if ((BXR2(isp, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
1153 		*isrp = 0;
1154 		return (0);
1155 	}
1156 	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
1157 	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1158 	if ((r2hisr & BIU_R2HST_INTR) == 0) {
1159 		*isrp = 0;
1160 		return (0);
1161 	}
1162 	switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) {
1163 	case ISPR2HST_ROM_MBX_OK:
1164 	case ISPR2HST_ROM_MBX_FAIL:
1165 	case ISPR2HST_MBX_OK:
1166 	case ISPR2HST_MBX_FAIL:
1167 	case ISPR2HST_ASYNC_EVENT:
1168 		*semap = 1;
1169 		break;
1170 	case ISPR2HST_RIO_16:
1171 		*info = ASYNC_RIO16_1;
1172 		*semap = 1;
1173 		return (1);
1174 	case ISPR2HST_FPOST:
1175 		*info = ASYNC_CMD_CMPLT;
1176 		*semap = 1;
1177 		return (1);
1178 	case ISPR2HST_FPOST_CTIO:
1179 		*info = ASYNC_CTIO_DONE;
1180 		*semap = 1;
1181 		return (1);
1182 	case ISPR2HST_RSPQ_UPDATE:
1183 		*semap = 0;
1184 		break;
1185 	default:
1186 		hccr = ISP_READ(isp, HCCR);
1187 		if (hccr & HCCR_PAUSE) {
1188 			ISP_WRITE(isp, HCCR, HCCR_RESET);
1189 			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
1190 			ISP_WRITE(isp, BIU_ICR, 0);
1191 		} else {
1192 			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
1193 		}
1194 		return (0);
1195 	}
1196 	*info = (r2hisr >> 16);
1197 	return (1);
1198 }
1199 
1200 static int
1201 isp_pci_rd_isr_2400(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
1202 {
1203 	uint32_t r2hisr;
1204 
1205 	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
1206 	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1207 	if ((r2hisr & BIU_R2HST_INTR) == 0) {
1208 		*isrp = 0;
1209 		return (0);
1210 	}
1211 	switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) {
1212 	case ISPR2HST_ROM_MBX_OK:
1213 	case ISPR2HST_ROM_MBX_FAIL:
1214 	case ISPR2HST_MBX_OK:
1215 	case ISPR2HST_MBX_FAIL:
1216 	case ISPR2HST_ASYNC_EVENT:
1217 		*semap = 1;
1218 		break;
1219 	case ISPR2HST_RSPQ_UPDATE:
1220 	case ISPR2HST_RSPQ_UPDATE2:
1221 	case ISPR2HST_ATIO_UPDATE:
1222 	case ISPR2HST_ATIO_RSPQ_UPDATE:
1223 	case ISPR2HST_ATIO_UPDATE2:
1224 		*semap = 0;
1225 		break;
1226 	default:
1227 		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
1228 		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
1229 		return (0);
1230 	}
1231 	*info = (r2hisr >> 16);
1232 	return (1);
1233 }
1234 
1235 static uint32_t
1236 isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
1237 {
1238 	uint16_t rv;
1239 	int oldconf = 0;
1240 
1241 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1242 		/*
1243 		 * We will assume that someone has paused the RISC processor.
1244 		 */
1245 		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1246 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
1247 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1248 	}
1249 	rv = BXR2(isp, IspVirt2Off(isp, regoff));
1250 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1251 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
1252 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1253 	}
1254 	return (rv);
1255 }
1256 
1257 static void
1258 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
1259 {
1260 	int oldconf = 0;
1261 
1262 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1263 		/*
1264 		 * We will assume that someone has paused the RISC processor.
1265 		 */
1266 		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1267 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1268 		    oldconf | BIU_PCI_CONF1_SXP);
1269 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1270 	}
1271 	BXW2(isp, IspVirt2Off(isp, regoff), val);
1272 	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1273 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1274 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
1275 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1276 	}
1277 
1278 }
1279 
1280 static uint32_t
1281 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
1282 {
1283 	uint32_t rv, oc = 0;
1284 
1285 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1286 		uint32_t tc;
1287 		/*
1288 		 * We will assume that someone has paused the RISC processor.
1289 		 */
1290 		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1291 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1292 		if (regoff & SXP_BANK1_SELECT)
1293 			tc |= BIU_PCI1080_CONF1_SXP1;
1294 		else
1295 			tc |= BIU_PCI1080_CONF1_SXP0;
1296 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
1297 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1298 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1299 		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1300 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1301 		    oc | BIU_PCI1080_CONF1_DMA);
1302 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1303 	}
1304 	rv = BXR2(isp, IspVirt2Off(isp, regoff));
1305 	if (oc) {
1306 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
1307 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1308 	}
1309 	return (rv);
1310 }
1311 
1312 static void
1313 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
1314 {
1315 	int oc = 0;
1316 
1317 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1318 		uint32_t tc;
1319 		/*
1320 		 * We will assume that someone has paused the RISC processor.
1321 		 */
1322 		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1323 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1324 		if (regoff & SXP_BANK1_SELECT)
1325 			tc |= BIU_PCI1080_CONF1_SXP1;
1326 		else
1327 			tc |= BIU_PCI1080_CONF1_SXP0;
1328 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
1329 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1330 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1331 		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1332 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1333 		    oc | BIU_PCI1080_CONF1_DMA);
1334 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1335 	}
1336 	BXW2(isp, IspVirt2Off(isp, regoff), val);
1337 	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1338 	if (oc) {
1339 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
1340 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1341 	}
1342 }
1343 
1344 static uint32_t
1345 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
1346 {
1347 	uint32_t rv;
1348 	int block = regoff & _BLK_REG_MASK;
1349 
1350 	switch (block) {
1351 	case BIU_BLOCK:
1352 		break;
1353 	case MBOX_BLOCK:
1354 		return (BXR2(isp, IspVirt2Off(isp, regoff)));
1355 	case SXP_BLOCK:
1356 		isp_prt(isp, ISP_LOGERR, "SXP_BLOCK read at 0x%x", regoff);
1357 		return (0xffffffff);
1358 	case RISC_BLOCK:
1359 		isp_prt(isp, ISP_LOGERR, "RISC_BLOCK read at 0x%x", regoff);
1360 		return (0xffffffff);
1361 	case DMA_BLOCK:
1362 		isp_prt(isp, ISP_LOGERR, "DMA_BLOCK read at 0x%x", regoff);
1363 		return (0xffffffff);
1364 	default:
1365 		isp_prt(isp, ISP_LOGERR, "unknown block read at 0x%x", regoff);
1366 		return (0xffffffff);
1367 	}
1368 
1369 	switch (regoff) {
1370 	case BIU2400_FLASH_ADDR:
1371 	case BIU2400_FLASH_DATA:
1372 	case BIU2400_ICR:
1373 	case BIU2400_ISR:
1374 	case BIU2400_CSR:
1375 	case BIU2400_REQINP:
1376 	case BIU2400_REQOUTP:
1377 	case BIU2400_RSPINP:
1378 	case BIU2400_RSPOUTP:
1379 	case BIU2400_PRI_REQINP:
1380 	case BIU2400_PRI_REQOUTP:
1381 	case BIU2400_ATIO_RSPINP:
1382 	case BIU2400_ATIO_RSPOUTP:
1383 	case BIU2400_HCCR:
1384 	case BIU2400_GPIOD:
1385 	case BIU2400_GPIOE:
1386 	case BIU2400_HSEMA:
1387 		rv = BXR4(isp, IspVirt2Off(isp, regoff));
1388 		break;
1389 	case BIU2400_R2HSTSLO:
1390 		rv = BXR4(isp, IspVirt2Off(isp, regoff));
1391 		break;
1392 	case BIU2400_R2HSTSHI:
1393 		rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
1394 		break;
1395 	default:
1396 		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
1397 		    regoff);
1398 		rv = 0xffffffff;
1399 		break;
1400 	}
1401 	return (rv);
1402 }
1403 
1404 static void
1405 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
1406 {
1407 	int block = regoff & _BLK_REG_MASK;
1408 
1409 	switch (block) {
1410 	case BIU_BLOCK:
1411 		break;
1412 	case MBOX_BLOCK:
1413 		BXW2(isp, IspVirt2Off(isp, regoff), val);
1414 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1415 		return;
1416 	case SXP_BLOCK:
1417 		isp_prt(isp, ISP_LOGERR, "SXP_BLOCK write at 0x%x", regoff);
1418 		return;
1419 	case RISC_BLOCK:
1420 		isp_prt(isp, ISP_LOGERR, "RISC_BLOCK write at 0x%x", regoff);
1421 		return;
1422 	case DMA_BLOCK:
1423 		isp_prt(isp, ISP_LOGERR, "DMA_BLOCK write at 0x%x", regoff);
1424 		return;
1425 	default:
1426 		isp_prt(isp, ISP_LOGERR, "unknown block write at 0x%x", regoff);
1427 		break;
1428 	}
1429 
1430 	switch (regoff) {
1431 	case BIU2400_FLASH_ADDR:
1432 	case BIU2400_FLASH_DATA:
1433 	case BIU2400_ICR:
1434 	case BIU2400_ISR:
1435 	case BIU2400_CSR:
1436 	case BIU2400_REQINP:
1437 	case BIU2400_REQOUTP:
1438 	case BIU2400_RSPINP:
1439 	case BIU2400_RSPOUTP:
1440 	case BIU2400_PRI_REQINP:
1441 	case BIU2400_PRI_REQOUTP:
1442 	case BIU2400_ATIO_RSPINP:
1443 	case BIU2400_ATIO_RSPOUTP:
1444 	case BIU2400_HCCR:
1445 	case BIU2400_GPIOD:
1446 	case BIU2400_GPIOE:
1447 	case BIU2400_HSEMA:
1448 		BXW4(isp, IspVirt2Off(isp, regoff), val);
1449 #ifdef MEMORYBARRIERW
1450 		if (regoff == BIU2400_REQINP ||
1451 		    regoff == BIU2400_RSPOUTP ||
1452 		    regoff == BIU2400_PRI_REQINP ||
1453 		    regoff == BIU2400_ATIO_RSPOUTP)
1454 			MEMORYBARRIERW(isp, SYNC_REG,
1455 			    IspVirt2Off(isp, regoff), 4, -1)
1456 		else
1457 #endif
1458 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
1459 		break;
1460 	default:
1461 		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
1462 		    regoff);
1463 		break;
1464 	}
1465 }
1466 
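/*
 * On the 26xx the request/response/ATIO queue in and out pointers live in
 * a separate register window (isp_regs2, mapped from BAR(4) at attach
 * time); translate the 2400-style virtual offsets to that window and fall
 * back to the 2400 accessors for everything else.
 */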
1467 static uint32_t
1468 isp_pci_rd_reg_2600(ispsoftc_t *isp, int regoff)
1469 {
1470 	uint32_t rv;
1471 
1472 	switch (regoff) {
1473 	case BIU2400_PRI_REQINP:
1474 	case BIU2400_PRI_REQOUTP:
1475 		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
1476 		    regoff);
1477 		rv = 0xffffffff;
1478 		break;
1479 	case BIU2400_REQINP:
1480 		rv = B2R4(isp, 0x00);
1481 		break;
1482 	case BIU2400_REQOUTP:
1483 		rv = B2R4(isp, 0x04);
1484 		break;
1485 	case BIU2400_RSPINP:
1486 		rv = B2R4(isp, 0x08);
1487 		break;
1488 	case BIU2400_RSPOUTP:
1489 		rv = B2R4(isp, 0x0c);
1490 		break;
1491 	case BIU2400_ATIO_RSPINP:
1492 		rv = B2R4(isp, 0x10);
1493 		break;
1494 	case BIU2400_ATIO_RSPOUTP:
1495 		rv = B2R4(isp, 0x14);
1496 		break;
1497 	default:
1498 		rv = isp_pci_rd_reg_2400(isp, regoff);
1499 		break;
1500 	}
1501 	return (rv);
1502 }
1503 
1504 static void
1505 isp_pci_wr_reg_2600(ispsoftc_t *isp, int regoff, uint32_t val)
1506 {
1507 	int off;
1508 
1509 	switch (regoff) {
1510 	case BIU2400_PRI_REQINP:
1511 	case BIU2400_PRI_REQOUTP:
1512 		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
1513 		    regoff);
1514 		return;
1515 	case BIU2400_REQINP:
1516 		off = 0x00;
1517 		break;
1518 	case BIU2400_REQOUTP:
1519 		off = 0x04;
1520 		break;
1521 	case BIU2400_RSPINP:
1522 		off = 0x08;
1523 		break;
1524 	case BIU2400_RSPOUTP:
1525 		off = 0x0c;
1526 		break;
1527 	case BIU2400_ATIO_RSPINP:
1528 		off = 0x10;
1529 		break;
1530 	case BIU2400_ATIO_RSPOUTP:
1531 		off = 0x14;
1532 		break;
1533 	default:
1534 		isp_pci_wr_reg_2400(isp, regoff, val);
1535 		return;
1536 	}
1537 	B2W4(isp, off, val);
1538 }
1539 
1540 
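/*
 * Helper state for single-segment bus_dmamap_load() callbacks: imc()
 * records the segment's bus address, or the error, for the caller.
 */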
1541 struct imush {
1542 	bus_addr_t maddr;
1543 	int error;
1544 };
1545 
1546 static void
1547 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1548 {
1549 	struct imush *imushp = (struct imush *) arg;
1550 
1551 	if (!(imushp->error = error))
1552 		imushp->maddr = segs[0].ds_addr;
1553 }
1554 
1555 static int
1556 isp_pci_mbxdma(ispsoftc_t *isp)
1557 {
1558 	caddr_t base;
1559 	uint32_t len, nsegs;
1560 	int i, error, cmap = 0;
1561 	bus_size_t slim;	/* segment size */
1562 	bus_addr_t llim;	/* low limit of unavailable dma */
1563 	bus_addr_t hlim;	/* high limit of unavailable dma */
1564 	struct imush im;
1565 	isp_ecmd_t *ecmd;
1566 
1567 	/*
1568 	 * Already been here? If so, leave...
1569 	 */
1570 	if (isp->isp_rquest) {
1571 		return (0);
1572 	}
1573 	ISP_UNLOCK(isp);
1574 
1575 	if (isp->isp_maxcmds == 0) {
1576 		isp_prt(isp, ISP_LOGERR, "maxcmds not set");
1577 		ISP_LOCK(isp);
1578 		return (1);
1579 	}
1580 
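	/*
	 * Ultra2 SCSI, 1240 and all FC chips can address the full DMA
	 * range with 4GB segments and boundaries; older parts are limited
	 * to 32-bit addresses and 16MB segments.
	 */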
1581 	hlim = BUS_SPACE_MAXADDR;
1582 	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
1583 		if (sizeof (bus_size_t) > 4) {
1584 			slim = (bus_size_t) (1ULL << 32);
1585 		} else {
1586 			slim = (bus_size_t) (1UL << 31);
1587 		}
1588 		llim = BUS_SPACE_MAXADDR;
1589 	} else {
1590 		llim = BUS_SPACE_MAXADDR_32BIT;
1591 		slim = (1UL << 24);
1592 	}
1593 
1594 	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
1595 	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1596 	if (isp->isp_osinfo.pcmd_pool == NULL) {
1597 		isp_prt(isp, ISP_LOGERR, "cannot allocate pcmds");
1598 		ISP_LOCK(isp);
1599 		return (1);
1600 	}
1601 
1602 	if (isp->isp_osinfo.sixtyfourbit) {
1603 		nsegs = ISP_NSEG64_MAX;
1604 	} else {
1605 		nsegs = ISP_NSEG_MAX;
1606 	}
1607 
1608 	if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0, &isp->isp_osinfo.dmat)) {
1609 		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1610 		ISP_LOCK(isp);
1611 		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
1612 		return (1);
1613 	}
1614 
1615 	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
1616 	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1617 	if (isp->isp_xflist == NULL) {
1618 		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1619 		ISP_LOCK(isp);
1620 		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
1621 		return (1);
1622 	}
1623 	for (len = 0; len < isp->isp_maxcmds - 1; len++) {
1624 		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
1625 	}
1626 	isp->isp_xffree = isp->isp_xflist;
1627 
1628 	/*
1629 	 * Allocate and map the request queue and a region for external
1630 	 * DMA addressable command/status structures (22XX and later).
1631 	 */
1632 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1633 	if (isp->isp_type >= ISP_HA_FC_2200)
1634 		len += (N_XCMDS * XCMD_SIZE);
1635 	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
1636 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1637 	    len, 1, len, 0, &isp->isp_osinfo.reqdmat)) {
1638 		isp_prt(isp, ISP_LOGERR, "cannot create request DMA tag");
1639 		goto bad1;
1640 	}
1641 	if (bus_dmamem_alloc(isp->isp_osinfo.reqdmat, (void **)&base,
1642 	    BUS_DMA_COHERENT, &isp->isp_osinfo.reqmap) != 0) {
1643 		isp_prt(isp, ISP_LOGERR, "cannot allocate request DMA memory");
1644 		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
1645 		goto bad1;
1646 	}
1647 	isp->isp_rquest = base;
1648 	im.error = 0;
1649 	if (bus_dmamap_load(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap,
1650 	    base, len, imc, &im, 0) || im.error) {
1651 		isp_prt(isp, ISP_LOGERR, "error loading request DMA map %d", im.error);
1652 		goto bad1;
1653 	}
1654 	isp_prt(isp, ISP_LOGDEBUG0, "request area @ 0x%jx/0x%jx",
1655 	    (uintmax_t)im.maddr, (uintmax_t)len);
1656 	isp->isp_rquest_dma = im.maddr;
1657 	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1658 	im.maddr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1659 	if (isp->isp_type >= ISP_HA_FC_2200) {
1660 		isp->isp_osinfo.ecmd_dma = im.maddr;
1661 		isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)base;
1662 		isp->isp_osinfo.ecmd_base = isp->isp_osinfo.ecmd_free;
1663 		for (ecmd = isp->isp_osinfo.ecmd_free;
1664 		    ecmd < &isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
1665 			if (ecmd == &isp->isp_osinfo.ecmd_free[N_XCMDS - 1])
1666 				ecmd->next = NULL;
1667 			else
1668 				ecmd->next = ecmd + 1;
1669 		}
1670 	}
1671 
1672 	/*
1673 	 * Allocate and map the result queue.
1674 	 */
1675 	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1676 	if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
1677 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1678 	    len, 1, len, 0, &isp->isp_osinfo.respdmat)) {
1679 		isp_prt(isp, ISP_LOGERR, "cannot create response DMA tag");
1680 		goto bad1;
1681 	}
1682 	if (bus_dmamem_alloc(isp->isp_osinfo.respdmat, (void **)&base,
1683 	    BUS_DMA_COHERENT, &isp->isp_osinfo.respmap) != 0) {
1684 		isp_prt(isp, ISP_LOGERR, "cannot allocate response DMA memory");
1685 		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
1686 		goto bad1;
1687 	}
1688 	isp->isp_result = base;
1689 	im.error = 0;
1690 	if (bus_dmamap_load(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap,
1691 	    base, len, imc, &im, 0) || im.error) {
1692 		isp_prt(isp, ISP_LOGERR, "error loading response DMA map %d", im.error);
1693 		goto bad1;
1694 	}
1695 	isp_prt(isp, ISP_LOGDEBUG0, "response area @ 0x%jx/0x%jx",
1696 	    (uintmax_t)im.maddr, (uintmax_t)len);
1697 	isp->isp_result_dma = im.maddr;
1698 
1699 #ifdef	ISP_TARGET_MODE
1700 	/*
1701 	 * Allocate and map ATIO queue on 24xx with target mode.
1702 	 */
1703 	if (IS_24XX(isp)) {
1704 		len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1705 		if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
1706 		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1707 		    len, 1, len, 0, &isp->isp_osinfo.atiodmat)) {
1708 			isp_prt(isp, ISP_LOGERR, "cannot create ATIO DMA tag");
1709 			goto bad1;
1710 		}
1711 		if (bus_dmamem_alloc(isp->isp_osinfo.atiodmat, (void **)&base,
1712 		    BUS_DMA_COHERENT, &isp->isp_osinfo.atiomap) != 0) {
1713 			isp_prt(isp, ISP_LOGERR, "cannot allocate ATIO DMA memory");
1714 			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
1715 			goto bad1;
1716 		}
1717 		isp->isp_atioq = base;
1718 		im.error = 0;
1719 		if (bus_dmamap_load(isp->isp_osinfo.atiodmat, isp->isp_osinfo.atiomap,
1720 		    base, len, imc, &im, 0) || im.error) {
1721 			isp_prt(isp, ISP_LOGERR, "error loading ATIO DMA map %d", im.error);
1722 			goto bad;
1723 		}
1724 		isp_prt(isp, ISP_LOGDEBUG0, "ATIO area @ 0x%jx/0x%jx",
1725 		    (uintmax_t)im.maddr, (uintmax_t)len);
1726 		isp->isp_atioq_dma = im.maddr;
1727 	}
1728 #endif
1729 
1730 	if (IS_FC(isp)) {
1731 		if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
1732 		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1733 		    2*QENTRY_LEN, 1, 2*QENTRY_LEN, 0, &isp->isp_osinfo.iocbdmat)) {
1734 			goto bad;
1735 		}
1736 		if (bus_dmamem_alloc(isp->isp_osinfo.iocbdmat,
1737 		    (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.iocbmap) != 0)
1738 			goto bad;
1739 		isp->isp_iocb = base;
1740 		im.error = 0;
1741 		if (bus_dmamap_load(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap,
1742 		    base, 2*QENTRY_LEN, imc, &im, 0) || im.error)
1743 			goto bad;
1744 		isp->isp_iocb_dma = im.maddr;
1745 
1746 		if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
1747 		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1748 		    ISP_FC_SCRLEN, 1, ISP_FC_SCRLEN, 0, &isp->isp_osinfo.scdmat))
1749 			goto bad;
1750 		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
1751 			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
1752 			if (bus_dmamem_alloc(isp->isp_osinfo.scdmat,
1753 			    (void **)&base, BUS_DMA_COHERENT, &fc->scmap) != 0)
1754 				goto bad;
1755 			FCPARAM(isp, cmap)->isp_scratch = base;
1756 			im.error = 0;
1757 			if (bus_dmamap_load(isp->isp_osinfo.scdmat, fc->scmap,
1758 			    base, ISP_FC_SCRLEN, imc, &im, 0) || im.error) {
1759 				bus_dmamem_free(isp->isp_osinfo.scdmat,
1760 				    base, fc->scmap);
1761 				goto bad;
1762 			}
1763 			FCPARAM(isp, cmap)->isp_scdma = im.maddr;
1764 			if (!IS_2100(isp)) {
1765 				for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
1766 					struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
1767 					if (n == NULL) {
1768 						while (fc->nexus_free_list) {
1769 							n = fc->nexus_free_list;
1770 							fc->nexus_free_list = n->next;
1771 							free(n, M_DEVBUF);
1772 						}
1773 						goto bad;
1774 					}
1775 					n->next = fc->nexus_free_list;
1776 					fc->nexus_free_list = n;
1777 				}
1778 			}
1779 		}
1780 	}
1781 
1782 	for (i = 0; i < isp->isp_maxcmds; i++) {
1783 		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
1784 		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
1785 		if (error) {
1786 			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
1787 			while (--i >= 0) {
1788 				bus_dmamap_destroy(isp->isp_osinfo.dmat, isp->isp_osinfo.pcmd_pool[i].dmap);
1789 			}
1790 			goto bad;
1791 		}
1792 		callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0);
1793 		if (i == isp->isp_maxcmds-1) {
1794 			pcmd->next = NULL;
1795 		} else {
1796 			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
1797 		}
1798 	}
1799 	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];
1800 	ISP_LOCK(isp);
1801 	return (0);
1802 
1803 bad:
1804 	if (IS_FC(isp)) {
1805 		while (--cmap >= 0) {
1806 			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
1807 			bus_dmamap_unload(isp->isp_osinfo.scdmat, fc->scmap);
1808 			bus_dmamem_free(isp->isp_osinfo.scdmat,
1809 			    FCPARAM(isp, cmap)->isp_scratch, fc->scmap);
1810 			while (fc->nexus_free_list) {
1811 				struct isp_nexus *n = fc->nexus_free_list;
1812 				fc->nexus_free_list = n->next;
1813 				free(n, M_DEVBUF);
1814 			}
1815 		}
1816 		bus_dma_tag_destroy(isp->isp_osinfo.scdmat);
1817 		bus_dmamap_unload(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap);
1818 		bus_dmamem_free(isp->isp_osinfo.iocbdmat, isp->isp_iocb,
1819 		    isp->isp_osinfo.iocbmap);
1820 		bus_dma_tag_destroy(isp->isp_osinfo.iocbdmat);
1821 	}
1822 bad1:
1823 	if (isp->isp_rquest_dma != 0) {
1824 		bus_dmamap_unload(isp->isp_osinfo.reqdmat,
1825 		    isp->isp_osinfo.reqmap);
1826 	}
1827 	if (isp->isp_rquest != NULL) {
1828 		bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_rquest,
1829 		    isp->isp_osinfo.reqmap);
1830 		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
1831 	}
1832 	if (isp->isp_result_dma != 0) {
1833 		bus_dmamap_unload(isp->isp_osinfo.respdmat,
1834 		    isp->isp_osinfo.respmap);
1835 	}
1836 	if (isp->isp_result != NULL) {
1837 		bus_dmamem_free(isp->isp_osinfo.respdmat, isp->isp_result,
1838 		    isp->isp_osinfo.respmap);
1839 		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
1840 	}
1841 #ifdef	ISP_TARGET_MODE
1842 	if (IS_24XX(isp)) {
1843 		if (isp->isp_atioq_dma != 0) {
1844 			bus_dmamap_unload(isp->isp_osinfo.atiodmat,
1845 			    isp->isp_osinfo.atiomap);
1846 		}
1847 		if (isp->isp_atioq != NULL) {
1848 			bus_dmamem_free(isp->isp_osinfo.atiodmat, isp->isp_atioq,
1849 			    isp->isp_osinfo.atiomap);
1850 			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
1851 		}
1852 	}
1853 #endif
1854 	free(isp->isp_xflist, M_DEVBUF);
1855 	free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1856 	isp->isp_rquest = NULL;
1857 	ISP_LOCK(isp);
1858 	return (1);
1859 }
1860 
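/*
 * Glue passed to the busdma callbacks below: the softc, the CCB being
 * mapped, the request queue entry under construction, an error
 * collector and (for the *_2 variants) the total mapped size.
 * MUSHERR_NOQENTRIES is a private error meaning the request queue had
 * no room.
 */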
1861 typedef struct {
1862 	ispsoftc_t *isp;
1863 	void *cmd_token;
1864 	void *rq;	/* original request */
1865 	int error;
1866 	bus_size_t mapsize;
1867 } mush_t;
1868 
1869 #define	MUSHERR_NOQENTRIES	-2
1870 
1871 #ifdef	ISP_TARGET_MODE
1872 static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
1873 static void tdma2(void *, bus_dma_segment_t *, int, int);
1874 
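/*
 * Target mode busdma callbacks.  tdma2_2 is the variant handed to loads
 * that report a mapped size; it records the size and defers to tdma2.
 */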
1875 static void
1876 tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
1877 {
1878 	mush_t *mp;
1879 	mp = (mush_t *)arg;
1880 	mp->mapsize = mapsize;
1881 	tdma2(arg, dm_segs, nseg, error);
1882 }
1883 
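/*
 * Finish mapping a CTIO.  Note that the data direction is the reverse
 * of the initiator case: CAM_DIR_IN here means we are returning data to
 * the initiator, so the buffer is synced PREWRITE and the transfer is
 * marked ISP_TO_DEVICE before isp_send_tgt_cmd() queues the entry.
 */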
1884 static void
1885 tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1886 {
1887 	mush_t *mp;
1888 	ispsoftc_t *isp;
1889 	struct ccb_scsiio *csio;
1890 	isp_ddir_t ddir;
1891 	ispreq_t *rq;
1892 
1893 	mp = (mush_t *) arg;
1894 	if (error) {
1895 		mp->error = error;
1896 		return;
1897 	}
1898 	csio = mp->cmd_token;
1899 	isp = mp->isp;
1900 	rq = mp->rq;
1901 	if (nseg) {
1902 		if (isp->isp_osinfo.sixtyfourbit) {
1903 			if (nseg >= ISP_NSEG64_MAX) {
1904 				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceeds the maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
1905 				mp->error = EFAULT;
1906 				return;
1907 			}
1908 			if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) {
1909 				rq->req_header.rqs_entry_type = RQSTYPE_CTIO3;
1910 			}
1911 		} else {
1912 			if (nseg >= ISP_NSEG_MAX) {
1913 				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceeds the maximum we can support (%d)", nseg, ISP_NSEG_MAX);
1914 				mp->error = EFAULT;
1915 				return;
1916 			}
1917 		}
1918 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1919 			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
1920 			ddir = ISP_TO_DEVICE;
1921 		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1922 			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
1923 			ddir = ISP_FROM_DEVICE;
1924 		} else {
1925 			dm_segs = NULL;
1926 			nseg = 0;
1927 			ddir = ISP_NOXFR;
1928 		}
1929 	} else {
1930 		dm_segs = NULL;
1931 		nseg = 0;
1932 		ddir = ISP_NOXFR;
1933 	}
1934 
1935 	error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len);
1936 	switch (error) {
1937 	case CMD_EAGAIN:
1938 		mp->error = MUSHERR_NOQENTRIES;
		break;
1939 	case CMD_QUEUED:
1940 		break;
1941 	default:
1942 		mp->error = EIO;
1943 	}
1944 }
1945 #endif
1946 
1947 static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int);
1948 static void dma2(void *, bus_dma_segment_t *, int, int);
1949 
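/*
 * Initiator mode busdma callbacks; dma2_2 is the mapsize-reporting shim
 * that mirrors tdma2_2 above.
 */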
1950 static void
1951 dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error)
1952 {
1953 	mush_t *mp;
1954 	mp = (mush_t *)arg;
1955 	mp->mapsize = mapsize;
1956 	dma2(arg, dm_segs, nseg, error);
1957 }
1958 
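/*
 * Finish mapping an initiator request.  If the 64-bit DMA path is in
 * use, the request header type is promoted to its 64-bit form
 * (RQSTYPE_T2RQS -> RQSTYPE_T3RQS, RQSTYPE_REQUEST -> RQSTYPE_A64)
 * before isp_send_cmd() pushes the entry onto the request queue.
 */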
1959 static void
1960 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1961 {
1962 	mush_t *mp;
1963 	ispsoftc_t *isp;
1964 	struct ccb_scsiio *csio;
1965 	isp_ddir_t ddir;
1966 	ispreq_t *rq;
1967 
1968 	mp = (mush_t *) arg;
1969 	if (error) {
1970 		mp->error = error;
1971 		return;
1972 	}
1973 	csio = mp->cmd_token;
1974 	isp = mp->isp;
1975 	rq = mp->rq;
1976 	if (nseg) {
1977 		if (isp->isp_osinfo.sixtyfourbit) {
1978 			if (nseg >= ISP_NSEG64_MAX) {
1979 				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceeds the maximum we can support (%d)", nseg, ISP_NSEG64_MAX);
1980 				mp->error = EFAULT;
1981 				return;
1982 			}
1983 			if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) {
1984 				rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
1985 			} else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) {
1986 				rq->req_header.rqs_entry_type = RQSTYPE_A64;
1987 			}
1988 		} else {
1989 			if (nseg >= ISP_NSEG_MAX) {
1990 				isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceeds the maximum we can support (%d)", nseg, ISP_NSEG_MAX);
1991 				mp->error = EFAULT;
1992 				return;
1993 			}
1994 		}
1995 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1996 			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD);
1997 			ddir = ISP_FROM_DEVICE;
1998 		} else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1999 			bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE);
2000 			ddir = ISP_TO_DEVICE;
2001 		} else {
2002 			ddir = ISP_NOXFR;
2003 		}
2004 	} else {
2005 		dm_segs = NULL;
2006 		nseg = 0;
2007 		ddir = ISP_NOXFR;
2008 	}
2009 
2010 	error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map);
2011 	switch (error) {
2012 	case CMD_EAGAIN:
2013 		mp->error = MUSHERR_NOQENTRIES;
2014 		break;
2015 	case CMD_QUEUED:
2016 		break;
2017 	default:
2018 		mp->error = EIO;
2019 		break;
2020 	}
2021 }
2022 
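/*
 * Bus specific DMA setup entry point.  Set up a mush_t, pick the
 * initiator or target mode callback, and have bus_dmamap_load_ccb() do
 * the mapping synchronously; deferred (EINPROGRESS) loads are not
 * supported.  Callback errors are translated into CMD_* return values
 * and, where appropriate, a CAM status on the CCB.
 */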
2023 static int
2024 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
2025 {
2026 	mush_t mush, *mp;
2027 	void (*eptr)(void *, bus_dma_segment_t *, int, int);
2028 	void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int);
2029 	int error;
2030 
2031 	mp = &mush;
2032 	mp->isp = isp;
2033 	mp->cmd_token = csio;
2034 	mp->rq = ff;
2035 	mp->error = 0;
2036 	mp->mapsize = 0;
2037 
2038 #ifdef	ISP_TARGET_MODE
2039 	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
2040 		eptr = tdma2;
2041 		eptr2 = tdma2_2;
2042 	} else
2043 #endif
2044 	{
2045 		eptr = dma2;
2046 		eptr2 = dma2_2;
2047 	}
2048 
2050 	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
2051 	    (union ccb *)csio, eptr, mp, 0);
2052 	if (error == EINPROGRESS) {
2053 		bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
2054 		mp->error = EINVAL;
2055 		isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
2056 	} else if (error && mp->error == 0) {
2057 #ifdef	DIAGNOSTIC
2058 		isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
2059 #endif
2060 		mp->error = error;
2061 	}
2062 	if (mp->error) {
2063 		int retval = CMD_COMPLETE;
2064 		if (mp->error == MUSHERR_NOQENTRIES) {
2065 			retval = CMD_EAGAIN;
2066 		} else if (mp->error == EFBIG) {
2067 			csio->ccb_h.status = CAM_REQ_TOO_BIG;
2068 		} else if (mp->error == EINVAL) {
2069 			csio->ccb_h.status = CAM_REQ_INVALID;
2070 		} else {
2071 			csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
2072 		}
2073 		return (retval);
2074 	}
2075 	return (CMD_QUEUED);
2076 }
2077 
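/*
 * Reset hooks: isp_pci_reset0 simply disables interrupt delivery from
 * the chip.
 */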
2078 static void
2079 isp_pci_reset0(ispsoftc_t *isp)
2080 {
2081 	ISP_DISABLE_INTS(isp);
2082 }
2083 
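/*
 * isp_pci_reset1: on anything older than a 24XX, write the "disable
 * BIOS" command to HCCR, then re-enable interrupt delivery.
 */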
2084 static void
2085 isp_pci_reset1(ispsoftc_t *isp)
2086 {
2087 	if (!IS_24XX(isp)) {
2088 		/* Make sure the BIOS is disabled */
2089 		isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
2090 	}
2091 	/* and enable interrupts */
2092 	ISP_ENABLE_INTS(isp);
2093 }
2094 
2095 static void
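/*
 * Dump the more interesting chip registers for debugging: the bus
 * interface unit registers, the DMA and SXP blocks (parallel SCSI
 * chips only), the outgoing mailboxes and the PCI command/status word.
 */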
2096 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
2097 {
2098 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2099 	if (msg)
2100 		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
2101 	else
2102 		printf("%s:\n", device_get_nameunit(isp->isp_dev));
2103 	if (IS_SCSI(isp))
2104 		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
2105 	else
2106 		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
2107 	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
2108 	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
2109 	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
2110 
2112 	if (IS_SCSI(isp)) {
2113 		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
2114 		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
2115 			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
2116 			ISP_READ(isp, CDMA_FIFO_STS));
2117 		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
2118 			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
2119 			ISP_READ(isp, DDMA_FIFO_STS));
2120 		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
2121 			ISP_READ(isp, SXP_INTERRUPT),
2122 			ISP_READ(isp, SXP_GROSS_ERR),
2123 			ISP_READ(isp, SXP_PINS_CTRL));
2124 		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
2125 	}
2126 	printf("    mbox regs: %x %x %x %x %x\n",
2127 	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
2128 	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
2129 	    ISP_READ(isp, OUTMAILBOX4));
2130 	printf("    PCI Command/Status=%x\n",
2131 	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 4));
2132 }
2133