xref: /freebsd/sys/dev/isp/isp_pci.c (revision d056fa046c6a91b90cd98165face0e42a33a5173)
1 /*-
2  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c) 1997-2006 by Matthew Jacob
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice immediately at the beginning of the file, without modification,
13  *    this list of conditions, and the following disclaimer.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/module.h>
38 #if __FreeBSD_version >= 700000
39 #include <sys/linker.h>
40 #include <sys/firmware.h>
41 #endif
42 #include <sys/bus.h>
43 #if __FreeBSD_version < 500000
44 #include <pci/pcireg.h>
45 #include <pci/pcivar.h>
46 #include <machine/bus_memio.h>
47 #include <machine/bus_pio.h>
48 #else
49 #include <sys/stdint.h>
50 #include <dev/pci/pcireg.h>
51 #include <dev/pci/pcivar.h>
52 #endif
53 #include <machine/bus.h>
54 #include <machine/resource.h>
55 #include <sys/rman.h>
56 #include <sys/malloc.h>
57 
58 #include <dev/isp/isp_freebsd.h>
59 
60 #if __FreeBSD_version < 500000
61 #define	BUS_PROBE_DEFAULT	0
62 #endif
63 
64 static uint16_t isp_pci_rd_reg(ispsoftc_t *, int);
65 static void isp_pci_wr_reg(ispsoftc_t *, int, uint16_t);
66 static uint16_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
67 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint16_t);
68 static int
69 isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
70 static int
71 isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
72 static int isp_pci_mbxdma(ispsoftc_t *);
73 static int
74 isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint16_t *, uint16_t);
75 static void
76 isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint16_t);
77 
78 
79 static void isp_pci_reset1(ispsoftc_t *);
80 static void isp_pci_dumpregs(ispsoftc_t *, const char *);
81 
82 static struct ispmdvec mdvec = {
83 	isp_pci_rd_isr,
84 	isp_pci_rd_reg,
85 	isp_pci_wr_reg,
86 	isp_pci_mbxdma,
87 	isp_pci_dmasetup,
88 	isp_pci_dmateardown,
89 	NULL,
90 	isp_pci_reset1,
91 	isp_pci_dumpregs,
92 	NULL,
93 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
94 };
95 
96 static struct ispmdvec mdvec_1080 = {
97 	isp_pci_rd_isr,
98 	isp_pci_rd_reg_1080,
99 	isp_pci_wr_reg_1080,
100 	isp_pci_mbxdma,
101 	isp_pci_dmasetup,
102 	isp_pci_dmateardown,
103 	NULL,
104 	isp_pci_reset1,
105 	isp_pci_dumpregs,
106 	NULL,
107 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
108 };
109 
110 static struct ispmdvec mdvec_12160 = {
111 	isp_pci_rd_isr,
112 	isp_pci_rd_reg_1080,
113 	isp_pci_wr_reg_1080,
114 	isp_pci_mbxdma,
115 	isp_pci_dmasetup,
116 	isp_pci_dmateardown,
117 	NULL,
118 	isp_pci_reset1,
119 	isp_pci_dumpregs,
120 	NULL,
121 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
122 };
123 
124 static struct ispmdvec mdvec_2100 = {
125 	isp_pci_rd_isr,
126 	isp_pci_rd_reg,
127 	isp_pci_wr_reg,
128 	isp_pci_mbxdma,
129 	isp_pci_dmasetup,
130 	isp_pci_dmateardown,
131 	NULL,
132 	isp_pci_reset1,
133 	isp_pci_dumpregs
134 };
135 
136 static struct ispmdvec mdvec_2200 = {
137 	isp_pci_rd_isr,
138 	isp_pci_rd_reg,
139 	isp_pci_wr_reg,
140 	isp_pci_mbxdma,
141 	isp_pci_dmasetup,
142 	isp_pci_dmateardown,
143 	NULL,
144 	isp_pci_reset1,
145 	isp_pci_dumpregs
146 };
147 
148 static struct ispmdvec mdvec_2300 = {
149 	isp_pci_rd_isr_2300,
150 	isp_pci_rd_reg,
151 	isp_pci_wr_reg,
152 	isp_pci_mbxdma,
153 	isp_pci_dmasetup,
154 	isp_pci_dmateardown,
155 	NULL,
156 	isp_pci_reset1,
157 	isp_pci_dumpregs
158 };
159 
160 #ifndef	PCIM_CMD_INVEN
161 #define	PCIM_CMD_INVEN			0x10
162 #endif
163 #ifndef	PCIM_CMD_BUSMASTEREN
164 #define	PCIM_CMD_BUSMASTEREN		0x0004
165 #endif
166 #ifndef	PCIM_CMD_PERRESPEN
167 #define	PCIM_CMD_PERRESPEN		0x0040
168 #endif
169 #ifndef	PCIM_CMD_SEREN
170 #define	PCIM_CMD_SEREN			0x0100
171 #endif
172 #ifndef	PCIM_CMD_INTX_DISABLE
173 #define	PCIM_CMD_INTX_DISABLE		0x0400
174 #endif
175 
176 #ifndef	PCIR_COMMAND
177 #define	PCIR_COMMAND			0x04
178 #endif
179 
180 #ifndef	PCIR_CACHELNSZ
181 #define	PCIR_CACHELNSZ			0x0c
182 #endif
183 
184 #ifndef	PCIR_LATTIMER
185 #define	PCIR_LATTIMER			0x0d
186 #endif
187 
188 #ifndef	PCIR_ROMADDR
189 #define	PCIR_ROMADDR			0x30
190 #endif
191 
192 #ifndef	PCI_VENDOR_QLOGIC
193 #define	PCI_VENDOR_QLOGIC		0x1077
194 #endif
195 
196 #ifndef	PCI_PRODUCT_QLOGIC_ISP1020
197 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
198 #endif
199 
200 #ifndef	PCI_PRODUCT_QLOGIC_ISP1080
201 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
202 #endif
203 
204 #ifndef	PCI_PRODUCT_QLOGIC_ISP10160
205 #define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
206 #endif
207 
208 #ifndef	PCI_PRODUCT_QLOGIC_ISP12160
209 #define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
210 #endif
211 
212 #ifndef	PCI_PRODUCT_QLOGIC_ISP1240
213 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
214 #endif
215 
216 #ifndef	PCI_PRODUCT_QLOGIC_ISP1280
217 #define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
218 #endif
219 
220 #ifndef	PCI_PRODUCT_QLOGIC_ISP2100
221 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
222 #endif
223 
224 #ifndef	PCI_PRODUCT_QLOGIC_ISP2200
225 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
226 #endif
227 
228 #ifndef	PCI_PRODUCT_QLOGIC_ISP2300
229 #define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
230 #endif
231 
232 #ifndef	PCI_PRODUCT_QLOGIC_ISP2312
233 #define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
234 #endif
235 
236 #ifndef	PCI_PRODUCT_QLOGIC_ISP2322
237 #define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
238 #endif
239 
240 #ifndef	PCI_PRODUCT_QLOGIC_ISP2422
241 #define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
242 #endif
243 
244 #ifndef	PCI_PRODUCT_QLOGIC_ISP6312
245 #define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
246 #endif
247 
248 #ifndef	PCI_PRODUCT_QLOGIC_ISP6322
249 #define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
250 #endif
251 
252 
253 #define	PCI_QLOGIC_ISP1020	\
254 	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
255 
256 #define	PCI_QLOGIC_ISP1080	\
257 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
258 
259 #define	PCI_QLOGIC_ISP10160	\
260 	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
261 
262 #define	PCI_QLOGIC_ISP12160	\
263 	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
264 
265 #define	PCI_QLOGIC_ISP1240	\
266 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
267 
268 #define	PCI_QLOGIC_ISP1280	\
269 	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
270 
271 #define	PCI_QLOGIC_ISP2100	\
272 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
273 
274 #define	PCI_QLOGIC_ISP2200	\
275 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
276 
277 #define	PCI_QLOGIC_ISP2300	\
278 	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
279 
280 #define	PCI_QLOGIC_ISP2312	\
281 	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
282 
283 #define	PCI_QLOGIC_ISP2322	\
284 	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
285 
286 #define	PCI_QLOGIC_ISP2422	\
287 	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
288 
289 #define	PCI_QLOGIC_ISP6312	\
290 	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
291 
292 #define	PCI_QLOGIC_ISP6322	\
293 	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
294 
295 /*
296  * Odd case for some AMI raid cards... We need to *not* attach to this.
297  */
298 #define	AMI_RAID_SUBVENDOR_ID	0x101e
299 
300 #define	IO_MAP_REG	0x10
301 #define	MEM_MAP_REG	0x14
302 
303 #define	PCI_DFLT_LTNCY	0x40
304 #define	PCI_DFLT_LNSZ	0x10
305 
306 static int isp_pci_probe (device_t);
307 static int isp_pci_attach (device_t);
308 
309 
310 struct isp_pcisoftc {
311 	ispsoftc_t			pci_isp;
312 	device_t			pci_dev;
313 	struct resource *		pci_reg;
314 	bus_space_tag_t			pci_st;
315 	bus_space_handle_t		pci_sh;
316 	void *				ih;
317 	int16_t				pci_poff[_NREG_BLKS];
318 	bus_dma_tag_t			dmat;
319 	bus_dmamap_t			*dmaps;
320 };
321 
322 static device_method_t isp_pci_methods[] = {
323 	/* Device interface */
324 	DEVMETHOD(device_probe,		isp_pci_probe),
325 	DEVMETHOD(device_attach,	isp_pci_attach),
326 	{ 0, 0 }
327 };
328 static void isp_pci_intr(void *);
329 
330 static driver_t isp_pci_driver = {
331 	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
332 };
333 static devclass_t isp_devclass;
334 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
335 #if __FreeBSD_version >= 700000
336 MODULE_DEPEND(isp, ispfw, 1, 1, 1);
337 MODULE_DEPEND(isp, firmware, 1, 1, 1);
338 #else
339 typedef void ispfwfunc(int, int, int, uint16_t **);
340 extern ispfwfunc *isp_get_firmware_p;
341 #endif
342 
343 static int
344 isp_pci_probe(device_t dev)
345 {
346 	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
347 	case PCI_QLOGIC_ISP1020:
348 		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
349 		break;
350 	case PCI_QLOGIC_ISP1080:
351 		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
352 		break;
353 	case PCI_QLOGIC_ISP1240:
354 		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
355 		break;
356 	case PCI_QLOGIC_ISP1280:
357 		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
358 		break;
359 	case PCI_QLOGIC_ISP10160:
360 		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
361 		break;
362 	case PCI_QLOGIC_ISP12160:
363 		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
364 			return (ENXIO);
365 		}
366 		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
367 		break;
368 	case PCI_QLOGIC_ISP2100:
369 		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
370 		break;
371 	case PCI_QLOGIC_ISP2200:
372 		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
373 		break;
374 	case PCI_QLOGIC_ISP2300:
375 		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
376 		break;
377 	case PCI_QLOGIC_ISP2312:
378 		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
379 		break;
380 	case PCI_QLOGIC_ISP2322:
381 		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
382 		break;
383 #if	0
384 	case PCI_QLOGIC_ISP2422:
385 		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
386 		break;
387 #endif
388 	case PCI_QLOGIC_ISP6312:
389 		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
390 		break;
391 	case PCI_QLOGIC_ISP6322:
392 		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
393 		break;
394 	default:
395 		return (ENXIO);
396 	}
397 	if (isp_announced == 0 && bootverbose) {
398 		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
399 		    "Core Version %d.%d\n",
400 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
401 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
402 		isp_announced++;
403 	}
404 	/*
405 	 * XXXX: Here is where we might load the f/w module
406 	 * XXXX: (or increase a reference count to it).
407 	 */
408 	return (BUS_PROBE_DEFAULT);
409 }
410 
411 #if __FreeBSD_version < 500000
412 static void
413 isp_get_options(device_t dev, ispsoftc_t *isp)
414 {
415 	uint64_t wwn;
416 	int bitmap, unit;
417 
418 	unit = device_get_unit(dev);
419 	if (getenv_int("isp_disable", &bitmap)) {
420 		if (bitmap & (1 << unit)) {
421 			isp->isp_osinfo.disabled = 1;
422 			return;
423 		}
424 	}
425 
426 	if (getenv_int("isp_no_fwload", &bitmap)) {
427 		if (bitmap & (1 << unit))
428 			isp->isp_confopts |= ISP_CFG_NORELOAD;
429 	}
430 	if (getenv_int("isp_fwload", &bitmap)) {
431 		if (bitmap & (1 << unit))
432 			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
433 	}
434 	if (getenv_int("isp_no_nvram", &bitmap)) {
435 		if (bitmap & (1 << unit))
436 			isp->isp_confopts |= ISP_CFG_NONVRAM;
437 	}
438 	if (getenv_int("isp_nvram", &bitmap)) {
439 		if (bitmap & (1 << unit))
440 			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
441 	}
442 	if (getenv_int("isp_fcduplex", &bitmap)) {
443 		if (bitmap & (1 << unit))
444 			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
445 	}
446 	if (getenv_int("isp_no_fcduplex", &bitmap)) {
447 		if (bitmap & (1 << unit))
448 			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
449 	}
450 	if (getenv_int("isp_nport", &bitmap)) {
451 		if (bitmap & (1 << unit))
452 			isp->isp_confopts |= ISP_CFG_NPORT;
453 	}
454 
455 	/*
456 	 * Because the resource_*_value functions can neither return
457 	 * 64 bit integer values, nor can they be directly coerced
458 	 * to interpret the right hand side of the assignment as
459 	 * you want them to interpret it, we have to force WWN
460 	 * hint replacement to specify WWN strings with a leading
461 	 * 'w' (e.g. w50000000aaaa0001). Sigh.
462 	 */
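	/*
	 * Illustrative example, following the leading-'w' convention
	 * described above (hypothetical loader-environment setting):
	 *
	 *	isp_portwwn="w50000000aaaa0001"
	 */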
463 	if (getenv_quad("isp_portwwn", &wwn)) {
464 		isp->isp_osinfo.default_port_wwn = wwn;
465 		isp->isp_confopts |= ISP_CFG_OWNWWPN;
466 	}
467 	if (isp->isp_osinfo.default_port_wwn == 0) {
468 		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
469 	}
470 
471 	if (getenv_quad("isp_nodewwn", &wwn)) {
472 		isp->isp_osinfo.default_node_wwn = wwn;
473 		isp->isp_confopts |= ISP_CFG_OWNWWNN;
474 	}
475 	if (isp->isp_osinfo.default_node_wwn == 0) {
476 		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
477 	}
478 
479 	bitmap = 0;
480 	(void) getenv_int("isp_debug", &bitmap);
481 	if (bitmap) {
482 		isp->isp_dblev = bitmap;
483 	} else {
484 		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
485 	}
486 	if (bootverbose) {
487 		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
488 	}
489 
490 #ifdef	ISP_FW_CRASH_DUMP
491 	bitmap = 0;
492 	if (getenv_int("isp_fw_dump_enable", &bitmap)) {
493 		if (bitmap & (1 << unit)) {
494 			size_t amt = 0;
495 			if (IS_2200(isp)) {
496 				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
497 			} else if (IS_23XX(isp)) {
498 				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
499 			}
500 			if (amt) {
501 				FCPARAM(isp)->isp_dump_data =
502 				    malloc(amt, M_DEVBUF, M_WAITOK);
503 				memset(FCPARAM(isp)->isp_dump_data, 0, amt);
504 			} else {
505 				device_printf(dev,
506 				    "f/w crash dumps not supported for card\n");
507 			}
508 		}
509 	}
510 #endif
511 }
512 
513 static void
514 isp_get_pci_options(device_t dev, int *m1, int *m2)
515 {
516 	int bitmap;
517 	int unit = device_get_unit(dev);
518 
519 	*m1 = PCIM_CMD_MEMEN;
520 	*m2 = PCIM_CMD_PORTEN;
521 	if (getenv_int("isp_mem_map", &bitmap)) {
522 		if (bitmap & (1 << unit)) {
523 			*m1 = PCIM_CMD_MEMEN;
524 			*m2 = PCIM_CMD_PORTEN;
525 		}
526 	}
527 	bitmap = 0;
528 	if (getenv_int("isp_io_map", &bitmap)) {
529 		if (bitmap & (1 << unit)) {
530 			*m1 = PCIM_CMD_PORTEN;
531 			*m2 = PCIM_CMD_MEMEN;
532 		}
533 	}
534 }
535 #else
536 static void
537 isp_get_options(device_t dev, ispsoftc_t *isp)
538 {
539 	int tval;
540 	const char *sptr;
541 	/*
542 	 * Figure out if we're supposed to skip this one.
543 	 */
544 
545 	tval = 0;
546 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
547 	    "disable", &tval) == 0 && tval) {
548 		device_printf(dev, "disabled at user request\n");
549 		isp->isp_osinfo.disabled = 1;
550 		return;
551 	}
552 
553 	tval = -1;
554 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
555 	    "role", &tval) == 0 && tval != -1) {
556 		tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
557 		isp->isp_role = tval;
558 		device_printf(dev, "setting role to 0x%x\n", isp->isp_role);
559 	} else {
560 #ifdef	ISP_TARGET_MODE
561 		isp->isp_role = ISP_ROLE_TARGET;
562 #else
563 		isp->isp_role = ISP_DEFAULT_ROLES;
564 #endif
565 	}
566 
567 	tval = 0;
568 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
569 	    "fwload_disable", &tval) == 0 && tval != 0) {
570 		isp->isp_confopts |= ISP_CFG_NORELOAD;
571 	}
572 	tval = 0;
573 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
574 	    "ignore_nvram", &tval) == 0 && tval != 0) {
575 		isp->isp_confopts |= ISP_CFG_NONVRAM;
576 	}
577 	tval = 0;
578 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
579 	    "fullduplex", &tval) == 0 && tval != 0) {
580 		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
581 	}
582 #ifdef	ISP_FW_CRASH_DUMP
583 	tval = 0;
584 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
585 	    "fw_dump_enable", &tval) == 0 && tval != 0) {
586 		size_t amt = 0;
587 		if (IS_2200(isp)) {
588 			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
589 		} else if (IS_23XX(isp)) {
590 			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
591 		}
592 		if (amt) {
593 			FCPARAM(isp)->isp_dump_data =
594 			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
595 		} else {
596 			device_printf(dev,
597 			    "f/w crash dumps not supported for this model\n");
598 		}
599 	}
600 #endif
601 
602 	sptr = 0;
603 	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
604 	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
605 		if (strcmp(sptr, "lport") == 0) {
606 			isp->isp_confopts |= ISP_CFG_LPORT;
607 		} else if (strcmp(sptr, "nport") == 0) {
608 			isp->isp_confopts |= ISP_CFG_NPORT;
609 		} else if (strcmp(sptr, "lport-only") == 0) {
610 			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
611 		} else if (strcmp(sptr, "nport-only") == 0) {
612 			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
613 		}
614 	}
615 
616 	/*
617 	 * Because the resource_*_value functions can neither return
618 	 * 64 bit integer values, nor can they be directly coerced
619 	 * to interpret the right hand side of the assignment as
620 	 * you want them to interpret it, we have to force WWN
621 	 * hint replacement to specify WWN strings with a leading
622 	 * 'w' (e.g. w50000000aaaa0001). Sigh.
623 	 */
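	/*
	 * For example, assuming the usual hint.<name>.<unit>.<resource>
	 * form for the hints read below, one would set:
	 *
	 *	hint.isp.0.portwwn="w50000000aaaa0001"
	 *
	 * i.e. a 'w' followed by 16 hex digits, parsed with strtouq() below.
	 */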
624 	sptr = 0;
625 	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
626 	    "portwwn", (const char **) &sptr);
627 	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
628 		char *eptr = 0;
629 		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
630 		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
631 			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
632 			isp->isp_osinfo.default_port_wwn = 0;
633 		} else {
634 			isp->isp_confopts |= ISP_CFG_OWNWWPN;
635 		}
636 	}
637 	if (isp->isp_osinfo.default_port_wwn == 0) {
638 		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
639 	}
640 
641 	sptr = 0;
642 	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
643 	    "nodewwn", (const char **) &sptr);
644 	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
645 		char *eptr = 0;
646 		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
647 		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
648 			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
649 			isp->isp_osinfo.default_node_wwn = 0;
650 		} else {
651 			isp->isp_confopts |= ISP_CFG_OWNWWNN;
652 		}
653 	}
654 	if (isp->isp_osinfo.default_node_wwn == 0) {
655 		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
656 	}
657 
658 	isp->isp_osinfo.default_id = -1;
659 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
660 	    "iid", &tval) == 0) {
661 		isp->isp_osinfo.default_id = tval;
662 		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
663 	}
664 	if (isp->isp_osinfo.default_id == -1) {
665 		if (IS_FC(isp)) {
666 			isp->isp_osinfo.default_id = 109;
667 		} else {
668 			isp->isp_osinfo.default_id = 7;
669 		}
670 	}
671 
672 	/*
673 	 * Set up logging levels.
674 	 */
675 	tval = 0;
676 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
677 	    "debug", &tval);
678 	if (tval) {
679 		isp->isp_dblev = tval;
680 	} else {
681 		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
682 	}
683 	if (bootverbose) {
684 		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
685 	}
686 
687 }
688 
689 static void
690 isp_get_pci_options(device_t dev, int *m1, int *m2)
691 {
692 	int tval;
693 	/*
694 	 * Which we should try first - memory mapping or i/o mapping?
695 	 *
696 	 * We used to try memory first followed by i/o on alpha, otherwise
697 	 * the reverse, but we should just try memory first all the time now.
698 	 */
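	/*
	 * For example, to prefer the i/o mapping one would set the hint
	 * read below (same hint.<name>.<unit>.<resource> form as above):
	 *
	 *	hint.isp.0.prefer_iomap="1"
	 */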
699 	*m1 = PCIM_CMD_MEMEN;
700 	*m2 = PCIM_CMD_PORTEN;
701 
702 	tval = 0;
703 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
704 	    "prefer_iomap", &tval) == 0 && tval != 0) {
705 		*m1 = PCIM_CMD_PORTEN;
706 		*m2 = PCIM_CMD_MEMEN;
707 	}
708 	tval = 0;
709 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
710 	    "prefer_memmap", &tval) == 0 && tval != 0) {
711 		*m1 = PCIM_CMD_MEMEN;
712 		*m2 = PCIM_CMD_PORTEN;
713 	}
714 }
715 #endif
716 
717 static int
718 isp_pci_attach(device_t dev)
719 {
720 	struct resource *regs, *irq;
721 	int rtp, rgd, iqd, m1, m2;
722 	uint32_t data, cmd, linesz, psize, basetype;
723 	struct isp_pcisoftc *pcs;
724 	ispsoftc_t *isp = NULL;
725 	struct ispmdvec *mdvp;
726 #if __FreeBSD_version >= 500000
727 	int locksetup = 0;
728 #endif
729 
730 	pcs = device_get_softc(dev);
731 	if (pcs == NULL) {
732 		device_printf(dev, "cannot get softc\n");
733 		return (ENOMEM);
734 	}
735 	memset(pcs, 0, sizeof (*pcs));
736 	pcs->pci_dev = dev;
737 	isp = &pcs->pci_isp;
738 
739 	/*
740 	 * Get Generic Options
741 	 */
742 	isp_get_options(dev, isp);
743 
744 	/*
745 	 * Check to see if options have us disabled
746 	 */
747 	if (isp->isp_osinfo.disabled) {
748 		/*
749 		 * But return zero to preserve unit numbering
750 		 */
751 		return (0);
752 	}
753 
754 	/*
755 	 * Get PCI options- which in this case are just mapping preferences.
756 	 */
757 	isp_get_pci_options(dev, &m1, &m2);
758 
759 
760 	linesz = PCI_DFLT_LNSZ;
761 	irq = regs = NULL;
762 	rgd = rtp = iqd = 0;
763 
764 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
765 	if (cmd & m1) {
766 		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
767 		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
768 		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
769 	}
770 	if (regs == NULL && (cmd & m2)) {
771 		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
772 		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
773 		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
774 	}
775 	if (regs == NULL) {
776 		device_printf(dev, "unable to map any ports\n");
777 		goto bad;
778 	}
779 	if (bootverbose) {
780 		device_printf(dev, "using %s space register mapping\n",
781 		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
782 	}
783 	pcs->pci_dev = dev;
784 	pcs->pci_reg = regs;
785 	pcs->pci_st = rman_get_bustag(regs);
786 	pcs->pci_sh = rman_get_bushandle(regs);
787 
788 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
789 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
790 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
791 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
792 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
793 	mdvp = &mdvec;
794 	basetype = ISP_HA_SCSI_UNKNOWN;
795 	psize = sizeof (sdparam);
796 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
797 		mdvp = &mdvec;
798 		basetype = ISP_HA_SCSI_UNKNOWN;
799 		psize = sizeof (sdparam);
800 	}
801 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
802 		mdvp = &mdvec_1080;
803 		basetype = ISP_HA_SCSI_1080;
804 		psize = sizeof (sdparam);
805 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
806 		    ISP1080_DMA_REGS_OFF;
807 	}
808 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
809 		mdvp = &mdvec_1080;
810 		basetype = ISP_HA_SCSI_1240;
811 		psize = 2 * sizeof (sdparam);
812 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
813 		    ISP1080_DMA_REGS_OFF;
814 	}
815 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
816 		mdvp = &mdvec_1080;
817 		basetype = ISP_HA_SCSI_1280;
818 		psize = 2 * sizeof (sdparam);
819 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
820 		    ISP1080_DMA_REGS_OFF;
821 	}
822 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
823 		mdvp = &mdvec_12160;
824 		basetype = ISP_HA_SCSI_10160;
825 		psize = sizeof (sdparam);
826 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
827 		    ISP1080_DMA_REGS_OFF;
828 	}
829 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
830 		mdvp = &mdvec_12160;
831 		basetype = ISP_HA_SCSI_12160;
832 		psize = 2 * sizeof (sdparam);
833 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
834 		    ISP1080_DMA_REGS_OFF;
835 	}
836 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
837 		mdvp = &mdvec_2100;
838 		basetype = ISP_HA_FC_2100;
839 		psize = sizeof (fcparam);
840 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
841 		    PCI_MBOX_REGS2100_OFF;
842 		if (pci_get_revid(dev) < 3) {
843 			/*
844 			 * XXX: Need to get the actual revision
845 			 * XXX: number of the 2100 FB. At any rate,
846 			 * XXX: lower cache line size for early revision
847 			 * XXX: boards.
848 			 */
849 			linesz = 1;
850 		}
851 	}
852 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
853 		mdvp = &mdvec_2200;
854 		basetype = ISP_HA_FC_2200;
855 		psize = sizeof (fcparam);
856 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
857 		    PCI_MBOX_REGS2100_OFF;
858 	}
859 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
860 		mdvp = &mdvec_2300;
861 		basetype = ISP_HA_FC_2300;
862 		psize = sizeof (fcparam);
863 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
864 		    PCI_MBOX_REGS2300_OFF;
865 	}
866 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
867 	    pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
868 		mdvp = &mdvec_2300;
869 		basetype = ISP_HA_FC_2312;
870 		psize = sizeof (fcparam);
871 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
872 		    PCI_MBOX_REGS2300_OFF;
873 	}
874 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 ||
875 	    pci_get_devid(dev) == PCI_QLOGIC_ISP6322) {
876 		mdvp = &mdvec_2300;
877 		basetype = ISP_HA_FC_2322;
878 		psize = sizeof (fcparam);
879 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
880 		    PCI_MBOX_REGS2300_OFF;
881 	}
882 	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) {
883 		mdvp = &mdvec_2300;
884 		basetype = ISP_HA_FC_2422;
885 		psize = sizeof (fcparam);
886 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
887 		    PCI_MBOX_REGS2300_OFF;
888 	}
889 	isp = &pcs->pci_isp;
890 	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
891 	if (isp->isp_param == NULL) {
892 		device_printf(dev, "cannot allocate parameter data\n");
893 		goto bad;
894 	}
895 	isp->isp_mdvec = mdvp;
896 	isp->isp_type = basetype;
897 	isp->isp_revision = pci_get_revid(dev);
898 	isp->isp_dev = dev;
899 
900 #if __FreeBSD_version >= 700000
901 	/*
902 	 * Try and find firmware for this device.
903 	 */
904 	{
905 		char fwname[32];
906 		unsigned int did = pci_get_device(dev);
907 
908 		/*
909 		 * Map a few pci ids to fw names
910 		 */
911 		switch (did) {
912 		case PCI_PRODUCT_QLOGIC_ISP1020:
913 			did = 0x1040;
914 			break;
915 		case PCI_PRODUCT_QLOGIC_ISP1240:
916 			did = 0x1080;
917 			break;
918 		case PCI_PRODUCT_QLOGIC_ISP10160:
919 		case PCI_PRODUCT_QLOGIC_ISP12160:
920 			did = 0x12160;
921 			break;
922 		case PCI_PRODUCT_QLOGIC_ISP6312:
923 		case PCI_PRODUCT_QLOGIC_ISP2312:
924 			did = 0x2300;
925 			break;
926 		case PCI_PRODUCT_QLOGIC_ISP6322:
927 			did = 0x2322;
928 			break;
929 		default:
930 			break;
931 		}
932 
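		/*
		 * For example, a 2312 or 6312 maps to did 0x2300, so the
		 * module names tried below are "isp_2300_it" (when the
		 * target role is enabled) and then plain "isp_2300".
		 */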
933 		isp->isp_osinfo.fw = NULL;
934 		if (isp->isp_role & ISP_ROLE_TARGET) {
935 			snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
936 			isp->isp_osinfo.fw = firmware_get(fwname);
937 		}
938 		if (isp->isp_osinfo.fw == NULL) {
939 			snprintf(fwname, sizeof (fwname), "isp_%04x", did);
940 			isp->isp_osinfo.fw = firmware_get(fwname);
941 		}
942 		if (isp->isp_osinfo.fw != NULL) {
943 			union {
944 				const void *fred;
945 				uint16_t *bob;
946 			} u;
947 			u.fred = isp->isp_osinfo.fw->data;
948 			isp->isp_mdvec->dv_ispfw = u.bob;
949 		}
950 	}
951 #else
952 	if (isp_get_firmware_p) {
953 		int device = (int) pci_get_device(dev);
954 #ifdef	ISP_TARGET_MODE
955 		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
956 #else
957 		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
958 #endif
959 	}
960 #endif
961 
962 	/*
963 	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
964 	 * are set.
965 	 */
966 	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
967 		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
968 
969 	if (IS_2300(isp)) {	/* per QLogic errata */
970 		cmd &= ~PCIM_CMD_INVEN;
971 	}
972 
973 	if (IS_23XX(isp)) {
974 		/*
975 		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
976 		 */
977 		isp->isp_touched = 1;
978 
979 	}
980 
981 	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
982 		cmd &= ~PCIM_CMD_INTX_DISABLE;
983 	}
984 
985 	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
986 
987 	/*
988 	 * Make sure the Cache Line Size register is set sensibly.
989 	 */
990 	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
991 	if (data != linesz) {
992 		data = PCI_DFLT_LNSZ;
993 		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
994 		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
995 	}
996 
997 	/*
998 	 * Make sure the Latency Timer is sane.
999 	 */
1000 	data = pci_read_config(dev, PCIR_LATTIMER, 1);
1001 	if (data < PCI_DFLT_LTNCY) {
1002 		data = PCI_DFLT_LTNCY;
1003 		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
1004 		pci_write_config(dev, PCIR_LATTIMER, data, 1);
1005 	}
1006 
1007 	/*
1008 	 * Make sure we've disabled the ROM.
1009 	 */
1010 	data = pci_read_config(dev, PCIR_ROMADDR, 4);
1011 	data &= ~1;
1012 	pci_write_config(dev, PCIR_ROMADDR, data, 4);
1013 
1014 	iqd = 0;
1015 	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
1016 	    RF_ACTIVE | RF_SHAREABLE);
1017 	if (irq == NULL) {
1018 		device_printf(dev, "could not allocate interrupt\n");
1019 		goto bad;
1020 	}
1021 
1022 #if __FreeBSD_version >= 500000
1023 	/* Make sure the lock is set up. */
1024 	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
1025 	locksetup++;
1026 #endif
1027 
1028 	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
1029 		device_printf(dev, "could not setup interrupt\n");
1030 		goto bad;
1031 	}
1032 
1033 	/*
1034 	 * Last minute checks...
1035 	 */
1036 	if (IS_23XX(isp)) {
1037 		isp->isp_port = pci_get_function(dev);
1038 	}
1039 
1040 	/*
1041 	 * Make sure we're in reset state.
1042 	 */
1043 	ISP_LOCK(isp);
1044 	isp_reset(isp);
1045 	if (isp->isp_state != ISP_RESETSTATE) {
1046 		ISP_UNLOCK(isp);
1047 		goto bad;
1048 	}
1049 	isp_init(isp);
1050 	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
1051 		isp_uninit(isp);
1052 		ISP_UNLOCK(isp);
1053 		goto bad;
1054 	}
1055 	isp_attach(isp);
1056 	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
1057 		isp_uninit(isp);
1058 		ISP_UNLOCK(isp);
1059 		goto bad;
1060 	}
1061 	/*
1062 	 * XXXX: Here is where we might unload the f/w module
1063 	 * XXXX: (or decrease the reference count to it).
1064 	 */
1065 	ISP_UNLOCK(isp);
1066 	return (0);
1067 
1068 bad:
1069 
1070 	if (pcs && pcs->ih) {
1071 		(void) bus_teardown_intr(dev, irq, pcs->ih);
1072 	}
1073 
1074 #if __FreeBSD_version >= 500000
1075 	if (locksetup && isp) {
1076 		mtx_destroy(&isp->isp_osinfo.lock);
1077 	}
1078 #endif
1079 
1080 	if (irq) {
1081 		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
1082 	}
1083 
1084 
1085 	if (regs) {
1086 		(void) bus_release_resource(dev, rtp, rgd, regs);
1087 	}
1088 
1089 	if (pcs) {
1090 		if (pcs->pci_isp.isp_param) {
1091 #ifdef	ISP_FW_CRASH_DUMP
1092 			if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
1093 				free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
1094 			}
1095 #endif
1096 			free(pcs->pci_isp.isp_param, M_DEVBUF);
1097 		}
1098 	}
1099 
1100 	/*
1101 	 * XXXX: Here is where we might unload the f/w module
1102 	 * XXXX: (or decrease the reference count to it).
1103 	 */
1104 	return (ENXIO);
1105 }
1106 
1107 static void
1108 isp_pci_intr(void *arg)
1109 {
1110 	ispsoftc_t *isp = arg;
1111 	uint16_t isr, sema, mbox;
1112 
1113 	ISP_LOCK(isp);
1114 	isp->isp_intcnt++;
1115 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
1116 		isp->isp_intbogus++;
1117 	} else {
1118 		int iok = isp->isp_osinfo.intsok;
1119 		isp->isp_osinfo.intsok = 0;
1120 		isp_intr(isp, isr, sema, mbox);
1121 		isp->isp_osinfo.intsok = iok;
1122 	}
1123 	ISP_UNLOCK(isp);
1124 }
1125 
1126 
1127 #define	IspVirt2Off(a, x)	\
1128 	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
1129 	_BLK_REG_SHFT] + ((x) & 0xff))
1130 
1131 #define	BXR2(pcs, off)		\
1132 	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
1133 #define	BXW2(pcs, off, v)	\
1134 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
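/*
 * Register offsets used by the core driver are "virtual": the register
 * block (BIU, MBOX, SXP, RISC, DMA) is encoded in the bits above the low
 * eight. IspVirt2Off() uses the pci_poff[] table filled in at attach time
 * to translate a virtual offset into the real PCI register offset for the
 * chip at hand; BXR2/BXW2 then do the 16-bit bus space access there.
 */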
1135 
1136 
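/*
 * Debounced register read: read the register repeatedly (up to 1000 times)
 * until two consecutive reads agree. Only the ISP2100 is read this way,
 * as its interrupt status/semaphore registers apparently cannot be trusted
 * on a single read.
 */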
1137 static __inline int
1138 isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
1139 {
1140 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1141 	uint16_t val0, val1;
1142 	int i = 0;
1143 
1144 	do {
1145 		val0 = BXR2(pcs, IspVirt2Off(isp, off));
1146 		val1 = BXR2(pcs, IspVirt2Off(isp, off));
1147 	} while (val0 != val1 && ++i < 1000);
1148 	if (val0 != val1) {
1149 		return (1);
1150 	}
1151 	*rp = val0;
1152 	return (0);
1153 }
1154 
1155 static int
1156 isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp,
1157     uint16_t *semap, uint16_t *mbp)
1158 {
1159 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1160 	uint16_t isr, sema;
1161 
1162 	if (IS_2100(isp)) {
1163 		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
1164 			return (0);
1165 		}
1166 		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
1167 			return (0);
1168 		}
1169 	} else {
1170 		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
1171 		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
1172 	}
1173 	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
1174 	isr &= INT_PENDING_MASK(isp);
1175 	sema &= BIU_SEMA_LOCK;
1176 	if (isr == 0 && sema == 0) {
1177 		return (0);
1178 	}
1179 	*isrp = isr;
1180 	if ((*semap = sema) != 0) {
1181 		if (IS_2100(isp)) {
1182 			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
1183 				return (0);
1184 			}
1185 		} else {
1186 			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
1187 		}
1188 	}
1189 	return (1);
1190 }
1191 
1192 static int
1193 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp,
1194     uint16_t *semap, uint16_t *mbox0p)
1195 {
1196 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1197 	uint16_t hccr;
1198 	uint32_t r2hisr;
1199 
1200 	if ((BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0) {
1201 		*isrp = 0;
1202 		return (0);
1203 	}
1204 	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
1205 	    IspVirt2Off(isp, BIU_R2HSTSLO));
1206 	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1207 	if ((r2hisr & BIU_R2HST_INTR) == 0) {
1208 		*isrp = 0;
1209 		return (0);
1210 	}
1211 	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
1212 	case ISPR2HST_ROM_MBX_OK:
1213 	case ISPR2HST_ROM_MBX_FAIL:
1214 	case ISPR2HST_MBX_OK:
1215 	case ISPR2HST_MBX_FAIL:
1216 	case ISPR2HST_ASYNC_EVENT:
1217 		*isrp = r2hisr & 0xffff;
1218 		*mbox0p = (r2hisr >> 16);
1219 		*semap = 1;
1220 		return (1);
1221 	case ISPR2HST_RIO_16:
1222 		*isrp = r2hisr & 0xffff;
1223 		*mbox0p = ASYNC_RIO1;
1224 		*semap = 1;
1225 		return (1);
1226 	case ISPR2HST_FPOST:
1227 		*isrp = r2hisr & 0xffff;
1228 		*mbox0p = ASYNC_CMD_CMPLT;
1229 		*semap = 1;
1230 		return (1);
1231 	case ISPR2HST_FPOST_CTIO:
1232 		*isrp = r2hisr & 0xffff;
1233 		*mbox0p = ASYNC_CTIO_DONE;
1234 		*semap = 1;
1235 		return (1);
1236 	case ISPR2HST_RSPQ_UPDATE:
1237 		*isrp = r2hisr & 0xffff;
1238 		*mbox0p = 0;
1239 		*semap = 0;
1240 		return (1);
1241 	default:
1242 		hccr = ISP_READ(isp, HCCR);
1243 		if (hccr & HCCR_PAUSE) {
1244 			ISP_WRITE(isp, HCCR, HCCR_RESET);
1245 			isp_prt(isp, ISP_LOGERR,
1246 			    "RISC paused at interrupt (%x->%x)", hccr,
1247 			    ISP_READ(isp, HCCR));
1248 		} else {
1249 			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x",
1250 			    r2hisr);
1251 		}
1252 		return (0);
1253 	}
1254 }
1255 
1256 static uint16_t
1257 isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
1258 {
1259 	uint16_t rv;
1260 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1261 	int oldconf = 0;
1262 
1263 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1264 		/*
1265 		 * We will assume that someone has paused the RISC processor.
1266 		 */
1267 		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1268 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1269 		    oldconf | BIU_PCI_CONF1_SXP);
1270 	}
1271 	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1272 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1273 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1274 	}
1275 	return (rv);
1276 }
1277 
1278 static void
1279 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint16_t val)
1280 {
1281 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1282 	int oldconf = 0;
1283 
1284 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1285 		/*
1286 		 * We will assume that someone has paused the RISC processor.
1287 		 */
1288 		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1289 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1290 		    oldconf | BIU_PCI_CONF1_SXP);
1291 	}
1292 	BXW2(pcs, IspVirt2Off(isp, regoff), val);
1293 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1294 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
1295 	}
1296 }
1297 
1298 static uint16_t
1299 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
1300 {
1301 	uint16_t rv, oc = 0;
1302 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1303 
1304 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1305 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1306 		uint16_t tc;
1307 		/*
1308 		 * We will assume that someone has paused the RISC processor.
1309 		 */
1310 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1311 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1312 		if (regoff & SXP_BANK1_SELECT)
1313 			tc |= BIU_PCI1080_CONF1_SXP1;
1314 		else
1315 			tc |= BIU_PCI1080_CONF1_SXP0;
1316 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1317 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1318 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1319 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1320 		    oc | BIU_PCI1080_CONF1_DMA);
1321 	}
1322 	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
1323 	if (oc) {
1324 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1325 	}
1326 	return (rv);
1327 }
1328 
1329 static void
1330 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint16_t val)
1331 {
1332 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1333 	int oc = 0;
1334 
1335 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
1336 	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
1337 		uint16_t tc;
1338 		/*
1339 		 * We will assume that someone has paused the RISC processor.
1340 		 */
1341 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1342 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1343 		if (regoff & SXP_BANK1_SELECT)
1344 			tc |= BIU_PCI1080_CONF1_SXP1;
1345 		else
1346 			tc |= BIU_PCI1080_CONF1_SXP0;
1347 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
1348 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1349 		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
1350 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
1351 		    oc | BIU_PCI1080_CONF1_DMA);
1352 	}
1353 	BXW2(pcs, IspVirt2Off(isp, regoff), val);
1354 	if (oc) {
1355 		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
1356 	}
1357 }
1358 
1359 
1360 struct imush {
1361 	ispsoftc_t *isp;
1362 	int error;
1363 };
1364 
1365 static void imc(void *, bus_dma_segment_t *, int, int);
1366 
1367 static void
1368 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1369 {
1370 	struct imush *imushp = (struct imush *) arg;
1371 	if (error) {
1372 		imushp->error = error;
1373 	} else {
1374 		ispsoftc_t *isp = imushp->isp;
1375 		bus_addr_t addr = segs->ds_addr;
1376 
1377 		isp->isp_rquest_dma = addr;
1378 		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1379 		isp->isp_result_dma = addr;
1380 		if (IS_FC(isp)) {
1381 			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1382 			FCPARAM(isp)->isp_scdma = addr;
1383 		}
1384 	}
1385 }
1386 
1387 /*
1388  * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
1389  */
1390 #define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)
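/*
 * For example, with a typical MAXPHYS of 128KB and 4KB pages this comes to
 * 33 segments: one per page, plus one in case the buffer is not page aligned.
 */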
1391 
1392 #if __FreeBSD_version < 500000
1393 #define	isp_dma_tag_create	bus_dma_tag_create
1394 #else
1395 #define	isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)	\
1396 	bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \
1397 	    busdma_lock_mutex, &Giant, z)
1398 #endif
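/*
 * On 5.x and later, bus_dma_tag_create() takes a lock function and argument
 * for deferred callbacks; the shim above supplies busdma_lock_mutex on Giant
 * so those callbacks are serialized.
 */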
1399 
1400 static int
1401 isp_pci_mbxdma(ispsoftc_t *isp)
1402 {
1403 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1404 	caddr_t base;
1405 	uint32_t len;
1406 	int i, error, ns;
1407 	bus_size_t slim;	/* segment size */
1408 	bus_addr_t llim;	/* low limit of unavailable dma */
1409 	bus_addr_t hlim;	/* high limit of unavailable dma */
1410 	struct imush im;
1411 
1412 	/*
1413 	 * Already been here? If so, leave...
1414 	 */
1415 	if (isp->isp_rquest) {
1416 		return (0);
1417 	}
1418 
1419 	hlim = BUS_SPACE_MAXADDR;
1420 	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
1421 		slim = (bus_size_t) (1ULL << 32);
1422 		llim = BUS_SPACE_MAXADDR;
1423 	} else {
1424 		llim = BUS_SPACE_MAXADDR_32BIT;
1425 		slim = (1 << 24);
1426 	}
1427 
1428 	/*
1429 	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
1430 	 */
1431 #ifdef	ISP_TARGET_MODE
1432 	if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
1433 		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
1434 		return (1);
1435 	}
1436 #endif
1437 
1438 	ISP_UNLOCK(isp);
1439 	if (isp_dma_tag_create(NULL, 1, slim, llim, hlim,
1440 	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
1441 		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
1442 		ISP_LOCK(isp);
1443 		return (1);
1444 	}
1445 
1446 
1447 	len = sizeof (XS_T **) * isp->isp_maxcmds;
1448 	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1449 	if (isp->isp_xflist == NULL) {
1450 		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
1451 		ISP_LOCK(isp);
1452 		return (1);
1453 	}
1454 #ifdef	ISP_TARGET_MODE
1455 	len = sizeof (void **) * isp->isp_maxcmds;
1456 	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1457 	if (isp->isp_tgtlist == NULL) {
1458 		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
1459 		ISP_LOCK(isp);
1460 		return (1);
1461 	}
1462 #endif
1463 	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
1464 	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
1465 	if (pcs->dmaps == NULL) {
1466 		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
1467 		free(isp->isp_xflist, M_DEVBUF);
1468 #ifdef	ISP_TARGET_MODE
1469 		free(isp->isp_tgtlist, M_DEVBUF);
1470 #endif
1471 		ISP_LOCK(isp);
1472 		return (1);
1473 	}
1474 
1475 	/*
1476 	 * Allocate and map the request, result queues, plus FC scratch area.
1477 	 */
1478 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1479 	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1480 	if (IS_FC(isp)) {
1481 		len += ISP2100_SCRLEN;
1482 	}
1483 
1484 	ns = (len / PAGE_SIZE) + 1;
1485 	/*
1486 	 * Create a tag for the control spaces- force it to within 32 bits.
1487 	 */
1488 	if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim,
1489 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
1490 	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
1491 		isp_prt(isp, ISP_LOGERR,
1492 		    "cannot create a dma tag for control spaces");
1493 		free(pcs->dmaps, M_DEVBUF);
1494 		free(isp->isp_xflist, M_DEVBUF);
1495 #ifdef	ISP_TARGET_MODE
1496 		free(isp->isp_tgtlist, M_DEVBUF);
1497 #endif
1498 		ISP_LOCK(isp);
1499 		return (1);
1500 	}
1501 
1502 	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
1503 	    &isp->isp_cdmap) != 0) {
1504 		isp_prt(isp, ISP_LOGERR,
1505 		    "cannot allocate %d bytes of CCB memory", len);
1506 		bus_dma_tag_destroy(isp->isp_cdmat);
1507 		free(isp->isp_xflist, M_DEVBUF);
1508 #ifdef	ISP_TARGET_MODE
1509 		free(isp->isp_tgtlist, M_DEVBUF);
1510 #endif
1511 		free(pcs->dmaps, M_DEVBUF);
1512 		ISP_LOCK(isp);
1513 		return (1);
1514 	}
1515 
1516 	for (i = 0; i < isp->isp_maxcmds; i++) {
1517 		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
1518 		if (error) {
1519 			isp_prt(isp, ISP_LOGERR,
1520 			    "error %d creating per-cmd DMA maps", error);
1521 			while (--i >= 0) {
1522 				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
1523 			}
1524 			goto bad;
1525 		}
1526 	}
1527 
1528 	im.isp = isp;
1529 	im.error = 0;
1530 	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
1531 	if (im.error) {
1532 		isp_prt(isp, ISP_LOGERR,
1533 		    "error %d loading dma map for control areas", im.error);
1534 		goto bad;
1535 	}
1536 
1537 	isp->isp_rquest = base;
1538 	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1539 	isp->isp_result = base;
1540 	if (IS_FC(isp)) {
1541 		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1542 		FCPARAM(isp)->isp_scratch = base;
1543 	}
1544 	ISP_LOCK(isp);
1545 	return (0);
1546 
1547 bad:
1548 	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
1549 	bus_dma_tag_destroy(isp->isp_cdmat);
1550 	free(isp->isp_xflist, M_DEVBUF);
1551 #ifdef	ISP_TARGET_MODE
1552 	free(isp->isp_tgtlist, M_DEVBUF);
1553 #endif
1554 	free(pcs->dmaps, M_DEVBUF);
1555 	ISP_LOCK(isp);
1556 	isp->isp_rquest = NULL;
1557 	return (1);
1558 }
1559 
1560 typedef struct {
1561 	ispsoftc_t *isp;
1562 	void *cmd_token;
1563 	void *rq;
1564 	uint16_t *nxtip;
1565 	uint16_t optr;
1566 	int error;
1567 } mush_t;
1568 
1569 #define	MUSHERR_NOQENTRIES	-2
1570 
1571 #ifdef	ISP_TARGET_MODE
1572 /*
1573  * We need to handle DMA for target mode differently from initiator mode.
1574  *
1575  * DMA mapping and construction and submission of CTIO Request Entries
1576  * and rendezvous for completion are very tightly coupled because we start
1577  * out by knowing (per platform) how much data we have to move, but we
1578  * don't know, up front, how many DMA mapping segments will have to be used
1579  * to cover that data, so we don't know how many CTIO Request Entries we
1580  * will end up using. Further, for performance reasons we may want to
1581  * (on the last CTIO for Fibre Channel), send status too (if all went well).
1582  *
1583  * The standard vector still goes through isp_pci_dmasetup, but the callback
1584  * for the DMA mapping routines comes here instead with the whole transfer
1585  * mapped and a pointer to a partially filled in already allocated request
1586  * queue entry. We finish the job.
1587  */
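/*
 * A sketch of the chunking arithmetic used below: each CTIO can carry at
 * most ISP_RQDSEG data segments, so an nseg-segment transfer requires
 *
 *	nctios = (nseg + ISP_RQDSEG - 1) / ISP_RQDSEG
 *
 * CTIOs (computed below as nseg / ISP_RQDSEG, rounded up), plus one extra
 * dataless CTIO for status when STATUS_WITH_DATA is not defined.
 */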
1588 static void tdma_mk(void *, bus_dma_segment_t *, int, int);
1589 static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);
1590 
1591 #define	STATUS_WITH_DATA	1
1592 
1593 static void
1594 tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1595 {
1596 	mush_t *mp;
1597 	struct ccb_scsiio *csio;
1598 	ispsoftc_t *isp;
1599 	struct isp_pcisoftc *pcs;
1600 	bus_dmamap_t *dp;
1601 	ct_entry_t *cto, *qe;
1602 	uint8_t scsi_status;
1603 	uint16_t curi, nxti, handle;
1604 	uint32_t sflags;
1605 	int32_t resid;
1606 	int nth_ctio, nctios, send_status;
1607 
1608 	mp = (mush_t *) arg;
1609 	if (error) {
1610 		mp->error = error;
1611 		return;
1612 	}
1613 
1614 	isp = mp->isp;
1615 	csio = mp->cmd_token;
1616 	cto = mp->rq;
1617 	curi = isp->isp_reqidx;
1618 	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1619 
1620 	cto->ct_xfrlen = 0;
1621 	cto->ct_seg_count = 0;
1622 	cto->ct_header.rqs_entry_count = 1;
1623 	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1624 
1625 	if (nseg == 0) {
1626 		cto->ct_header.rqs_seqno = 1;
1627 		isp_prt(isp, ISP_LOGTDEBUG1,
1628 		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
1629 		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
1630 		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
1631 		    cto->ct_scsi_status, cto->ct_resid);
1632 		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
1633 		isp_put_ctio(isp, cto, qe);
1634 		return;
1635 	}
1636 
1637 	nctios = nseg / ISP_RQDSEG;
1638 	if (nseg % ISP_RQDSEG) {
1639 		nctios++;
1640 	}
1641 
1642 	/*
1643 	 * Save syshandle, and potentially any SCSI status, which we'll
1644 	 * reinsert on the last CTIO we're going to send.
1645 	 */
1646 
1647 	handle = cto->ct_syshandle;
1648 	cto->ct_syshandle = 0;
1649 	cto->ct_header.rqs_seqno = 0;
1650 	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;
1651 
1652 	if (send_status) {
1653 		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
1654 		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
1655 		/*
1656 		 * Preserve residual.
1657 		 */
1658 		resid = cto->ct_resid;
1659 
1660 		/*
1661 		 * Save actual SCSI status.
1662 		 */
1663 		scsi_status = cto->ct_scsi_status;
1664 
1665 #ifndef	STATUS_WITH_DATA
1666 		sflags |= CT_NO_DATA;
1667 		/*
1668 		 * We can't do a status at the same time as a data CTIO, so
1669 		 * we need to synthesize an extra CTIO at this level.
1670 		 */
1671 		nctios++;
1672 #endif
1673 	} else {
1674 		sflags = scsi_status = resid = 0;
1675 	}
1676 
1677 	cto->ct_resid = 0;
1678 	cto->ct_scsi_status = 0;
1679 
1680 	pcs = (struct isp_pcisoftc *)isp;
1681 	dp = &pcs->dmaps[isp_handle_index(handle)];
1682 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1683 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
1684 	} else {
1685 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
1686 	}
1687 
1688 	nxti = *mp->nxtip;
1689 
1690 	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
1691 		int seglim;
1692 
1693 		seglim = nseg;
1694 		if (seglim) {
1695 			int seg;
1696 
1697 			if (seglim > ISP_RQDSEG)
1698 				seglim = ISP_RQDSEG;
1699 
1700 			for (seg = 0; seg < seglim; seg++, nseg--) {
1701 				/*
1702 				 * Unlike normal initiator commands, we don't
1703 				 * do any swizzling here.
1704 				 */
1705 				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
1706 				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
1707 				cto->ct_xfrlen += dm_segs->ds_len;
1708 				dm_segs++;
1709 			}
1710 			cto->ct_seg_count = seg;
1711 		} else {
1712 			/*
1713 			 * This case should only happen when we're sending an
1714 			 * extra CTIO with final status.
1715 			 */
1716 			if (send_status == 0) {
1717 				isp_prt(isp, ISP_LOGWARN,
1718 				    "tdma_mk ran out of segments");
1719 				mp->error = EINVAL;
1720 				return;
1721 			}
1722 		}
1723 
1724 		/*
1725 		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
1726 		 * ct_tagtype, and ct_timeout have been carried over
1727 		 * unchanged from what our caller had set.
1728 		 *
1729 		 * The dataseg fields and the seg_count fields we just got
1730 		 * through setting. The data direction we've preserved all
1731 		 * along and only clear it if we're now sending status.
1732 		 */
1733 
1734 		if (nth_ctio == nctios - 1) {
1735 			/*
1736 			 * We're the last in a sequence of CTIOs, so mark
1737 			 * this CTIO and save the handle to the CCB such that
1738 			 * when this CTIO completes we can free dma resources
1739 			 * and do whatever else we need to do to finish the
1740 			 * rest of the command. We *don't* give this to the
1741 			 * firmware to work on- the caller will do that.
1742 			 */
1743 
1744 			cto->ct_syshandle = handle;
1745 			cto->ct_header.rqs_seqno = 1;
1746 
1747 			if (send_status) {
1748 				cto->ct_scsi_status = scsi_status;
1749 				cto->ct_flags |= sflags;
1750 				cto->ct_resid = resid;
1751 			}
1752 			if (send_status) {
1753 				isp_prt(isp, ISP_LOGTDEBUG1,
1754 				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
1755 				    "scsi status %x resid %d",
1756 				    cto->ct_fwhandle, csio->ccb_h.target_lun,
1757 				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
1758 				    cto->ct_scsi_status, cto->ct_resid);
1759 			} else {
1760 				isp_prt(isp, ISP_LOGTDEBUG1,
1761 				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
1762 				    cto->ct_fwhandle, csio->ccb_h.target_lun,
1763 				    cto->ct_iid, cto->ct_tag_val,
1764 				    cto->ct_flags);
1765 			}
1766 			isp_put_ctio(isp, cto, qe);
1767 			ISP_TDQE(isp, "last tdma_mk", curi, cto);
1768 			if (nctios > 1) {
1769 				MEMORYBARRIER(isp, SYNC_REQUEST,
1770 				    curi, QENTRY_LEN);
1771 			}
1772 		} else {
1773 			ct_entry_t *oqe = qe;
1774 
1775 			/*
1776 			 * Make sure syshandle fields are clean
1777 			 */
1778 			cto->ct_syshandle = 0;
1779 			cto->ct_header.rqs_seqno = 0;
1780 
1781 			isp_prt(isp, ISP_LOGTDEBUG1,
1782 			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
1783 			    cto->ct_fwhandle, csio->ccb_h.target_lun,
1784 			    cto->ct_iid, cto->ct_flags);
1785 
1786 			/*
1787 			 * Get a new CTIO
1788 			 */
1789 			qe = (ct_entry_t *)
1790 			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1791 			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
1792 			if (nxti == mp->optr) {
1793 				isp_prt(isp, ISP_LOGTDEBUG0,
1794 				    "Queue Overflow in tdma_mk");
1795 				mp->error = MUSHERR_NOQENTRIES;
1796 				return;
1797 			}
1798 
1799 			/*
1800 			 * Now that we're done with the old CTIO,
1801 			 * flush it out to the request queue.
1802 			 */
1803 			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
1804 			isp_put_ctio(isp, cto, oqe);
1805 			if (nth_ctio != 0) {
1806 				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
1807 				    QENTRY_LEN);
1808 			}
1809 			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));
1810 
1811 			/*
1812 			 * Reset some fields in the CTIO so we can reuse
1813 			 * for the next one we'll flush to the request
1814 			 * queue.
1815 			 */
1816 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1817 			cto->ct_header.rqs_entry_count = 1;
1818 			cto->ct_header.rqs_flags = 0;
1819 			cto->ct_status = 0;
1820 			cto->ct_scsi_status = 0;
1821 			cto->ct_xfrlen = 0;
1822 			cto->ct_resid = 0;
1823 			cto->ct_seg_count = 0;
1824 			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
1825 		}
1826 	}
1827 	*mp->nxtip = nxti;
1828 }
1829 
1830 /*
1831  * We don't have to do multiple CTIOs here. Instead, we can just do
1832  * continuation segments as needed. This greatly simplifies the code
1833  * and improves performance.
1834  */
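/*
 * Layout sketch: the first CTIO2 (or CTIO3, for 64-bit addressing) carries
 * up to ISP_RQDSEG_T2 (or ISP_RQDSEG_T3) data segments; any remaining
 * segments go into RQSTYPE_DATASEG (or RQSTYPE_A64_CONT) continuation
 * entries of ISP_CDSEG (or ISP_CDSEG64) segments each.
 */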
1835 
1836 static void
1837 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1838 {
1839 	mush_t *mp;
1840 	struct ccb_scsiio *csio;
1841 	ispsoftc_t *isp;
1842 	ct2_entry_t *cto, *qe;
1843 	uint16_t curi, nxti;
1844 	ispds_t *ds;
1845 	ispds64_t *ds64;
1846 	int segcnt, seglim;
1847 
1848 	mp = (mush_t *) arg;
1849 	if (error) {
1850 		mp->error = error;
1851 		return;
1852 	}
1853 
1854 	isp = mp->isp;
1855 	csio = mp->cmd_token;
1856 	cto = mp->rq;
1857 
1858 	curi = isp->isp_reqidx;
1859 	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1860 
1861 	if (nseg == 0) {
1862 		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
1863 			isp_prt(isp, ISP_LOGWARN,
1864 			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
1865 			    "set (0x%x)", cto->ct_flags);
1866 			mp->error = EINVAL;
1867 			return;
1868 		}
1869 		/*
1870 		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
1871 		 * flags to NO DATA and clear relative offset flags.
1872 		 * We preserve the ct_resid and the response area.
1873 		 */
1874 		cto->ct_header.rqs_seqno = 1;
1875 		cto->ct_seg_count = 0;
1876 		cto->ct_reloff = 0;
1877 		isp_prt(isp, ISP_LOGTDEBUG1,
1878 		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
1879 		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
1880 		    cto->ct_iid, cto->ct_flags, cto->ct_status,
1881 		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1882 		if (IS_2KLOGIN(isp)) {
1883 			isp_put_ctio2e(isp,
1884 			    (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
1885 		} else {
1886 			isp_put_ctio2(isp, cto, qe);
1887 		}
1888 		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
1889 		return;
1890 	}
1891 
1892 	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
1893 		isp_prt(isp, ISP_LOGERR,
1894 		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
1895 		    "(0x%x)", cto->ct_flags);
1896 		mp->error = EINVAL;
1897 		return;
1898 	}
1899
1901 	nxti = *mp->nxtip;
1902 
1903 	/*
1904 	 * Check to see whether we need DAC (dual address cycle)
1905 	 * addressing for this transfer.
1906 	 *
1907 	 * Any segment at or above the 4GB boundary forces it.
1908 	 */
1909 	segcnt = nseg;
1910 	if (sizeof (bus_addr_t) > 4) {
1911 		for (segcnt = 0; segcnt < nseg; segcnt++) {
1912 			uint64_t addr = dm_segs[segcnt].ds_addr;
1913 			if (addr >= 0x100000000LL) {
1914 				break;
1915 			}
1916 		}
1917 	}
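	/*
	 * If any segment requires DAC, promote this CTIO2 to a CTIO3
	 * and use 64 bit data segment descriptors throughout.
	 */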
1918 	if (segcnt != nseg) {
1919 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
1920 		seglim = ISP_RQDSEG_T3;
1921 		ds64 = &cto->rsp.m0.ct_dataseg64[0];
1922 		ds = NULL;
1923 	} else {
1924 		seglim = ISP_RQDSEG_T2;
1925 		ds64 = NULL;
1926 		ds = &cto->rsp.m0.ct_dataseg[0];
1927 	}
1928 	cto->ct_seg_count = 0;
1929 
1930 	/*
1931 	 * Set up the CTIO2 data segments.
1932 	 */
1933 	for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg;
1934 	    cto->ct_seg_count++, segcnt++) {
1935 		if (ds64) {
1936 			ds64->ds_basehi =
1937 			    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
1938 			ds64->ds_base = dm_segs[segcnt].ds_addr;
1939 			ds64->ds_count = dm_segs[segcnt].ds_len;
1940 			ds64++;
1941 		} else {
1942 			ds->ds_base = dm_segs[segcnt].ds_addr;
1943 			ds->ds_count = dm_segs[segcnt].ds_len;
1944 			ds++;
1945 		}
1946 		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1947 #if __FreeBSD_version < 500000
1948 		isp_prt(isp, ISP_LOGTDEBUG1,
1949 		    "isp_send_ctio2: ent0[%d]0x%llx:%llu",
1950 		    cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr,
1951 		    (uint64_t)dm_segs[segcnt].ds_len);
1952 #else
1953 		isp_prt(isp, ISP_LOGTDEBUG1,
1954 		    "isp_send_ctio2: ent0[%d]0x%jx:%ju",
1955 		    cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
1956 		    (uintmax_t)dm_segs[segcnt].ds_len);
1957 #endif
1958 	}
1959 
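	/*
	 * Spill any segments that didn't fit in the CTIO itself into
	 * continuation entries, bumping rqs_entry_count for each one.
	 */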
1960 	while (segcnt < nseg) {
1961 		uint16_t curip;
1962 		int seg;
1963 		ispcontreq_t local, *crq = &local, *qep;
1964 
1965 		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1966 		curip = nxti;
1967 		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
1968 		if (nxti == mp->optr) {
1970 			isp_prt(isp, ISP_LOGTDEBUG0,
1971 			    "tdma_mkfc: request queue overflow");
1972 			mp->error = MUSHERR_NOQENTRIES;
1973 			return;
1974 		}
1975 		cto->ct_header.rqs_entry_count++;
1976 		MEMZERO((void *)crq, sizeof (*crq));
1977 		crq->req_header.rqs_entry_count = 1;
1978 		if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
1979 			seglim = ISP_CDSEG64;
1980 			ds = NULL;
1981 			ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0];
1982 			crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
1983 		} else {
1984 			seglim = ISP_CDSEG;
1985 			ds = &crq->req_dataseg[0];
1986 			ds64 = NULL;
1987 			crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1988 		}
1989 		for (seg = 0; segcnt < nseg && seg < seglim;
1990 		    segcnt++, seg++) {
1991 			if (ds64) {
1992 				ds64->ds_basehi =
1993 				  ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
1994 				ds64->ds_base = dm_segs[segcnt].ds_addr;
1995 				ds64->ds_count = dm_segs[segcnt].ds_len;
1996 				ds64++;
1997 			} else {
1998 				ds->ds_base = dm_segs[segcnt].ds_addr;
1999 				ds->ds_count = dm_segs[segcnt].ds_len;
2000 				ds++;
2001 			}
2002 #if __FreeBSD_version < 500000
2003 			isp_prt(isp, ISP_LOGTDEBUG1,
2004 			    "isp_send_ctio2: ent%d[%d]%llx:%llu",
2005 			    cto->ct_header.rqs_entry_count-1, seg,
2006 			    (uint64_t)dm_segs[segcnt].ds_addr,
2007 			    (uint64_t)dm_segs[segcnt].ds_len);
2008 #else
2009 			isp_prt(isp, ISP_LOGTDEBUG1,
2010 			    "isp_send_ctio2: ent%d[%d]%jx:%ju",
2011 			    cto->ct_header.rqs_entry_count-1, seg,
2012 			    (uintmax_t)dm_segs[segcnt].ds_addr,
2013 			    (uintmax_t)dm_segs[segcnt].ds_len);
2014 #endif
2015 			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
2016 			cto->ct_seg_count++;
2017 		}
2018 		isp_put_cont_req(isp, crq, qep);
2019 		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
2020 		ISP_TDQE(isp, "cont entry", curip, qep);
2021 	}
2022 
2023 	/*
2024 	 * Now do final twiddling for the CTIO itself.
2025 	 */
2026 	cto->ct_header.rqs_seqno = 1;
2027 	isp_prt(isp, ISP_LOGTDEBUG1,
2028 	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
2029 	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
2030 	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
2031 	    cto->ct_resid);
2032 	if (IS_2KLOGIN(isp))
2033 		isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
2034 	else
2035 		isp_put_ctio2(isp, cto, qe);
2036 	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
2037 	*mp->nxtip = nxti;
2038 }
2039 #endif
2040 
2041 static void dma2_a64(void *, bus_dma_segment_t *, int, int);
2042 static void dma2(void *, bus_dma_segment_t *, int, int);
2043 
2044 static void
2045 dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2046 {
2047 	mush_t *mp;
2048 	ispsoftc_t *isp;
2049 	struct ccb_scsiio *csio;
2050 	struct isp_pcisoftc *pcs;
2051 	bus_dmamap_t *dp;
2052 	bus_dma_segment_t *eseg;
2053 	ispreq64_t *rq;
2054 	int seglim, datalen;
2055 	uint16_t nxti;
2056 
2057 	mp = (mush_t *) arg;
2058 	if (error) {
2059 		mp->error = error;
2060 		return;
2061 	}
2062 
2063 	if (nseg < 1) {
2064 		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2065 		mp->error = EFAULT;
2066 		return;
2067 	}
2068 	csio = mp->cmd_token;
2069 	isp = mp->isp;
2070 	rq = mp->rq;
2071 	pcs = (struct isp_pcisoftc *)mp->isp;
2072 	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2073 	nxti = *mp->nxtip;
2074 
2075 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2076 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2077 	} else {
2078 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2079 	}
2080 	datalen = XS_XFRLEN(csio);
2081 
2082 	/*
2083 	 * We're passed an initial partially filled in entry that
2084 	 * has most fields filled in except for data transfer
2085 	 * related values.
2086 	 *
2087 	 * Our job is to fill in the initial request queue entry and
2088 	 * then to start allocating and filling in continuation entries
2089 	 * until we've covered the entire transfer.
2090 	 */
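	/*
	 * Each 64 bit segment address is split with DMA_LO32/DMA_HI32
	 * across the ds_base/ds_basehi pair of the descriptor.
	 */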
2091 
2092 	if (IS_FC(isp)) {
2093 		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
2094 		seglim = ISP_RQDSEG_T3;
2095 		((ispreqt3_t *)rq)->req_totalcnt = datalen;
2096 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2097 			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
2098 		} else {
2099 			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
2100 		}
2101 	} else {
2102 		rq->req_header.rqs_entry_type = RQSTYPE_A64;
2103 		if (csio->cdb_len > 12) {
2104 			seglim = 0;
2105 		} else {
2106 			seglim = ISP_RQDSEG_A64;
2107 		}
2108 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2109 			rq->req_flags |= REQFLAG_DATA_IN;
2110 		} else {
2111 			rq->req_flags |= REQFLAG_DATA_OUT;
2112 		}
2113 	}
2114 
2115 	eseg = dm_segs + nseg;
2116 
2117 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
2118 		if (IS_FC(isp)) {
2119 			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
2120 			rq3->req_dataseg[rq3->req_seg_count].ds_base =
2121 			    DMA_LO32(dm_segs->ds_addr);
2122 			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
2123 			    DMA_HI32(dm_segs->ds_addr);
2124 			rq3->req_dataseg[rq3->req_seg_count].ds_count =
2125 			    dm_segs->ds_len;
2126 		} else {
2127 			rq->req_dataseg[rq->req_seg_count].ds_base =
2128 			    DMA_LO32(dm_segs->ds_addr);
2129 			rq->req_dataseg[rq->req_seg_count].ds_basehi =
2130 			    DMA_HI32(dm_segs->ds_addr);
2131 			rq->req_dataseg[rq->req_seg_count].ds_count =
2132 			    dm_segs->ds_len;
2133 		}
2134 		datalen -= dm_segs->ds_len;
2135 		rq->req_seg_count++;
2136 		dm_segs++;
2137 	}
2138 
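	/*
	 * Whatever didn't fit in the request proper goes into
	 * RQSTYPE_A64_CONT continuation entries behind it.
	 */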
2139 	while (datalen > 0 && dm_segs != eseg) {
2140 		uint16_t onxti;
2141 		ispcontreq64_t local, *crq = &local, *cqe;
2142 
2143 		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2144 		onxti = nxti;
2145 		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2146 		if (nxti == mp->optr) {
2147 			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2148 			mp->error = MUSHERR_NOQENTRIES;
2149 			return;
2150 		}
2151 		rq->req_header.rqs_entry_count++;
2152 		MEMZERO((void *)crq, sizeof (*crq));
2153 		crq->req_header.rqs_entry_count = 1;
2154 		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
2155 
2156 		seglim = 0;
2157 		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
2158 			crq->req_dataseg[seglim].ds_base =
2159 			    DMA_LO32(dm_segs->ds_addr);
2160 			crq->req_dataseg[seglim].ds_basehi =
2161 			    DMA_HI32(dm_segs->ds_addr);
2162 			crq->req_dataseg[seglim].ds_count =
2163 			    dm_segs->ds_len;
2164 			rq->req_seg_count++;
2165 			datalen -= dm_segs->ds_len;
2166 			dm_segs++;
2167 			seglim++;
2168 		}
2169 		isp_put_cont64_req(isp, crq, cqe);
2170 		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
2171 	}
2172 	*mp->nxtip = nxti;
2173 }
2174 
2175 static void
2176 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2177 {
2178 	mush_t *mp;
2179 	ispsoftc_t *isp;
2180 	struct ccb_scsiio *csio;
2181 	struct isp_pcisoftc *pcs;
2182 	bus_dmamap_t *dp;
2183 	bus_dma_segment_t *eseg;
2184 	ispreq_t *rq;
2185 	int seglim, datalen;
2186 	uint16_t nxti;
2187 
2188 	mp = (mush_t *) arg;
2189 	if (error) {
2190 		mp->error = error;
2191 		return;
2192 	}
2193 
2194 	if (nseg < 1) {
2195 		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2196 		mp->error = EFAULT;
2197 		return;
2198 	}
2199 	csio = mp->cmd_token;
2200 	isp = mp->isp;
2201 	rq = mp->rq;
2202 	pcs = (struct isp_pcisoftc *)mp->isp;
2203 	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2204 	nxti = *mp->nxtip;
2205 
2206 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2207 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2208 	} else {
2209 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2210 	}
2211 
2212 	datalen = XS_XFRLEN(csio);
2213 
2214 	/*
2215 	 * We're passed an initial partially filled in entry that
2216 	 * has most fields filled in except for data transfer
2217 	 * related values.
2218 	 *
2219 	 * Our job is to fill in the initial request queue entry and
2220 	 * then to start allocating and filling in continuation entries
2221 	 * until we've covered the entire transfer.
2222 	 */
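	/*
	 * This is the 32 bit variant: only ds_base is filled in, and
	 * overflow segments use RQSTYPE_DATASEG continuation entries.
	 */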
2223 
2224 	if (IS_FC(isp)) {
2225 		seglim = ISP_RQDSEG_T2;
2226 		((ispreqt2_t *)rq)->req_totalcnt = datalen;
2227 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2228 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
2229 		} else {
2230 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
2231 		}
2232 	} else {
2233 		if (csio->cdb_len > 12) {
2234 			seglim = 0;
2235 		} else {
2236 			seglim = ISP_RQDSEG;
2237 		}
2238 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2239 			rq->req_flags |= REQFLAG_DATA_IN;
2240 		} else {
2241 			rq->req_flags |= REQFLAG_DATA_OUT;
2242 		}
2243 	}
2244 
2245 	eseg = dm_segs + nseg;
2246 
2247 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
2248 		if (IS_FC(isp)) {
2249 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
2250 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
2251 			    DMA_LO32(dm_segs->ds_addr);
2252 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
2253 			    dm_segs->ds_len;
2254 		} else {
2255 			rq->req_dataseg[rq->req_seg_count].ds_base =
2256 				DMA_LO32(dm_segs->ds_addr);
2257 			rq->req_dataseg[rq->req_seg_count].ds_count =
2258 				dm_segs->ds_len;
2259 		}
2260 		datalen -= dm_segs->ds_len;
2261 		rq->req_seg_count++;
2262 		dm_segs++;
2263 	}
2264 
2265 	while (datalen > 0 && dm_segs != eseg) {
2266 		uint16_t onxti;
2267 		ispcontreq_t local, *crq = &local, *cqe;
2268 
2269 		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2270 		onxti = nxti;
2271 		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2272 		if (nxti == mp->optr) {
2273 			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2274 			mp->error = MUSHERR_NOQENTRIES;
2275 			return;
2276 		}
2277 		rq->req_header.rqs_entry_count++;
2278 		MEMZERO((void *)crq, sizeof (*crq));
2279 		crq->req_header.rqs_entry_count = 1;
2280 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
2281 
2282 		seglim = 0;
2283 		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
2284 			crq->req_dataseg[seglim].ds_base =
2285 			    DMA_LO32(dm_segs->ds_addr);
2286 			crq->req_dataseg[seglim].ds_count =
2287 			    dm_segs->ds_len;
2288 			rq->req_seg_count++;
2289 			datalen -= dm_segs->ds_len;
2290 			dm_segs++;
2291 			seglim++;
2292 		}
2293 		isp_put_cont_req(isp, crq, cqe);
2294 		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
2295 	}
2296 	*mp->nxtip = nxti;
2297 }
2298 
2299 /*
2300  * We enter with ISP_LOCK held
2301  */
2302 static int
2303 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq,
2304 	uint16_t *nxtip, uint16_t optr)
2305 {
2306 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2307 	ispreq_t *qep;
2308 	bus_dmamap_t *dp = NULL;
2309 	mush_t mush, *mp;
2310 	void (*eptr)(void *, bus_dma_segment_t *, int, int);
2311 
2312 	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
2313 #ifdef	ISP_TARGET_MODE
2314 	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
2315 		if (IS_FC(isp)) {
2316 			eptr = tdma_mkfc;
2317 		} else {
2318 			eptr = tdma_mk;
2319 		}
2320 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
2321 		    (csio->dxfer_len == 0)) {
2322 			mp = &mush;
2323 			mp->isp = isp;
2324 			mp->cmd_token = csio;
2325 			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
2326 			mp->nxtip = nxtip;
2327 			mp->optr = optr;
2328 			mp->error = 0;
2329 			ISPLOCK_2_CAMLOCK(isp);
2330 			(*eptr)(mp, NULL, 0, 0);
2331 			CAMLOCK_2_ISPLOCK(isp);
2332 			goto mbxsync;
2333 		}
2334 	} else
2335 #endif
2336 	if (sizeof (bus_addr_t) > 4) {
2337 		eptr = dma2_a64;
2338 	} else {
2339 		eptr = dma2;
2340 	}
2341
2343 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
2344 	    (csio->dxfer_len == 0)) {
2345 		rq->req_seg_count = 1;
2346 		goto mbxsync;
2347 	}
2348 
2349 	/*
2350 	 * Stage the state that the callback-based dma allocation
2351 	 * we have to use will need...
2352 	 */
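	/*
	 * Note that bus_dmamap_load() either invokes the callback
	 * synchronously or returns EINPROGRESS for a deferred one,
	 * which this driver cannot handle and treats as an error.
	 */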
2353 	mp = &mush;
2354 	mp->isp = isp;
2355 	mp->cmd_token = csio;
2356 	mp->rq = rq;
2357 	mp->nxtip = nxtip;
2358 	mp->optr = optr;
2359 	mp->error = 0;
2360 
2361 	ISPLOCK_2_CAMLOCK(isp);
2362 	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2363 		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
2364 			int error, s;
2365 			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2366 			s = splsoftvm();
2367 			error = bus_dmamap_load(pcs->dmat, *dp,
2368 			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
2369 			if (error == EINPROGRESS) {
2370 				bus_dmamap_unload(pcs->dmat, *dp);
2371 				mp->error = EINVAL;
2372 				isp_prt(isp, ISP_LOGERR,
2373 				    "deferred dma allocation not supported");
2374 			} else if (error && mp->error == 0) {
2375 #ifdef	DIAGNOSTIC
2376 				isp_prt(isp, ISP_LOGERR,
2377 				    "error %d in dma mapping code", error);
2378 #endif
2379 				mp->error = error;
2380 			}
2381 			splx(s);
2382 		} else {
2383 			/* Pointer to physical buffer */
2384 			struct bus_dma_segment seg;
2385 			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
2386 			seg.ds_len = csio->dxfer_len;
2387 			(*eptr)(mp, &seg, 1, 0);
2388 		}
2389 	} else {
2390 		struct bus_dma_segment *segs;
2391 
2392 		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2393 			isp_prt(isp, ISP_LOGERR,
2394 			    "Physical segment pointers unsupported");
2395 			mp->error = EINVAL;
2396 		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2397 			isp_prt(isp, ISP_LOGERR,
2398 			    "Virtual segment addresses unsupported");
2399 			mp->error = EINVAL;
2400 		} else {
2401 			/* Just use the segments provided */
2402 			segs = (struct bus_dma_segment *) csio->data_ptr;
2403 			(*eptr)(mp, segs, csio->sglist_cnt, 0);
2404 		}
2405 	}
2406 	CAMLOCK_2_ISPLOCK(isp);
2407 	if (mp->error) {
2408 		int retval = CMD_COMPLETE;
2409 		if (mp->error == MUSHERR_NOQENTRIES) {
2410 			retval = CMD_EAGAIN;
2411 		} else if (mp->error == EFBIG) {
2412 			XS_SETERR(csio, CAM_REQ_TOO_BIG);
2413 		} else if (mp->error == EINVAL) {
2414 			XS_SETERR(csio, CAM_REQ_INVALID);
2415 		} else {
2416 			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
2417 		}
2418 		return (retval);
2419 	}
2420 mbxsync:
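	/*
	 * Copy the finished request into its queue slot; the isp_put_*
	 * routines take care of any byte order conversion the chip
	 * needs along the way.
	 */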
2421 	switch (rq->req_header.rqs_entry_type) {
2422 	case RQSTYPE_REQUEST:
2423 		isp_put_request(isp, rq, qep);
2424 		break;
2425 	case RQSTYPE_CMDONLY:
2426 		isp_put_extended_request(isp, (ispextreq_t *)rq,
2427 		    (ispextreq_t *)qep);
2428 		break;
2429 	case RQSTYPE_T2RQS:
2430 		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
2431 		break;
2432 	case RQSTYPE_A64:
2433 	case RQSTYPE_T3RQS:
2434 		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
2435 		break;
2436 	}
2437 	return (CMD_QUEUED);
2438 }
2439 
2440 static void
2441 isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint16_t handle)
2442 {
2443 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2444 	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
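	/* Complete the sync in the direction the data actually moved. */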
2445 	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2446 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
2447 	} else {
2448 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
2449 	}
2450 	bus_dmamap_unload(pcs->dmat, *dp);
2451 }
2452
2454 static void
2455 isp_pci_reset1(ispsoftc_t *isp)
2456 {
2457 	/* Make sure the BIOS is disabled */
2458 	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
2459 	/* and enable interrupts */
2460 	ENABLE_INTS(isp);
2461 }
2462 
2463 static void
2464 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
2465 {
2466 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2467 	if (msg)
2468 		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
2469 	else
2470 		printf("%s:\n", device_get_nameunit(isp->isp_dev));
2471 	if (IS_SCSI(isp))
2472 		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
2473 	else
2474 		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
2475 	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
2476 	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
2477 	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
2478
2480 	if (IS_SCSI(isp)) {
2481 		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
2482 		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
2483 			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
2484 			ISP_READ(isp, CDMA_FIFO_STS));
2485 		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
2486 			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
2487 			ISP_READ(isp, DDMA_FIFO_STS));
2488 		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
2489 			ISP_READ(isp, SXP_INTERRUPT),
2490 			ISP_READ(isp, SXP_GROSS_ERR),
2491 			ISP_READ(isp, SXP_PINS_CTRL));
2492 		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
2493 	}
2494 	printf("    mbox regs: %x %x %x %x %x\n",
2495 	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
2496 	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
2497 	    ISP_READ(isp, OUTMAILBOX4));
2498 	printf("    PCI Status Command/Status=%x\n",
2499 	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 4));
2500 }
2501