/*-
 *
 * Copyright (c) 1997-2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#if __FreeBSD_version >= 700000
#include <sys/linker.h>
#include <sys/firmware.h>
#endif
#include <sys/bus.h>
#if __FreeBSD_version < 500000
#include <pci/pcireg.h>
#include <pci/pcivar.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#else
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#endif
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>

#include <dev/isp/isp_freebsd.h>

#if __FreeBSD_version < 500000
#define	BUS_PROBE_DEFAULT	0
#endif

static uint16_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint16_t);
static uint16_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint16_t);
static int
isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int
isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static int
isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint16_t *, uint16_t);
static void
isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint16_t);


static void isp_pci_reset1(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

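/*
 * Per-chip-family dispatch tables. The platform-independent core calls
 * through these vectors for interrupt decoding, register access, and DMA
 * setup/teardown, so each ISP family only supplies the routines that differ.
 */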
static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,
	isp_pci_reset1,
	isp_pci_dumpregs
};

#ifndef	PCIM_CMD_INVEN
#define	PCIM_CMD_INVEN			0x10
#endif
#ifndef	PCIM_CMD_BUSMASTEREN
#define	PCIM_CMD_BUSMASTEREN		0x0004
#endif
#ifndef	PCIM_CMD_PERRESPEN
#define	PCIM_CMD_PERRESPEN		0x0040
#endif
#ifndef	PCIM_CMD_SEREN
#define	PCIM_CMD_SEREN			0x0100
#endif
#ifndef	PCIM_CMD_INTX_DISABLE
#define	PCIM_CMD_INTX_DISABLE		0x0400
#endif

#ifndef	PCIR_COMMAND
#define	PCIR_COMMAND			0x04
#endif

#ifndef	PCIR_CACHELNSZ
#define	PCIR_CACHELNSZ			0x0c
#endif

#ifndef	PCIR_LATTIMER
#define	PCIR_LATTIMER			0x0d
#endif

#ifndef	PCIR_ROMADDR
#define	PCIR_ROMADDR			0x30
#endif

#ifndef	PCI_VENDOR_QLOGIC
#define	PCI_VENDOR_QLOGIC		0x1077
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1020
#define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1080
#define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP10160
#define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP12160
#define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1240
#define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP1280
#define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2100
#define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2200
#define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2300
#define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2312
#define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2322
#define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP2422
#define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6312
#define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
#endif

#ifndef	PCI_PRODUCT_QLOGIC_ISP6322
#define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
#endif


#define	PCI_QLOGIC_ISP1020	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1080	\
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP10160	\
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP12160	\
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1240	\
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP1280	\
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2100	\
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2200	\
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2300	\
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2312	\
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2322	\
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP2422	\
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6312	\
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)

#define	PCI_QLOGIC_ISP6322	\
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define	AMI_RAID_SUBVENDOR_ID	0x101e

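/*
 * Config space offsets of the two Base Address Registers we may map:
 * BAR0 (0x10) is the I/O port window, BAR1 (0x14) the memory window.
 */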
#define	IO_MAP_REG	0x10
#define	MEM_MAP_REG	0x14

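/*
 * Defaults the attach code programs when the values it finds look off:
 * latency timer in PCI clocks, cache line size in 32-bit words (so 0x10
 * is 64 bytes).
 */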
#define	PCI_DFLT_LTNCY	0x40
#define	PCI_DFLT_LNSZ	0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);


struct isp_pcisoftc {
	ispsoftc_t			pci_isp;
	device_t			pci_dev;
	struct resource *		pci_reg;
	bus_space_tag_t			pci_st;
	bus_space_handle_t		pci_sh;
	void *				ih;
	int16_t				pci_poff[_NREG_BLKS];
	bus_dma_tag_t			dmat;
	bus_dmamap_t			*dmaps;
};

static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		isp_pci_probe),
	DEVMETHOD(device_attach,	isp_pci_attach),
	{ 0, 0 }
};
static void isp_pci_intr(void *);

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
#if __FreeBSD_version >= 700000
MODULE_DEPEND(isp, ispfw, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
#else
typedef void ispfwfunc(int, int, int, uint16_t **);
extern ispfwfunc *isp_get_firmware_p;
#endif

static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
#if	0
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
#endif
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	default:
		return (ENXIO);
	}
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

#if __FreeBSD_version < 500000
static void
isp_get_options(device_t dev, ispsoftc_t *isp)
{
	uint64_t wwn;
	int bitmap, unit;

	unit = device_get_unit(dev);
	if (getenv_int("isp_disable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			isp->isp_osinfo.disabled = 1;
			return;
		}
	}

	if (getenv_int("isp_no_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_fwload", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NORELOAD;
	}
	if (getenv_int("isp_no_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_nvram", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_NONVRAM;
	}
	if (getenv_int("isp_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_no_fcduplex", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX;
	}
	if (getenv_int("isp_nport", &bitmap)) {
		if (bitmap & (1 << unit))
			isp->isp_confopts |= ISP_CFG_NPORT;
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor be directly coerced to interpret
	 * the right hand side of the assignment the way you want them
	 * to, we have to force WWN hint replacement to specify WWN
	 * strings with a leading 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	if (getenv_quad("isp_portwwn", &wwn)) {
		isp->isp_osinfo.default_port_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWPN;
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	if (getenv_quad("isp_nodewwn", &wwn)) {
		isp->isp_osinfo.default_node_wwn = wwn;
		isp->isp_confopts |= ISP_CFG_OWNWWNN;
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	bitmap = 0;
	(void) getenv_int("isp_debug", &bitmap);
	if (bitmap) {
		isp->isp_dblev = bitmap;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}

#ifdef	ISP_FW_CRASH_DUMP
	bitmap = 0;
	if (getenv_int("isp_fw_dump_enable", &bitmap)) {
		if (bitmap & (1 << unit)) {
			size_t amt = 0;
			if (IS_2200(isp)) {
				amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
			} else if (IS_23XX(isp)) {
				amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
			}
			if (amt) {
				FCPARAM(isp)->isp_dump_data =
				    malloc(amt, M_DEVBUF, M_WAITOK);
				memset(FCPARAM(isp)->isp_dump_data, 0, amt);
			} else {
				device_printf(dev,
				    "f/w crash dumps not supported for card\n");
			}
		}
	}
#endif
}

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int bitmap;
	int unit = device_get_unit(dev);

	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;
	if (getenv_int("isp_mem_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			*m1 = PCIM_CMD_MEMEN;
			*m2 = PCIM_CMD_PORTEN;
		}
	}
	bitmap = 0;
	if (getenv_int("isp_io_map", &bitmap)) {
		if (bitmap & (1 << unit)) {
			*m1 = PCIM_CMD_PORTEN;
			*m2 = PCIM_CMD_MEMEN;
		}
	}
}
#else
static void
isp_get_options(device_t dev, ispsoftc_t *isp)
{
	int tval;
	const char *sptr;
	/*
	 * Figure out if we're supposed to skip this one.
	 */

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "disable", &tval) == 0 && tval) {
		device_printf(dev, "disabled at user request\n");
		isp->isp_osinfo.disabled = 1;
		return;
	}

	tval = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "role", &tval) == 0 && tval != -1) {
		tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET);
		isp->isp_role = tval;
		device_printf(dev, "setting role to 0x%x\n", isp->isp_role);
	} else {
#ifdef	ISP_TARGET_MODE
		isp->isp_role = ISP_ROLE_TARGET;
#else
		isp->isp_role = ISP_DEFAULT_ROLES;
#endif
	}

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fullduplex", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
#ifdef	ISP_FW_CRASH_DUMP
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "fw_dump_enable", &tval) == 0 && tval != 0) {
		size_t amt = 0;
		if (IS_2200(isp)) {
			amt = QLA2200_RISC_IMAGE_DUMP_SIZE;
		} else if (IS_23XX(isp)) {
			amt = QLA2300_RISC_IMAGE_DUMP_SIZE;
		}
		if (amt) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO);
		} else {
			device_printf(dev,
			    "f/w crash dumps not supported for this model\n");
		}
	}
#endif

	sptr = 0;
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "topology", (const char **) &sptr) == 0 && sptr != 0) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor be directly coerced to interpret
	 * the right hand side of the assignment the way you want them
	 * to, we have to force WWN hint replacement to specify WWN
	 * strings with a leading 'w' (e.g. w50000000aaaa0001). Sigh.
	 */
	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "portwwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_port_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWPN;
		}
	}
	if (isp->isp_osinfo.default_port_wwn == 0) {
		isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull;
	}

	sptr = 0;
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    "nodewwn", (const char **) &sptr);
	if (tval == 0 && sptr != 0 && *sptr++ == 'w') {
		char *eptr = 0;
		isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			isp->isp_osinfo.default_node_wwn = 0;
		} else {
			isp->isp_confopts |= ISP_CFG_OWNWWNN;
		}
	}
	if (isp->isp_osinfo.default_node_wwn == 0) {
		isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull;
	}

	isp->isp_osinfo.default_id = -1;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "iid", &tval) == 0) {
		isp->isp_osinfo.default_id = tval;
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}
	if (isp->isp_osinfo.default_id == -1) {
		if (IS_FC(isp)) {
			isp->isp_osinfo.default_id = 109;
		} else {
			isp->isp_osinfo.default_id = 7;
		}
	}

	/*
	 * Set up logging levels.
	 */
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}

}

static void
isp_get_pci_options(device_t dev, int *m1, int *m2)
{
	int tval;
	/*
	 * Which should we try first - memory mapping or i/o mapping?
	 *
	 * We used to try memory first followed by i/o on alpha, otherwise
	 * the reverse, but we should just try memory first all the time now.
	 */
	*m1 = PCIM_CMD_MEMEN;
	*m2 = PCIM_CMD_PORTEN;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_PORTEN;
		*m2 = PCIM_CMD_MEMEN;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_memmap", &tval) == 0 && tval != 0) {
		*m1 = PCIM_CMD_MEMEN;
		*m2 = PCIM_CMD_PORTEN;
	}
}
#endif

static int
isp_pci_attach(device_t dev)
{
	struct resource *regs, *irq;
	int rtp, rgd, iqd, m1, m2;
	uint32_t data, cmd, linesz, psize, basetype;
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp = NULL;
	struct ispmdvec *mdvp;
#if __FreeBSD_version >= 500000
	int locksetup = 0;
#endif

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}
	memset(pcs, 0, sizeof (*pcs));
	pcs->pci_dev = dev;
	isp = &pcs->pci_isp;

	/*
	 * Get Generic Options
	 */
	isp_get_options(dev, isp);

	/*
	 * Check to see if options have us disabled
	 */
	if (isp->isp_osinfo.disabled) {
		/*
		 * But return zero to preserve unit numbering
		 */
		return (0);
	}

	/*
	 * Get PCI options- which in this case are just mapping preferences.
	 */
	isp_get_pci_options(dev, &m1, &m2);


	linesz = PCI_DFLT_LNSZ;
	irq = regs = NULL;
	rgd = rtp = iqd = 0;

	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if (cmd & m1) {
		rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL && (cmd & m2)) {
		rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG;
		regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE);
	}
	if (regs == NULL) {
		device_printf(dev, "unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    (rgd == IO_MAP_REG)? "I/O" : "Memory");
	}
	pcs->pci_dev = dev;
	pcs->pci_reg = regs;
	pcs->pci_st = rman_get_bustag(regs);
	pcs->pci_sh = rman_get_bushandle(regs);

	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
	mdvp = &mdvec;
	basetype = ISP_HA_SCSI_UNKNOWN;
	psize = sizeof (sdparam);
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) {
		mdvp = &mdvec;
		basetype = ISP_HA_SCSI_UNKNOWN;
		psize = sizeof (sdparam);
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1080;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1240;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) {
		mdvp = &mdvec_1080;
		basetype = ISP_HA_SCSI_1280;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_10160;
		psize = sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) {
		mdvp = &mdvec_12160;
		basetype = ISP_HA_SCSI_12160;
		psize = 2 * sizeof (sdparam);
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] =
		    ISP1080_DMA_REGS_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) {
		mdvp = &mdvec_2100;
		basetype = ISP_HA_FC_2100;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX: boards.
			 */
			linesz = 1;
		}
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) {
		mdvp = &mdvec_2200;
		basetype = ISP_HA_FC_2200;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2100_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2300;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 ||
	    pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2312;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 ||
	    pci_get_devid(dev) == PCI_QLOGIC_ISP6322) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2322;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422) {
		mdvp = &mdvec_2300;
		basetype = ISP_HA_FC_2422;
		psize = sizeof (fcparam);
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] =
		    PCI_MBOX_REGS2300_OFF;
	}
	isp = &pcs->pci_isp;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_mdvec = mdvp;
	isp->isp_type = basetype;
	isp->isp_revision = pci_get_revid(dev);
	isp->isp_dev = dev;

#if __FreeBSD_version >= 700000
	/*
	 * Try and find firmware for this device.
	 */
	{
		char fwname[32];
		unsigned int did = pci_get_device(dev);

		/*
		 * Map a few pci ids to fw names
		 */
		switch (did) {
		case PCI_PRODUCT_QLOGIC_ISP1020:
			did = 0x1040;
			break;
		case PCI_PRODUCT_QLOGIC_ISP1240:
			did = 0x1080;
			break;
		case PCI_PRODUCT_QLOGIC_ISP10160:
		case PCI_PRODUCT_QLOGIC_ISP12160:
			did = 0x12160;
			break;
		case PCI_PRODUCT_QLOGIC_ISP6312:
		case PCI_PRODUCT_QLOGIC_ISP2312:
			did = 0x2300;
			break;
		case PCI_PRODUCT_QLOGIC_ISP6322:
			did = 0x2322;
			break;
		default:
			break;
		}

		isp->isp_osinfo.fw = NULL;
		if (isp->isp_role & ISP_ROLE_TARGET) {
			snprintf(fwname, sizeof (fwname), "isp_%04x_it", did);
			isp->isp_osinfo.fw = firmware_get(fwname);
		}
		if (isp->isp_osinfo.fw == NULL) {
			snprintf(fwname, sizeof (fwname), "isp_%04x", did);
			isp->isp_osinfo.fw = firmware_get(fwname);
		}
		if (isp->isp_osinfo.fw != NULL) {
			union {
				const void *fred;
				uint16_t *bob;
			} u;
			u.fred = isp->isp_osinfo.fw->data;
			isp->isp_mdvec->dv_ispfw = u.bob;
		}
	}
#else
	if (isp_get_firmware_p) {
		int device = (int) pci_get_device(dev);
#ifdef	ISP_TARGET_MODE
		(*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw);
#else
		(*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw);
#endif
	}
#endif

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER
	 * are set.
	 */
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN |
		PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;

	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}

	if (IS_23XX(isp)) {
		/*
		 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command.
		 */
		isp->isp_touched = 1;

	}

	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}

	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data != linesz) {
		data = PCI_DFLT_LNSZ;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data);
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	iqd = 0;
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

#if __FreeBSD_version >= 500000
	/* Make sure the lock is set up. */
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);
	locksetup++;
#endif

	if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	isp_reset(isp);
	if (isp->isp_state != ISP_RESETSTATE) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_init(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	isp_attach(isp);
	if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) {
		isp_uninit(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	ISP_UNLOCK(isp);
	return (0);

bad:

	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

#if __FreeBSD_version >= 500000
	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
#endif

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}


	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param) {
#ifdef	ISP_FW_CRASH_DUMP
			if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
				free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
			}
#endif
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		}
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

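/*
 * First-level interrupt handler: read and decode interrupt status via the
 * chip-specific ISR vector, then hand off to the core isp_intr(). The
 * intsok flag is cleared around the call so that, presumably, nothing
 * invoked from interrupt context tries to sleep waiting for a further
 * interrupt.
 */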
static void
isp_pci_intr(void *arg)
{
	ispsoftc_t *isp = arg;
	uint16_t isr, sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		isp->isp_intbogus++;
	} else {
		int iok = isp->isp_osinfo.intsok;
		isp->isp_osinfo.intsok = 0;
		isp_intr(isp, isr, sema, mbox);
		isp->isp_osinfo.intsok = iok;
	}
	ISP_UNLOCK(isp);
}


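/*
 * Map a virtual register offset from the core driver (block tag plus
 * offset) to the actual bus-space offset recorded in pci_poff[], and
 * provide 16-bit bus-space accessors over the mapped BAR.
 */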
#define	IspVirt2Off(a, x)	\
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xff))

#define	BXR2(pcs, off)		\
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define	BXW2(pcs, off, v)	\
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)


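/*
 * Debounced register read: reread until two successive values agree
 * (with a retry cap); used on the ISP2100, whose registers can read
 * back inconsistently.
 */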
static __inline int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint16_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(pcs, IspVirt2Off(isp, off));
		val1 = BXR2(pcs, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

1155 
1156 static int
1157 isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp,
1158     uint16_t *semap, uint16_t *mbp)
1159 {
1160 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1161 	uint16_t isr, sema;
1162 
1163 	if (IS_2100(isp)) {
1164 		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
1165 		    return (0);
1166 		}
1167 		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
1168 		    return (0);
1169 		}
1170 	} else {
1171 		isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR));
1172 		sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA));
1173 	}
1174 	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
1175 	isr &= INT_PENDING_MASK(isp);
1176 	sema &= BIU_SEMA_LOCK;
1177 	if (isr == 0 && sema == 0) {
1178 		return (0);
1179 	}
1180 	*isrp = isr;
1181 	if ((*semap = sema) != 0) {
1182 		if (IS_2100(isp)) {
1183 			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) {
1184 				return (0);
1185 			}
1186 		} else {
1187 			*mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0));
1188 		}
1189 	}
1190 	return (1);
1191 }
1192 
1193 static int
1194 isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp,
1195     uint16_t *semap, uint16_t *mbox0p)
1196 {
1197 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
1198 	uint16_t hccr;
1199 	uint32_t r2hisr;
1200 
	if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT)) {
		*isrp = 0;
		return (0);
	}
	r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh,
	    IspVirt2Off(pcs, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR,
			    "RISC paused at interrupt (%x->%x)\n", hccr,
			    ISP_READ(isp, HCCR));
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n",
			    r2hisr);
		}
		return (0);
	}
}

static uint16_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
}

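/*
 * The 1080/12160-class parts bank-switch their SXP and DMA register sets
 * via BIU_CONF1, so accesses to those blocks select the proper bank first
 * and restore the old configuration afterwards.
 */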
static uint16_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint16_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint16_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint16_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
}


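/*
 * Callback context for loading the control-space DMA map: imc() records
 * the bus addresses at which the request queue, result queue, and (for
 * FC) the scratch area ended up.
 */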
struct imush {
	ispsoftc_t *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		ispsoftc_t *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE
 */
#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1)

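/*
 * On 5.x and later, bus_dma_tag_create() takes a lock function and
 * argument for deferred callbacks; we pass the stock busdma_lock_mutex
 * helper with Giant.
 */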
#if __FreeBSD_version < 500000
#define	isp_dma_tag_create	bus_dma_tag_create
#else
#define	isp_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, z)	\
	bus_dma_tag_create(a, b, c, d, e, f, g, h, i, j, k, \
	    busdma_lock_mutex, &Giant, z)
#endif

static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	uint32_t len;
	int i, error, ns;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = (bus_size_t) (1ULL << 32);
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1 << 24);
	}

	/*
	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
	 */
#ifdef	ISP_TARGET_MODE
	if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
		return (1);
	}
#endif

	ISP_UNLOCK(isp);
	if (isp_dma_tag_create(NULL, 1, slim, llim, hlim,
	    NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}


	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
#ifdef	ISP_TARGET_MODE
	len = sizeof (void **) * isp->isp_maxcmds;
	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		ISP_LOCK(isp);
		return (1);
	}
#endif
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	/*
	 * Create a tag for the control spaces- force it to within 32 bits.
	 */
	if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

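/*
 * State handed through the busdma callbacks for per-command DMA setup;
 * the callbacks report failure (or request queue exhaustion, via
 * MUSHERR_NOQENTRIES) back through the error field.
 */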
typedef struct {
	ispsoftc_t *isp;
	void *cmd_token;
	void *rq;
	uint16_t *nxtip;
	uint16_t optr;
	int error;
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and the construction and submission of CTIO Request Entries
 * and the rendezvous for completion are very tightly coupled because we
 * start out by knowing (per platform) how much data we have to move, but
 * we don't know, up front, how many DMA mapping segments will have to be
 * used to cover that data, so we don't know how many CTIO Request Entries
 * we will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel) send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	ispsoftc_t *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	uint8_t scsi_status;
	uint16_t curi, nxti, handle;
	uint32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */

	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */

			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code and
 * improves performance.
 */
1836 
1837 static void
1838 tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1839 {
1840 	mush_t *mp;
1841 	struct ccb_scsiio *csio;
1842 	ispsoftc_t *isp;
1843 	ct2_entry_t *cto, *qe;
1844 	uint16_t curi, nxti;
1845 	ispds_t *ds;
1846 	ispds64_t *ds64;
1847 	int segcnt, seglim;
1848 
1849 	mp = (mush_t *) arg;
1850 	if (error) {
1851 		mp->error = error;
1852 		return;
1853 	}
1854 
1855 	isp = mp->isp;
1856 	csio = mp->cmd_token;
1857 	cto = mp->rq;
1858 
1859 	curi = isp->isp_reqidx;
1860 	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);
1861 
1862 	if (nseg == 0) {
1863 		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
1864 			isp_prt(isp, ISP_LOGWARN,
1865 			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
1866 			    "set (0x%x)", cto->ct_flags);
1867 			mp->error = EINVAL;
1868 			return;
1869 		}
1870 		/*
1871 		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
1872 		 * flags to NO DATA and clear relative offset flags.
1873 		 * We preserve the ct_resid and the response area.
1874 		 */
1875 		cto->ct_header.rqs_seqno = 1;
1876 		cto->ct_seg_count = 0;
1877 		cto->ct_reloff = 0;
1878 		isp_prt(isp, ISP_LOGTDEBUG1,
1879 		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
1880 		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
1881 		    cto->ct_iid, cto->ct_flags, cto->ct_status,
1882 		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
1883 		if (IS_2KLOGIN(isp)) {
1884 			isp_put_ctio2e(isp,
1885 			    (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
1886 		} else {
1887 			isp_put_ctio2(isp, cto, qe);
1888 		}
1889 		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
1890 		return;
1891 	}
1892 
1893 	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
1894 		isp_prt(isp, ISP_LOGERR,
1895 		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
1896 		    "(0x%x)", cto->ct_flags);
1897 		mp->error = EINVAL;
1898 		return;
1899 	}
1900 
1902 	nxti = *mp->nxtip;
1903 
	/*
	 * Check whether we need DAC (dual address cycle) addressing:
	 * any segment that lies above the 4GB boundary forces us to
	 * use 64-bit (type 3) entries.
	 */
1910 	segcnt = nseg;
1911 	if (sizeof (bus_addr_t) > 4) {
1912 		for (segcnt = 0; segcnt < nseg; segcnt++) {
1913 			uint64_t addr = dm_segs[segcnt].ds_addr;
1914 			if (addr >= 0x100000000LL) {
1915 				break;
1916 			}
1917 		}
1918 	}
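	/*
	 * If the scan stopped early, segcnt != nseg and at least one
	 * segment needs more than 32 bits of address.
	 */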
1919 	if (segcnt != nseg) {
1920 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
1921 		seglim = ISP_RQDSEG_T3;
1922 		ds64 = &cto->rsp.m0.ct_dataseg64[0];
1923 		ds = NULL;
1924 	} else {
1925 		seglim = ISP_RQDSEG_T2;
1926 		ds64 = NULL;
1927 		ds = &cto->rsp.m0.ct_dataseg[0];
1928 	}
1929 	cto->ct_seg_count = 0;
1930 
1931 	/*
1932 	 * Set up the CTIO2 data segments.
1933 	 */
1934 	for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg;
1935 	    cto->ct_seg_count++, segcnt++) {
1936 		if (ds64) {
1937 			ds64->ds_basehi =
1938 			    ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
1939 			ds64->ds_base = dm_segs[segcnt].ds_addr;
1940 			ds64->ds_count = dm_segs[segcnt].ds_len;
1941 			ds64++;
1942 		} else {
1943 			ds->ds_base = dm_segs[segcnt].ds_addr;
1944 			ds->ds_count = dm_segs[segcnt].ds_len;
1945 			ds++;
1946 		}
1947 		cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
1948 #if __FreeBSD_version < 500000
1949 		isp_prt(isp, ISP_LOGTDEBUG1,
1950 		    "isp_send_ctio2: ent0[%d]0x%llx:%llu",
1951 		    cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr,
1952 		    (uint64_t)dm_segs[segcnt].ds_len);
1953 #else
1954 		isp_prt(isp, ISP_LOGTDEBUG1,
1955 		    "isp_send_ctio2: ent0[%d]0x%jx:%ju",
1956 		    cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr,
1957 		    (uintmax_t)dm_segs[segcnt].ds_len);
1958 #endif
1959 	}
1960 
1961 	while (segcnt < nseg) {
1962 		uint16_t curip;
1963 		int seg;
1964 		ispcontreq_t local, *crq = &local, *qep;
1965 
1966 		qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
1967 		curip = nxti;
1968 		nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp));
1969 		if (nxti == mp->optr) {
1971 			isp_prt(isp, ISP_LOGTDEBUG0,
1972 			    "tdma_mkfc: request queue overflow");
1973 			mp->error = MUSHERR_NOQENTRIES;
1974 			return;
1975 		}
1976 		cto->ct_header.rqs_entry_count++;
1977 		MEMZERO((void *)crq, sizeof (*crq));
1978 		crq->req_header.rqs_entry_count = 1;
1979 		if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) {
1980 			seglim = ISP_CDSEG64;
1981 			ds = NULL;
1982 			ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0];
1983 			crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
1984 		} else {
1985 			seglim = ISP_CDSEG;
1986 			ds = &crq->req_dataseg[0];
1987 			ds64 = NULL;
1988 			crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
1989 		}
1990 		for (seg = 0; segcnt < nseg && seg < seglim;
1991 		    segcnt++, seg++) {
1992 			if (ds64) {
1993 				ds64->ds_basehi =
1994 				  ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32);
1995 				ds64->ds_base = dm_segs[segcnt].ds_addr;
1996 				ds64->ds_count = dm_segs[segcnt].ds_len;
1997 				ds64++;
1998 			} else {
1999 				ds->ds_base = dm_segs[segcnt].ds_addr;
2000 				ds->ds_count = dm_segs[segcnt].ds_len;
2001 				ds++;
2002 			}
2003 #if __FreeBSD_version < 500000
2004 			isp_prt(isp, ISP_LOGTDEBUG1,
2005 			    "isp_send_ctio2: ent%d[%d]%llx:%llu",
2006 			    cto->ct_header.rqs_entry_count-1, seg,
2007 			    (uint64_t)dm_segs[segcnt].ds_addr,
2008 			    (uint64_t)dm_segs[segcnt].ds_len);
2009 #else
2010 			isp_prt(isp, ISP_LOGTDEBUG1,
2011 			    "isp_send_ctio2: ent%d[%d]%jx:%ju",
2012 			    cto->ct_header.rqs_entry_count-1, seg,
2013 			    (uintmax_t)dm_segs[segcnt].ds_addr,
2014 			    (uintmax_t)dm_segs[segcnt].ds_len);
2015 #endif
2016 			cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len;
2017 			cto->ct_seg_count++;
2018 		}
		isp_put_cont_req(isp, crq, qep);
		MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN);
		ISP_TDQE(isp, "cont entry", curip, qep);
2022 	}
2023 
2024 	/*
	 * Now do the final twiddling for the CTIO itself.
2026 	 */
2027 	cto->ct_header.rqs_seqno = 1;
2028 	isp_prt(isp, ISP_LOGTDEBUG1,
2029 	    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d",
2030 	    cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid,
2031 	    cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status,
2032 	    cto->ct_resid);
2033 	if (IS_2KLOGIN(isp))
2034 		isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
2035 	else
2036 		isp_put_ctio2(isp, cto, qe);
2037 	ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe);
2038 	*mp->nxtip = nxti;
2039 }
2040 #endif
2041 
2042 static void dma2_a64(void *, bus_dma_segment_t *, int, int);
2043 static void dma2(void *, bus_dma_segment_t *, int, int);
2044 
2045 static void
2046 dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2047 {
2048 	mush_t *mp;
2049 	ispsoftc_t *isp;
2050 	struct ccb_scsiio *csio;
2051 	struct isp_pcisoftc *pcs;
2052 	bus_dmamap_t *dp;
2053 	bus_dma_segment_t *eseg;
2054 	ispreq64_t *rq;
2055 	int seglim, datalen;
2056 	uint16_t nxti;
2057 
2058 	mp = (mush_t *) arg;
2059 	if (error) {
2060 		mp->error = error;
2061 		return;
2062 	}
2063 
2064 	if (nseg < 1) {
2065 		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2066 		mp->error = EFAULT;
2067 		return;
2068 	}
2069 	csio = mp->cmd_token;
2070 	isp = mp->isp;
2071 	rq = mp->rq;
2072 	pcs = (struct isp_pcisoftc *)mp->isp;
2073 	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2074 	nxti = *mp->nxtip;
2075 
2076 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2077 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2078 	} else {
2079 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2080 	}
2081 	datalen = XS_XFRLEN(csio);
2082 
	/*
	 * We're passed a partially completed initial entry: most
	 * fields are already filled in, except for the data transfer
	 * related values.
	 *
	 * Our job is to finish the initial request queue entry and
	 * then to allocate and fill in continuation entries until
	 * we've covered the entire transfer.
	 */
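
	/*
	 * Each 64-bit segment address is split across the ds_base and
	 * ds_basehi fields of a data segment; assuming the usual
	 * definitions of the helper macros, that amounts to:
	 *
	 *	ds_base   = DMA_LO32(addr);	low 32 bits
	 *	ds_basehi = DMA_HI32(addr);	bits 63..32
	 */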
2092 
2093 	if (IS_FC(isp)) {
2094 		rq->req_header.rqs_entry_type = RQSTYPE_T3RQS;
2095 		seglim = ISP_RQDSEG_T3;
2096 		((ispreqt3_t *)rq)->req_totalcnt = datalen;
2097 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2098 			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN;
2099 		} else {
2100 			((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
2101 		}
2102 	} else {
2103 		rq->req_header.rqs_entry_type = RQSTYPE_A64;
2104 		if (csio->cdb_len > 12) {
2105 			seglim = 0;
2106 		} else {
2107 			seglim = ISP_RQDSEG_A64;
2108 		}
2109 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2110 			rq->req_flags |= REQFLAG_DATA_IN;
2111 		} else {
2112 			rq->req_flags |= REQFLAG_DATA_OUT;
2113 		}
2114 	}
2115 
2116 	eseg = dm_segs + nseg;
2117 
2118 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
2119 		if (IS_FC(isp)) {
2120 			ispreqt3_t *rq3 = (ispreqt3_t *)rq;
2121 			rq3->req_dataseg[rq3->req_seg_count].ds_base =
2122 			    DMA_LO32(dm_segs->ds_addr);
2123 			rq3->req_dataseg[rq3->req_seg_count].ds_basehi =
2124 			    DMA_HI32(dm_segs->ds_addr);
2125 			rq3->req_dataseg[rq3->req_seg_count].ds_count =
2126 			    dm_segs->ds_len;
2127 		} else {
2128 			rq->req_dataseg[rq->req_seg_count].ds_base =
2129 			    DMA_LO32(dm_segs->ds_addr);
2130 			rq->req_dataseg[rq->req_seg_count].ds_basehi =
2131 			    DMA_HI32(dm_segs->ds_addr);
2132 			rq->req_dataseg[rq->req_seg_count].ds_count =
2133 			    dm_segs->ds_len;
2134 		}
2135 		datalen -= dm_segs->ds_len;
2136 		rq->req_seg_count++;
2137 		dm_segs++;
2138 	}
2139 
2140 	while (datalen > 0 && dm_segs != eseg) {
2141 		uint16_t onxti;
2142 		ispcontreq64_t local, *crq = &local, *cqe;
2143 
2144 		cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2145 		onxti = nxti;
2146 		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2147 		if (nxti == mp->optr) {
2148 			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2149 			mp->error = MUSHERR_NOQENTRIES;
2150 			return;
2151 		}
2152 		rq->req_header.rqs_entry_count++;
2153 		MEMZERO((void *)crq, sizeof (*crq));
2154 		crq->req_header.rqs_entry_count = 1;
2155 		crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT;
2156 
2157 		seglim = 0;
2158 		while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) {
2159 			crq->req_dataseg[seglim].ds_base =
2160 			    DMA_LO32(dm_segs->ds_addr);
2161 			crq->req_dataseg[seglim].ds_basehi =
2162 			    DMA_HI32(dm_segs->ds_addr);
2163 			crq->req_dataseg[seglim].ds_count =
2164 			    dm_segs->ds_len;
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
2169 		}
2170 		isp_put_cont64_req(isp, crq, cqe);
2171 		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
2172 	}
2173 	*mp->nxtip = nxti;
2174 }
2175 
2176 static void
2177 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2178 {
2179 	mush_t *mp;
2180 	ispsoftc_t *isp;
2181 	struct ccb_scsiio *csio;
2182 	struct isp_pcisoftc *pcs;
2183 	bus_dmamap_t *dp;
2184 	bus_dma_segment_t *eseg;
2185 	ispreq_t *rq;
2186 	int seglim, datalen;
2187 	uint16_t nxti;
2188 
2189 	mp = (mush_t *) arg;
2190 	if (error) {
2191 		mp->error = error;
2192 		return;
2193 	}
2194 
2195 	if (nseg < 1) {
2196 		isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg);
2197 		mp->error = EFAULT;
2198 		return;
2199 	}
2200 	csio = mp->cmd_token;
2201 	isp = mp->isp;
2202 	rq = mp->rq;
2203 	pcs = (struct isp_pcisoftc *)mp->isp;
2204 	dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2205 	nxti = *mp->nxtip;
2206 
2207 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2208 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
2209 	} else {
2210 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
2211 	}
2212 
2213 	datalen = XS_XFRLEN(csio);
2214 
	/*
	 * We're passed a partially completed initial entry: most
	 * fields are already filled in, except for the data transfer
	 * related values.
	 *
	 * Our job is to finish the initial request queue entry and
	 * then to allocate and fill in continuation entries until
	 * we've covered the entire transfer.
	 */
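
	/*
	 * This is the 32-bit path: each data segment carries only a
	 * 32-bit ds_base (the 64-bit capable case is handled by
	 * dma2_a64 above), so every address must fit in 32 bits.
	 */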
2224 
2225 	if (IS_FC(isp)) {
2226 		seglim = ISP_RQDSEG_T2;
2227 		((ispreqt2_t *)rq)->req_totalcnt = datalen;
2228 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2229 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN;
2230 		} else {
2231 			((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT;
2232 		}
2233 	} else {
2234 		if (csio->cdb_len > 12) {
2235 			seglim = 0;
2236 		} else {
2237 			seglim = ISP_RQDSEG;
2238 		}
2239 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2240 			rq->req_flags |= REQFLAG_DATA_IN;
2241 		} else {
2242 			rq->req_flags |= REQFLAG_DATA_OUT;
2243 		}
2244 	}
2245 
2246 	eseg = dm_segs + nseg;
2247 
2248 	while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) {
2249 		if (IS_FC(isp)) {
2250 			ispreqt2_t *rq2 = (ispreqt2_t *)rq;
2251 			rq2->req_dataseg[rq2->req_seg_count].ds_base =
2252 			    DMA_LO32(dm_segs->ds_addr);
2253 			rq2->req_dataseg[rq2->req_seg_count].ds_count =
2254 			    dm_segs->ds_len;
2255 		} else {
2256 			rq->req_dataseg[rq->req_seg_count].ds_base =
2257 				DMA_LO32(dm_segs->ds_addr);
2258 			rq->req_dataseg[rq->req_seg_count].ds_count =
2259 				dm_segs->ds_len;
2260 		}
2261 		datalen -= dm_segs->ds_len;
2262 		rq->req_seg_count++;
2263 		dm_segs++;
2264 	}
2265 
2266 	while (datalen > 0 && dm_segs != eseg) {
2267 		uint16_t onxti;
2268 		ispcontreq_t local, *crq = &local, *cqe;
2269 
2270 		cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
2271 		onxti = nxti;
2272 		nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp));
2273 		if (nxti == mp->optr) {
2274 			isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++");
2275 			mp->error = MUSHERR_NOQENTRIES;
2276 			return;
2277 		}
2278 		rq->req_header.rqs_entry_count++;
2279 		MEMZERO((void *)crq, sizeof (*crq));
2280 		crq->req_header.rqs_entry_count = 1;
2281 		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;
2282 
2283 		seglim = 0;
2284 		while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) {
2285 			crq->req_dataseg[seglim].ds_base =
2286 			    DMA_LO32(dm_segs->ds_addr);
2287 			crq->req_dataseg[seglim].ds_count =
2288 			    dm_segs->ds_len;
			rq->req_seg_count++;
			datalen -= dm_segs->ds_len;
			dm_segs++;
			seglim++;
2293 		}
2294 		isp_put_cont_req(isp, crq, cqe);
2295 		MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN);
2296 	}
2297 	*mp->nxtip = nxti;
2298 }
2299 
2300 /*
2301  * We enter with ISP_LOCK held
2302  */
2303 static int
2304 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq,
2305 	uint16_t *nxtip, uint16_t optr)
2306 {
2307 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2308 	ispreq_t *qep;
2309 	bus_dmamap_t *dp = NULL;
2310 	mush_t mush, *mp;
2311 	void (*eptr)(void *, bus_dma_segment_t *, int, int);
2312 
2313 	qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx);
2314 #ifdef	ISP_TARGET_MODE
2315 	if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) {
2316 		if (IS_FC(isp)) {
2317 			eptr = tdma_mkfc;
2318 		} else {
2319 			eptr = tdma_mk;
2320 		}
2321 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
2322 		    (csio->dxfer_len == 0)) {
2323 			mp = &mush;
2324 			mp->isp = isp;
2325 			mp->cmd_token = csio;
2326 			mp->rq = rq;	/* really a ct_entry_t or ct2_entry_t */
2327 			mp->nxtip = nxtip;
2328 			mp->optr = optr;
2329 			mp->error = 0;
2330 			ISPLOCK_2_CAMLOCK(isp);
2331 			(*eptr)(mp, NULL, 0, 0);
2332 			CAMLOCK_2_ISPLOCK(isp);
2333 			goto mbxsync;
2334 		}
2335 	} else
2336 #endif
2337 	if (sizeof (bus_addr_t) > 4) {
2338 		eptr = dma2_a64;
2339 	} else {
2340 		eptr = dma2;
2341 	}
2342 
2344 	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE ||
2345 	    (csio->dxfer_len == 0)) {
2346 		rq->req_seg_count = 1;
2347 		goto mbxsync;
2348 	}
2349 
2350 	/*
2351 	 * Do a virtual grapevine step to collect info for
2352 	 * the callback dma allocation that we have to use...
2353 	 */
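	/*
	 * The mush_t is just a parameter block: bus_dmamap_load() is
	 * expected to invoke the callback synchronously (EINPROGRESS
	 * is rejected below), and the callback reports failure back
	 * through mp->error.
	 */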
2354 	mp = &mush;
2355 	mp->isp = isp;
2356 	mp->cmd_token = csio;
2357 	mp->rq = rq;
2358 	mp->nxtip = nxtip;
2359 	mp->optr = optr;
2360 	mp->error = 0;
2361 
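	/*
	 * Swap to the CAM lock for the bus_dma work; the segment
	 * callback runs inside this window, and we swap back before
	 * touching the request queue again.
	 */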
2362 	ISPLOCK_2_CAMLOCK(isp);
2363 	if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2364 		if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) {
2365 			int error, s;
2366 			dp = &pcs->dmaps[isp_handle_index(rq->req_handle)];
2367 			s = splsoftvm();
2368 			error = bus_dmamap_load(pcs->dmat, *dp,
2369 			    csio->data_ptr, csio->dxfer_len, eptr, mp, 0);
2370 			if (error == EINPROGRESS) {
2371 				bus_dmamap_unload(pcs->dmat, *dp);
2372 				mp->error = EINVAL;
2373 				isp_prt(isp, ISP_LOGERR,
2374 				    "deferred dma allocation not supported");
2375 			} else if (error && mp->error == 0) {
2376 #ifdef	DIAGNOSTIC
2377 				isp_prt(isp, ISP_LOGERR,
2378 				    "error %d in dma mapping code", error);
2379 #endif
2380 				mp->error = error;
2381 			}
2382 			splx(s);
2383 		} else {
2384 			/* Pointer to physical buffer */
2385 			struct bus_dma_segment seg;
2386 			seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr;
2387 			seg.ds_len = csio->dxfer_len;
2388 			(*eptr)(mp, &seg, 1, 0);
2389 		}
2390 	} else {
2391 		struct bus_dma_segment *segs;
2392 
2393 		if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2394 			isp_prt(isp, ISP_LOGERR,
2395 			    "Physical segment pointers unsupported");
2396 			mp->error = EINVAL;
2397 		} else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2398 			isp_prt(isp, ISP_LOGERR,
2399 			    "Virtual segment addresses unsupported");
2400 			mp->error = EINVAL;
2401 		} else {
2402 			/* Just use the segments provided */
2403 			segs = (struct bus_dma_segment *) csio->data_ptr;
2404 			(*eptr)(mp, segs, csio->sglist_cnt, 0);
2405 		}
2406 	}
2407 	CAMLOCK_2_ISPLOCK(isp);
2408 	if (mp->error) {
2409 		int retval = CMD_COMPLETE;
2410 		if (mp->error == MUSHERR_NOQENTRIES) {
2411 			retval = CMD_EAGAIN;
2412 		} else if (mp->error == EFBIG) {
2413 			XS_SETERR(csio, CAM_REQ_TOO_BIG);
2414 		} else if (mp->error == EINVAL) {
2415 			XS_SETERR(csio, CAM_REQ_INVALID);
2416 		} else {
2417 			XS_SETERR(csio, CAM_UNREC_HBA_ERROR);
2418 		}
2419 		return (retval);
2420 	}
2421 mbxsync:
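	/*
	 * Copy (and byte-swizzle as needed) the staging entry into its
	 * slot in the shared request ring, using the put routine that
	 * matches the entry type.
	 */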
2422 	switch (rq->req_header.rqs_entry_type) {
2423 	case RQSTYPE_REQUEST:
2424 		isp_put_request(isp, rq, qep);
2425 		break;
2426 	case RQSTYPE_CMDONLY:
2427 		isp_put_extended_request(isp, (ispextreq_t *)rq,
2428 		    (ispextreq_t *)qep);
2429 		break;
2430 	case RQSTYPE_T2RQS:
2431 		isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep);
2432 		break;
2433 	case RQSTYPE_A64:
2434 	case RQSTYPE_T3RQS:
2435 		isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep);
2436 		break;
2437 	}
2438 	return (CMD_QUEUED);
2439 }
2440 
2441 static void
2442 isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint16_t handle)
2443 {
2444 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2445 	bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)];
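	/* Complete the DMA: sync the map for the CPU, then unload it. */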
2446 	if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2447 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD);
2448 	} else {
2449 		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE);
2450 	}
2451 	bus_dmamap_unload(pcs->dmat, *dp);
2452 }
2453 
2455 static void
2456 isp_pci_reset1(ispsoftc_t *isp)
2457 {
2458 	/* Make sure the BIOS is disabled */
2459 	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
2460 	/* and enable interrupts */
2461 	ENABLE_INTS(isp);
2462 }
2463 
2464 static void
2465 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
2466 {
2467 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
2468 	if (msg)
2469 		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
2470 	else
2471 		printf("%s:\n", device_get_nameunit(isp->isp_dev));
2472 	if (IS_SCSI(isp))
2473 		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
2474 	else
2475 		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
2476 	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
2477 	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
2478 	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
2479 
2481 	if (IS_SCSI(isp)) {
2482 		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
2483 		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
2484 			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
2485 			ISP_READ(isp, CDMA_FIFO_STS));
2486 		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
2487 			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
2488 			ISP_READ(isp, DDMA_FIFO_STS));
2489 		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
2490 			ISP_READ(isp, SXP_INTERRUPT),
2491 			ISP_READ(isp, SXP_GROSS_ERR),
2492 			ISP_READ(isp, SXP_PINS_CTRL));
2493 		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
2494 	}
2495 	printf("    mbox regs: %x %x %x %x %x\n",
2496 	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
2497 	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
2498 	    ISP_READ(isp, OUTMAILBOX4));
	printf("    PCI Status Command/Status=%x\n",
	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 4));
2501 }
2502