xref: /freebsd/sys/dev/mpt/mpt.h (revision 66c14b21d3ab0b18376563ba643ddb49b4fd33dd)
1 /* $FreeBSD$ */
2 /*-
3  * Generic defines for LSI '909 FC adapters.
4  * FreeBSD Version.
5  *
6  * Copyright (c)  2000, 2001 by Greg Ansley
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice immediately at the beginning of the file, without modification,
13  *    this list of conditions, and the following disclaimer.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 /*-
30  * Copyright (c) 2002, 2006 by Matthew Jacob
31  * All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions are
35  * met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
39  *    substantially similar to the "NO WARRANTY" disclaimer below
40  *    ("Disclaimer") and any redistribution must be conditioned upon including
41  *    a substantially similar Disclaimer requirement for further binary
42  *    redistribution.
43  * 3. Neither the names of the above listed copyright holders nor the names
44  *    of any contributors may be used to endorse or promote products derived
45  *    from this software without specific prior written permission.
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
48  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
51  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
52  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
53  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
54  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
55  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
56  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
57  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58  *
59  * Support from Chris Ellsworth in order to make SAS adapters work
60  * is gratefully acknowledged.
61  */
62 /*
63  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
64  * Copyright (c) 2004, 2005 Justin T. Gibbs
65  * Copyright (c) 2005, WHEEL Sp. z o.o.
66  * All rights reserved.
67  *
68  * Redistribution and use in source and binary forms, with or without
69  * modification, are permitted provided that the following conditions are
70  * met:
71  * 1. Redistributions of source code must retain the above copyright
72  *    notice, this list of conditions and the following disclaimer.
73  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
74  *    substantially similar to the "NO WARRANTY" disclaimer below
75  *    ("Disclaimer") and any redistribution must be conditioned upon including
76  *    a substantially similar Disclaimer requirement for further binary
77  *    redistribution.
78  * 3. Neither the names of the above listed copyright holders nor the names
79  *    of any contributors may be used to endorse or promote products derived
80  *    from this software without specific prior written permission.
81  *
82  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
83  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
84  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
85  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
86  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
87  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
88  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
89  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
90  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
91  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
92  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
93  */
94 
95 #ifndef _MPT_H_
96 #define _MPT_H_
97 
98 /********************************* OS Includes ********************************/
99 #include <sys/types.h>
100 #include <sys/param.h>
101 #include <sys/systm.h>
102 #include <sys/endian.h>
103 #include <sys/eventhandler.h>
104 #if __FreeBSD_version < 500000
105 #include <sys/kernel.h>
106 #include <sys/queue.h>
107 #include <sys/malloc.h>
108 #else
109 #include <sys/lock.h>
110 #include <sys/kernel.h>
111 #include <sys/queue.h>
112 #include <sys/malloc.h>
113 #include <sys/mutex.h>
114 #include <sys/condvar.h>
115 #endif
116 #include <sys/proc.h>
117 #include <sys/bus.h>
118 #include <sys/module.h>
119 
120 #include <machine/cpu.h>
121 #include <machine/resource.h>
122 
123 #if __FreeBSD_version < 500000
124 #include <machine/bus.h>
125 #endif
126 
127 #include <sys/rman.h>
128 
129 #if __FreeBSD_version < 500000
130 #include <pci/pcireg.h>
131 #include <pci/pcivar.h>
132 #else
133 #include <dev/pci/pcireg.h>
134 #include <dev/pci/pcivar.h>
135 #endif
136 
137 #include <machine/bus.h>
138 #include "opt_ddb.h"
139 
140 /**************************** Register Definitions ****************************/
141 #include <dev/mpt/mpt_reg.h>
142 
143 /******************************* MPI Definitions ******************************/
144 #include <dev/mpt/mpilib/mpi_type.h>
145 #include <dev/mpt/mpilib/mpi.h>
146 #include <dev/mpt/mpilib/mpi_cnfg.h>
147 #include <dev/mpt/mpilib/mpi_ioc.h>
148 #include <dev/mpt/mpilib/mpi_raid.h>
149 
150 /* XXX For mpt_debug.c */
151 #include <dev/mpt/mpilib/mpi_init.h>
152 
153 /****************************** Misc Definitions ******************************/
154 #define MPT_OK (0)
155 #define MPT_FAIL (0x10000)
156 
157 #define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array))
158 
159 #define	MPT_ROLE_NONE		0
160 #define	MPT_ROLE_INITIATOR	1
161 #define	MPT_ROLE_TARGET		2
162 #define	MPT_ROLE_BOTH		3
163 #define	MPT_ROLE_DEFAULT	MPT_ROLE_INITIATOR
164 
165 /**************************** Forward Declarations ****************************/
166 struct mpt_softc;
167 struct mpt_personality;
168 typedef struct req_entry request_t;
169 
170 /************************* Personality Module Support *************************/
171 typedef int mpt_load_handler_t(struct mpt_personality *);
172 typedef int mpt_probe_handler_t(struct mpt_softc *);
173 typedef int mpt_attach_handler_t(struct mpt_softc *);
174 typedef int mpt_enable_handler_t(struct mpt_softc *);
175 typedef int mpt_event_handler_t(struct mpt_softc *, request_t *,
176 				MSG_EVENT_NOTIFY_REPLY *);
177 typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/);
178 /* XXX Add return value and use for veto? */
179 typedef void mpt_shutdown_handler_t(struct mpt_softc *);
180 typedef void mpt_detach_handler_t(struct mpt_softc *);
181 typedef int mpt_unload_handler_t(struct mpt_personality *);
182 
183 struct mpt_personality
184 {
185 	const char		*name;
186 	uint32_t		 id;		/* Assigned identifier. */
187 	u_int			 use_count;	/* Instances using personality */
188 	mpt_load_handler_t	*load;		/* configure personality */
189 #define MPT_PERS_FIRST_HANDLER(pers) (&(pers)->load)
190 	mpt_probe_handler_t	*probe;		/* probe for device support */
191 	mpt_attach_handler_t	*attach;	/* initialize device instance */
192 	mpt_enable_handler_t	*enable;	/* enable device */
193 	mpt_event_handler_t	*event;		/* Handle MPI event. */
194 	mpt_reset_handler_t	*reset;		/* Re-init after reset. */
195 	mpt_shutdown_handler_t	*shutdown;	/* Shutdown instance. */
196 	mpt_detach_handler_t	*detach;	/* release device instance */
197 	mpt_unload_handler_t	*unload;	/* Shutdown personality */
198 #define MPT_PERS_LAST_HANDLER(pers) (&(pers)->unload)
199 };
200 
201 int mpt_modevent(module_t, int, void *);
202 
203 /* Maximum supported number of personalities. */
204 #define MPT_MAX_PERSONALITIES	(15)
205 
206 #define MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \
207 	MODULE_DEPEND(name, dep, vmin, vpref, vmax)
208 
209 #define DECLARE_MPT_PERSONALITY(name, order)				  \
210 	static moduledata_t name##_mod = {				  \
211 		#name, mpt_modevent, &name##_personality		  \
212 	};								  \
213 	DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order);	  \
214 	MODULE_VERSION(name, 1);					  \
215 	MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1)
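
/*
 * Illustrative sketch (not compiled): how a hypothetical personality,
 * here called "mpt_foo", might fill in struct mpt_personality and hook
 * itself up with DECLARE_MPT_PERSONALITY().  The mpt_foo_* handlers are
 * assumptions for the example only; the real personalities (core, CAM,
 * RAID) live in their own source files.
 */
#if 0
static mpt_probe_handler_t	mpt_foo_probe;
static mpt_attach_handler_t	mpt_foo_attach;
static mpt_event_handler_t	mpt_foo_event;
static mpt_detach_handler_t	mpt_foo_detach;

static struct mpt_personality mpt_foo_personality = {
	.name	= "mpt_foo",
	.probe	= mpt_foo_probe,	/* claim supported controllers */
	.attach	= mpt_foo_attach,	/* per-instance initialization */
	.event	= mpt_foo_event,	/* asynchronous event callouts */
	.detach	= mpt_foo_detach,	/* per-instance teardown */
};
DECLARE_MPT_PERSONALITY(mpt_foo, SI_ORDER_SECOND);
#endif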
216 
217 /******************************* Bus DMA Support ******************************/
218 /* XXX Need to update bus_dmamap_sync to take a range argument. */
219 #define bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op)	\
220 	bus_dmamap_sync(dma_tag, dmamap, op)
221 
222 #if __FreeBSD_version >= 501102
223 #define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
224 			   lowaddr, highaddr, filter, filterarg,	\
225 			   maxsize, nsegments, maxsegsz, flags,		\
226 			   dma_tagp)					\
227 	bus_dma_tag_create(parent_tag, alignment, boundary,		\
228 			   lowaddr, highaddr, filter, filterarg,	\
229 			   maxsize, nsegments, maxsegsz, flags,		\
230 			   busdma_lock_mutex, &Giant,			\
231 			   dma_tagp)
232 #else
233 #define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
234 			   lowaddr, highaddr, filter, filterarg,	\
235 			   maxsize, nsegments, maxsegsz, flags,		\
236 			   dma_tagp)					\
237 	bus_dma_tag_create(parent_tag, alignment, boundary,		\
238 			   lowaddr, highaddr, filter, filterarg,	\
239 			   maxsize, nsegments, maxsegsz, flags,		\
240 			   dma_tagp)
241 #endif
242 
243 struct mpt_map_info {
244 	struct mpt_softc *mpt;
245 	int		  error;
246 	uint32_t	  phys;
247 };
248 
249 void mpt_map_rquest(void *, bus_dma_segment_t *, int, int);
250 
251 /**************************** Kernel Thread Support ***************************/
252 #if __FreeBSD_version > 500005
253 #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
254 	kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
255 #else
256 #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
257 	kthread_create(func, farg, proc_ptr, fmtstr, arg)
258 #endif
259 
260 /****************************** Timer Facilities ******************************/
261 #if __FreeBSD_version > 500000
262 #define mpt_callout_init(c)	callout_init(c, /*mpsafe*/0);
263 #else
264 #define mpt_callout_init(c)	callout_init(c);
265 #endif
266 
267 /********************************* Endianness *********************************/
268 static __inline uint64_t
269 u64toh(U64 s)
270 {
271 	uint64_t result;
272 
273 	result = le32toh(s.Low);
274 	result |= ((uint64_t)le32toh(s.High)) << 32;
275 	return (result);
276 }
277 
278 /**************************** MPI Transaction State ***************************/
279 typedef enum {
280 	REQ_STATE_NIL		= 0x00,
281 	REQ_STATE_FREE		= 0x01,
282 	REQ_STATE_ALLOCATED	= 0x02,
283 	REQ_STATE_QUEUED	= 0x04,
284 	REQ_STATE_DONE		= 0x08,
285 	REQ_STATE_TIMEDOUT	= 0x10,
286 	REQ_STATE_NEED_WAKEUP	= 0x20,
287 	REQ_STATE_LOCKED	= 0x80,	/* can't be freed */
288 	REQ_STATE_MASK		= 0xFF
289 } mpt_req_state_t;
290 
291 struct req_entry {
292 	TAILQ_ENTRY(req_entry) links;	/* Pointer to next in list */
293 	mpt_req_state_t	state;		/* Request State Information */
294 	uint16_t	index;		/* Index of this entry */
295 	uint16_t	IOCStatus;	/* Completion status */
296 	uint16_t	ResponseCode;	/* TMF Response Code */
297 	uint16_t	serno;		/* serial number */
298 	union ccb      *ccb;		/* CAM request */
299 	void	       *req_vbuf;	/* Virtual Address of Entry */
300 	void	       *sense_vbuf;	/* Virtual Address of sense data */
301 	bus_addr_t	req_pbuf;	/* Physical Address of Entry */
302 	bus_addr_t	sense_pbuf;	/* Physical Address of sense data */
303 	bus_dmamap_t	dmap;		/* DMA map for data buffer */
304 	struct req_entry *chain;	/* for SGE overallocations */
305 };
306 
307 /**************************** MPI Target State Info ***************************/
308 
309 typedef struct {
310 	uint32_t reply_desc;	/* current reply descriptor */
311 	uint32_t resid;		/* current data residual */
312 	uint32_t bytes_xfered;	/* current relative offset */
313 	union ccb *ccb;		/* pointer to currently active ccb */
314 	request_t *req;		/* pointer to currently active assist request */
315 	int	nxfers;
316 	uint32_t tag_id;
317 	enum {
318 		TGT_STATE_NIL,
319 		TGT_STATE_LOADING,
320 		TGT_STATE_LOADED,
321 		TGT_STATE_IN_CAM,
322 		TGT_STATE_SETTING_UP_FOR_DATA,
323 		TGT_STATE_MOVING_DATA,
324 		TGT_STATE_MOVING_DATA_AND_STATUS,
325 		TGT_STATE_SENDING_STATUS
326 	} state;
327 } mpt_tgt_state_t;
328 
329 /*
330  * When we get an incoming command it has its own tag which is called the
331  * IoIndex. This is the value we gave that particular command buffer when
332  * we originally assigned it. It's just a number, really. The FC card uses
333  * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which
334  * contains pointers to the request_t structures related to that IoIndex.
335  *
336  * What *we* do is construct a tag out of the index for the target command
337  * which owns the incoming ATIO plus a rolling sequence number.
338  */
339 #define	MPT_MAKE_TAGID(mpt, req, ioindex)	\
340  ((ioindex << 18) | (((mpt->sequence++) & 0x3f) << 12) | (req->index & 0xfff))
341 
342 #ifdef	INVARIANTS
343 #define	MPT_TAG_2_REQ(a, b)		mpt_tag_2_req(a, (uint32_t) b)
344 #else
345 #define	MPT_TAG_2_REQ(mpt, tag)		mpt->tgt_cmd_ptrs[tag >> 18]
346 #endif
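
/*
 * For illustration, the tag built by MPT_MAKE_TAGID() has this layout
 * (IoIndex in the high bits, a 6-bit rolling sequence, then the request
 * index), so it can be pulled apart again with plain shifts and masks.
 * The helper names below are hypothetical and exist only as a sketch:
 *
 *   31             18 17       12 11             0
 *  +-----------------+-----------+----------------+
 *  |     IoIndex     | sequence  |   req->index   |
 *  +-----------------+-----------+----------------+
 */
#if 0
static __inline u_int
mpt_tag_2_ioindex(uint32_t tag)
{
	return (tag >> 18);	/* same lookup MPT_TAG_2_REQ performs */
}

static __inline u_int
mpt_tag_2_reqindex(uint32_t tag)
{
	return (tag & 0xfff);	/* low 12 bits: req->index */
}
#endif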
347 
348 #define	MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \
349     (&((uint8_t *)req->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)]))
350 
351 STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr);
352 #define	MPT_MAX_LUNS	256
353 typedef struct {
354 	struct mpt_hdr_stailq	atios;
355 	struct mpt_hdr_stailq	inots;
356 	int enabled;
357 } tgt_resource_t;
358 #define	MPT_MAX_ELS	64
359 
360 /**************************** Handler Registration ****************************/
361 /*
362  * Global table of registered reply handlers.  The
363  * handler is indicated by byte 3 of the request
364  * index submitted to the IOC.  This allows the
365  * driver core to perform generic processing without
366  * any knowledge of per-personality behavior.
367  *
368  * MPT_NUM_REPLY_HANDLERS must be a power of 2
369  * to allow the easy generation of a mask.
370  *
371  * The handler offsets used by the core are hard coded,
372  * allowing faster code generation when assigning a handler
373  * to a request.  All "personalities" must use the
374  * handler registration mechanism.
375  *
376  * The IOC handlers that are rarely executed are placed
377  * at the tail of the table to make it more likely that
378  * all commonly executed handlers fit in a single cache
379  * line.
380  */
381 #define MPT_NUM_REPLY_HANDLERS		(32)
382 #define MPT_REPLY_HANDLER_EVENTS	MPT_CBI_TO_HID(0)
383 #define MPT_REPLY_HANDLER_CONFIG	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1)
384 #define MPT_REPLY_HANDLER_HANDSHAKE	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2)
385 typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request,
386     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame);
387 typedef union {
388 	mpt_reply_handler_t	*reply_handler;
389 } mpt_handler_t;
390 
391 typedef enum {
392 	MPT_HANDLER_REPLY,
393 	MPT_HANDLER_EVENT,
394 	MPT_HANDLER_RESET,
395 	MPT_HANDLER_SHUTDOWN
396 } mpt_handler_type;
397 
398 struct mpt_handler_record
399 {
400 	LIST_ENTRY(mpt_handler_record)	links;
401 	mpt_handler_t			handler;
402 };
403 
404 LIST_HEAD(mpt_handler_list, mpt_handler_record);
405 
406 /*
407  * The handler_id is currently unused but would contain the
408  * handler ID used in the MsgContext field to allow direction
409  * of replies to the handler.  Registrations that don't require
410  * a handler id can pass in NULL for the handler_id.
411  *
412  * Deregistrations for handlers without a handler id should
413  * pass in MPT_HANDLER_ID_NONE.
414  */
415 #define MPT_HANDLER_ID_NONE		(0xFFFFFFFF)
416 int mpt_register_handler(struct mpt_softc *, mpt_handler_type,
417 			 mpt_handler_t, uint32_t *);
418 int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type,
419 			   mpt_handler_t, uint32_t);
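
/*
 * Usage sketch (illustrative): a personality registering a reply handler
 * and recording the handler id assigned to it, normally stashed in the
 * softc for building MsgContext values later.  mpt_foo_reply_handler and
 * mpt_foo_register are assumptions for the example only.
 */
#if 0
static mpt_reply_handler_t mpt_foo_reply_handler;

static int
mpt_foo_register(struct mpt_softc *mpt)
{
	mpt_handler_t handler;
	uint32_t foo_handler_id;
	int error;

	handler.reply_handler = mpt_foo_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &foo_handler_id);
	return (error);
}
#endif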
420 
421 /******************* Per-Controller Instance Data Structures ******************/
422 TAILQ_HEAD(req_queue, req_entry);
423 
424 /* Structure for saving proper values for modifiable PCI config registers */
425 struct mpt_pci_cfg {
426 	uint16_t Command;
427 	uint16_t LatencyTimer_LineSize;
428 	uint32_t IO_BAR;
429 	uint32_t Mem0_BAR[2];
430 	uint32_t Mem1_BAR[2];
431 	uint32_t ROM_BAR;
432 	uint8_t  IntLine;
433 	uint32_t PMCSR;
434 };
435 
436 typedef enum {
437 	MPT_RVF_NONE		= 0x0,
438 	MPT_RVF_ACTIVE		= 0x1,
439 	MPT_RVF_ANNOUNCED	= 0x2,
440 	MPT_RVF_UP2DATE		= 0x4,
441 	MPT_RVF_REFERENCED	= 0x8,
442 	MPT_RVF_WCE_CHANGED	= 0x10
443 } mpt_raid_volume_flags;
444 
445 struct mpt_raid_volume {
446 	CONFIG_PAGE_RAID_VOL_0	       *config_page;
447 	MPI_RAID_VOL_INDICATOR		sync_progress;
448 	mpt_raid_volume_flags		flags;
449 	u_int				quieced_disks;
450 };
451 
452 typedef enum {
453 	MPT_RDF_NONE		= 0x00,
454 	MPT_RDF_ACTIVE		= 0x01,
455 	MPT_RDF_ANNOUNCED	= 0x02,
456 	MPT_RDF_UP2DATE		= 0x04,
457 	MPT_RDF_REFERENCED	= 0x08,
458 	MPT_RDF_QUIESCING	= 0x10,
459 	MPT_RDF_QUIESCED	= 0x20
460 } mpt_raid_disk_flags;
461 
462 struct mpt_raid_disk {
463 	CONFIG_PAGE_RAID_PHYS_DISK_0	config_page;
464 	struct mpt_raid_volume	       *volume;
465 	u_int				member_number;
466 	u_int				pass_thru_active;
467 	mpt_raid_disk_flags		flags;
468 };
469 
470 struct mpt_evtf_record {
471 	MSG_EVENT_NOTIFY_REPLY		reply;
472 	uint32_t			context;
473 	LIST_ENTRY(mpt_evtf_record)	links;
474 };
475 
476 LIST_HEAD(mpt_evtf_list, mpt_evtf_record);
477 
478 struct mpt_softc {
479 	device_t		dev;
480 #if __FreeBSD_version < 500000
481 	uint32_t		mpt_islocked;
482 	int			mpt_splsaved;
483 #else
484 	struct mtx		mpt_lock;
485 	int			mpt_locksetup;
486 #endif
487 	uint32_t		mpt_pers_mask;
488 	uint32_t		: 8,
489 		unit		: 8,
490 				: 1,
491 		twildcard	: 1,
492 		tenabled	: 1,
493 				: 2,
494 		role		: 2,	/* none, ini, target, both */
495 		raid_mwce_set	: 1,
496 		getreqwaiter	: 1,
497 		shutdwn_raid    : 1,
498 		shutdwn_recovery: 1,
499 		outofbeer	: 1,
500 				: 1,
501 		disabled	: 1,
502 		is_sas		: 1,
503 		is_fc		: 1;
504 
505 	u_int			verbose;
506 
507 	/*
508 	 * IOC Facts
509 	 */
510 	uint16_t	mpt_global_credits;
511 	uint16_t	request_frame_size;
512 	uint8_t		mpt_max_devices;
513 	uint8_t		mpt_max_buses;
514 	uint8_t		ioc_facts_flags;
515 	uint8_t		padding0;
516 
517 	/*
518 	 * Port Facts
519 	 * XXX - Add multi-port support!
520 	 */
521 	uint16_t	mpt_ini_id;
522 	uint16_t	mpt_port_type;
523 	uint16_t	mpt_proto_flags;
524 	uint16_t	mpt_max_tgtcmds;
525 
526 	/*
527 	 * Device Configuration Information
528 	 */
529 	union {
530 		struct mpt_spi_cfg {
531 			CONFIG_PAGE_SCSI_PORT_0		_port_page0;
532 			CONFIG_PAGE_SCSI_PORT_1		_port_page1;
533 			CONFIG_PAGE_SCSI_PORT_2		_port_page2;
534 			CONFIG_PAGE_SCSI_DEVICE_0	_dev_page0[16];
535 			CONFIG_PAGE_SCSI_DEVICE_1	_dev_page1[16];
536 			uint16_t			_tag_enable;
537 			uint16_t			_disc_enable;
538 		} spi;
539 #define	mpt_port_page0		cfg.spi._port_page0
540 #define	mpt_port_page1		cfg.spi._port_page1
541 #define	mpt_port_page2		cfg.spi._port_page2
542 #define	mpt_dev_page0		cfg.spi._dev_page0
543 #define	mpt_dev_page1		cfg.spi._dev_page1
544 #define	mpt_tag_enable		cfg.spi._tag_enable
545 #define	mpt_disc_enable		cfg.spi._disc_enable
546 		struct mpi_fc_cfg {
547 			CONFIG_PAGE_FC_PORT_0 _port_page0;
548 #define	mpt_fcport_page0	cfg.fc._port_page0
549 		} fc;
550 	} cfg;
551 
552 	/* Controller Info */
553 	CONFIG_PAGE_IOC_2 *	ioc_page2;
554 	CONFIG_PAGE_IOC_3 *	ioc_page3;
555 
556 	/* Raid Data */
557 	struct mpt_raid_volume* raid_volumes;
558 	struct mpt_raid_disk*	raid_disks;
559 	u_int			raid_max_volumes;
560 	u_int			raid_max_disks;
561 	u_int			raid_page0_len;
562 	u_int			raid_wakeup;
563 	u_int			raid_rescan;
564 	u_int			raid_resync_rate;
565 	u_int			raid_mwce_setting;
566 	u_int			raid_queue_depth;
567 	u_int			raid_nonopt_volumes;
568 	struct proc	       *raid_thread;
569 	struct callout		raid_timer;
570 
571 	/*
572 	 * PCI Hardware info
573 	 */
574 	struct resource *	pci_irq;	/* Interrupt map for chip */
575 	void *			ih;		/* Interrupt handle */
576 	struct mpt_pci_cfg	pci_cfg;	/* saved PCI conf registers */
577 
578 	/*
579 	 * DMA Mapping Stuff
580 	 */
581 	struct resource *	pci_reg;	/* Register map for chip */
582 	int			pci_mem_rid;	/* Resource ID */
583 	bus_space_tag_t		pci_st;		/* Bus tag for registers */
584 	bus_space_handle_t	pci_sh;		/* Bus handle for registers */
585 	/* PIO versions of above. */
586 	int			pci_pio_rid;
587 	struct resource *	pci_pio_reg;
588 	bus_space_tag_t		pci_pio_st;
589 	bus_space_handle_t	pci_pio_sh;
590 
591 	bus_dma_tag_t		parent_dmat;	/* DMA tag for parent PCI bus */
592 	bus_dma_tag_t		reply_dmat;	/* DMA tag for reply memory */
593 	bus_dmamap_t		reply_dmap;	/* DMA map for reply memory */
594 	uint8_t		       *reply;		/* KVA of reply memory */
595 	bus_addr_t		reply_phys;	/* BusAddr of reply memory */
596 
597 	bus_dma_tag_t		buffer_dmat;	/* DMA tag for buffers */
598 	bus_dma_tag_t		request_dmat;	/* DMA tag for request memory */
599 	bus_dmamap_t		request_dmap;	/* DMA map for request memory */
600 	uint8_t		       *request;	/* KVA of Request memory */
601 	bus_addr_t		request_phys;	/* BusAddr of request memory */
602 
603 	uint32_t		max_seg_cnt;	/* calculated after IOC facts */
604 
605 	/*
606 	 * Hardware management
607 	 */
608 	u_int			reset_cnt;
609 
610 	/*
611 	 * CAM && Software Management
612 	 */
613 	request_t	       *request_pool;
614 	struct req_queue	request_free_list;
615 	struct req_queue	request_pending_list;
616 	struct req_queue	request_timeout_list;
617 
618 
619 	struct cam_sim	       *sim;
620 	struct cam_path	       *path;
621 
622 	struct cam_sim	       *phydisk_sim;
623 	struct cam_path	       *phydisk_path;
624 
625 	struct proc	       *recovery_thread;
626 	request_t	       *tmf_req;
627 
628 	/*
629 	 * Deferred frame acks due to resource shortage.
630 	 */
631 	struct mpt_evtf_list	ack_frames;
632 	/*
633 	 * Target Mode Support
634 	 */
635 	uint32_t		scsi_tgt_handler_id;
636 	request_t **		tgt_cmd_ptrs;
637 	request_t **		els_cmd_ptrs;	/* FC only */
638 
639 	/*
640 	 * *snork*- this is chosen to be here *just in case* somebody
641 	 * forgets to point to it exactly and we index off of trt with
642 	 * CAM_LUN_WILDCARD.
643 	 */
644 	tgt_resource_t		trt_wildcard;	/* wildcard luns */
645 	tgt_resource_t		trt[MPT_MAX_LUNS];
646 	uint16_t		tgt_cmds_allocated;
647 	uint16_t		els_cmds_allocated;	/* FC only */
648 
649 	uint16_t		timeouts;	/* timeout count */
650 	uint16_t		success;	/* successes after timeout */
651 	uint16_t		sequence;	/* Sequence Number */
652 	uint16_t		pad3;
653 
654 
655 	/* Opposing port in a 929 or 1030, or NULL */
656 	struct mpt_softc *	mpt2;
657 
658 	/* FW Image management */
659 	uint32_t		fw_image_size;
660 	uint8_t		       *fw_image;
661 	bus_dma_tag_t		fw_dmat;	/* DMA tag for firmware image */
662 	bus_dmamap_t		fw_dmap;	/* DMA map for firmware image */
663 	bus_addr_t		fw_phys;	/* BusAddr of firmware image */
664 
665 	/* Shutdown Event Handler. */
666 	eventhandler_tag         eh;
667 
668 	TAILQ_ENTRY(mpt_softc)	links;
669 };
670 
671 static __inline void mpt_assign_serno(struct mpt_softc *, request_t *);
672 
673 static __inline void
674 mpt_assign_serno(struct mpt_softc *mpt, request_t *req)
675 {
676 	if ((req->serno = mpt->sequence++) == 0) {
677 		req->serno = mpt->sequence++;
678 	}
679 }
680 
681 /***************************** Locking Primitives *****************************/
682 #if __FreeBSD_version < 500000
683 #define	MPT_IFLAGS		INTR_TYPE_CAM
684 #define	MPT_LOCK(mpt)		mpt_lockspl(mpt)
685 #define	MPT_UNLOCK(mpt)		mpt_unlockspl(mpt)
686 #define	MPT_OWNED(mpt)		mpt->mpt_islocked
687 #define	MPTLOCK_2_CAMLOCK	MPT_UNLOCK
688 #define	CAMLOCK_2_MPTLOCK	MPT_LOCK
689 #define	MPT_LOCK_SETUP(mpt)
690 #define	MPT_LOCK_DESTROY(mpt)
691 
692 static __inline void mpt_lockspl(struct mpt_softc *mpt);
693 static __inline void mpt_unlockspl(struct mpt_softc *mpt);
694 
695 static __inline void
696 mpt_lockspl(struct mpt_softc *mpt)
697 {
698        int s;
699 
700        s = splcam();
701        if (mpt->mpt_islocked++ == 0) {
702                mpt->mpt_splsaved = s;
703        } else {
704                splx(s);
705 	       panic("Recursed lock with mask: 0x%x\n", s);
706        }
707 }
708 
709 static __inline void
710 mpt_unlockspl(struct mpt_softc *mpt)
711 {
712        if (mpt->mpt_islocked) {
713                if (--mpt->mpt_islocked == 0) {
714                        splx(mpt->mpt_splsaved);
715                }
716        } else
717 	       panic("Negative lock count\n");
718 }
719 
720 static __inline int
721 mpt_sleep(struct mpt_softc *mpt, void *ident, int priority,
722 	   const char *wmesg, int timo)
723 {
724 	int saved_cnt;
725 	int saved_spl;
726 	int error;
727 
728 	KASSERT(mpt->mpt_islocked <= 1, ("Invalid lock count on tsleep"));
729 	saved_cnt = mpt->mpt_islocked;
730 	saved_spl = mpt->mpt_splsaved;
731 	mpt->mpt_islocked = 0;
732 	error = tsleep(ident, priority, wmesg, timo);
733 	KASSERT(mpt->mpt_islocked == 0, ("Invalid lock count on wakeup"));
734 	mpt->mpt_islocked = saved_cnt;
735 	mpt->mpt_splsaved = saved_spl;
736 	return (error);
737 }
738 
739 #else
740 #ifdef	LOCKING_WORKED_AS_IT_SHOULD
741 #error "Shouldn't Be Here!"
742 #define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE
743 #define	MPT_LOCK_SETUP(mpt)						\
744 		mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF);		\
745 		mpt->mpt_locksetup = 1
746 #define	MPT_LOCK_DESTROY(mpt)						\
747 	if (mpt->mpt_locksetup) {					\
748 		mtx_destroy(&mpt->mpt_lock);				\
749 		mpt->mpt_locksetup = 0;					\
750 	}
751 
752 #define	MPT_LOCK(mpt)		mtx_lock(&(mpt)->mpt_lock)
753 #define	MPT_UNLOCK(mpt)		mtx_unlock(&(mpt)->mpt_lock)
754 #define	MPT_OWNED(mpt)		mtx_owned(&(mpt)->mpt_lock)
755 #define	MPTLOCK_2_CAMLOCK(mpt)	\
756 	mtx_unlock(&(mpt)->mpt_lock); mtx_lock(&Giant)
757 #define	CAMLOCK_2_MPTLOCK(mpt)	\
758 	mtx_unlock(&Giant); mtx_lock(&(mpt)->mpt_lock)
759 #define mpt_sleep(mpt, ident, priority, wmesg, timo) \
760 	msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo)
761 
762 #else
763 
764 #define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY
765 #define	MPT_LOCK_SETUP(mpt)	do { } while (0)
766 #define	MPT_LOCK_DESTROY(mpt)	do { } while (0)
767 #if	0
768 #define	MPT_LOCK(mpt)		\
769 	device_printf(mpt->dev, "LOCK %s:%d\n", __FILE__, __LINE__); 	\
770 	KASSERT(mpt->mpt_locksetup == 0,				\
771 	    ("recursive lock acquire at %s:%d", __FILE__, __LINE__));	\
772 	mpt->mpt_locksetup = 1
773 #define	MPT_UNLOCK(mpt)		\
774 	device_printf(mpt->dev, "UNLK %s:%d\n", __FILE__, __LINE__); 	\
775 	KASSERT(mpt->mpt_locksetup == 1,				\
776 	    ("release unowned lock at %s:%d", __FILE__, __LINE__));	\
777 	mpt->mpt_locksetup = 0
778 #else
779 #define	MPT_LOCK(mpt)							\
780 	KASSERT(mpt->mpt_locksetup == 0,				\
781 	    ("recursive lock acquire at %s:%d", __FILE__, __LINE__));	\
782 	mpt->mpt_locksetup = 1
783 #define	MPT_UNLOCK(mpt)							\
784 	KASSERT(mpt->mpt_locksetup == 1,				\
785 	    ("release unowned lock at %s:%d", __FILE__, __LINE__));	\
786 	mpt->mpt_locksetup = 0
787 #endif
788 #define	MPT_OWNED(mpt)		mpt->mpt_locksetup
789 #define	MPTLOCK_2_CAMLOCK(mpt)	MPT_UNLOCK(mpt)
790 #define	CAMLOCK_2_MPTLOCK(mpt)	MPT_LOCK(mpt)
791 
792 static __inline int
793 mpt_sleep(struct mpt_softc *, void *, int, const char *, int);
794 
795 static __inline int
796 mpt_sleep(struct mpt_softc *mpt, void *i, int p, const char *w, int t)
797 {
798 	int r;
799 	MPT_UNLOCK(mpt);
800 	r = tsleep(i, p, w, t);
801 	MPT_LOCK(mpt);
802 	return (r);
803 }
804 #endif
805 #endif
806 
807 /******************************* Register Access ******************************/
808 static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t);
809 static __inline uint32_t mpt_read(struct mpt_softc *, int);
810 static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t);
811 static __inline uint32_t mpt_pio_read(struct mpt_softc *, int);
812 
813 static __inline void
814 mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
815 {
816 	bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val);
817 }
818 
819 static __inline uint32_t
820 mpt_read(struct mpt_softc *mpt, int offset)
821 {
822 	return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset));
823 }
824 
825 /*
826  * Some operations (e.g. diagnostic register writes while the ARM processor
827  * is disabled) must be performed using "PCI pio" operations.  On non-PCI
828  * busses, these operations likely map to normal register accesses.
829  */
830 static __inline void
831 mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
832 {
833 	bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val);
834 }
835 
836 static __inline uint32_t
837 mpt_pio_read(struct mpt_softc *mpt, int offset)
838 {
839 	return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset));
840 }
841 /*********************** Reply Frame/Request Management ***********************/
842 /* Max MPT Reply we are willing to accept (must be power of 2) */
843 #define MPT_REPLY_SIZE   	256
844 
845 /*
846  * Must be less than 16384 in order for target mode to work
847  */
848 #define MPT_MAX_REQUESTS(mpt)	512
849 #define MPT_REQUEST_AREA	512
850 #define MPT_SENSE_SIZE		32	/* included in MPT_REQUEST_AREA */
851 #define MPT_REQ_MEM_SIZE(mpt)	(MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA)
852 
853 #define MPT_CONTEXT_CB_SHIFT	(16)
854 #define MPT_CBI(handle)		(handle >> MPT_CONTEXT_CB_SHIFT)
855 #define MPT_CBI_TO_HID(cbi)	((cbi) << MPT_CONTEXT_CB_SHIFT)
856 #define MPT_CONTEXT_TO_CBI(x)	\
857     (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1))
858 #define MPT_CONTEXT_REQI_MASK	0xFFFF
859 #define MPT_CONTEXT_TO_REQI(x)	((x) & MPT_CONTEXT_REQI_MASK)
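
/*
 * Illustrative sketch: a request's 32-bit MsgContext is typically formed
 * by OR'ing a handler id (already shifted via MPT_CBI_TO_HID) with the
 * request index, and the macros above recover both pieces when the reply
 * comes back.  The function and its arguments are assumptions for the
 * example only.
 */
#if 0
static void
mpt_context_example(request_t *req, uint32_t handler_id)
{
	uint32_t context;
	u_int cbi, reqi;

	/* Build a MsgContext for a request owned by a given handler. */
	context = handler_id | req->index;

	/* Recover the pieces when the corresponding reply arrives. */
	cbi  = MPT_CONTEXT_TO_CBI(context);	/* which reply handler */
	reqi = MPT_CONTEXT_TO_REQI(context);	/* which request slot */
}
#endif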
860 
861 /*
862  * Convert a 32-bit physical address returned from the IOC to an
863  * offset into our reply frame memory or the kvm address needed
864  * to access the data.  The returned address is only the low
865  * 32 bits, so mask our base physical address accordingly.
866  */
867 #define MPT_REPLY_BADDR(x)		\
868 	(x << 1)
869 #define MPT_REPLY_OTOV(m, i) 		\
870 	((void *)(&m->reply[i]))
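
/*
 * Sketch of the conversion (illustrative): once a reply descriptor is
 * known to carry a reply-frame address, the interrupt path can recover
 * the kernel virtual address roughly as follows.  The helper name is an
 * assumption for the example only.
 */
#if 0
static MSG_DEFAULT_REPLY *
mpt_reply_desc_to_frame(struct mpt_softc *mpt, uint32_t reply_desc)
{
	uint32_t reply_baddr;
	u_int offset;

	reply_baddr = MPT_REPLY_BADDR(reply_desc);
	offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
	return ((MSG_DEFAULT_REPLY *)MPT_REPLY_OTOV(mpt, offset));
}
#endif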
871 
872 #define	MPT_DUMP_REPLY_FRAME(mpt, reply_frame)		\
873 do {							\
874 	if (mpt->verbose >= MPT_PRT_DEBUG)		\
875 		mpt_dump_reply_frame(mpt, reply_frame);	\
876 } while(0)
877 
878 static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt);
879 static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr);
880 
881 /*
882  * Give the reply buffer back to the IOC after we have
883  * finished processing it.
884  */
885 static __inline void
886 mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr)
887 {
888      mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr);
889 }
890 
891 /* Get a reply from the IOC */
892 static __inline uint32_t
893 mpt_pop_reply_queue(struct mpt_softc *mpt)
894 {
895      return mpt_read(mpt, MPT_OFFSET_REPLY_Q);
896 }
897 
898 void
899 mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int);
900 
901 /************************* Scatter Gather Management **************************/
902 /* MPT_RQSL- size of request frame, in bytes */
903 #define	MPT_RQSL(mpt)		(mpt->request_frame_size << 2)
904 
905 /* MPT_NSGL- how many SG entries can fit in a request frame size */
906 #define	MPT_NSGL(mpt)		(MPT_RQSL(mpt) / sizeof (SGE_IO_UNION))
907 
908 /* MPT_NRFM- how many request frames can fit in each request alloc we make */
909 #define	MPT_NRFM(mpt)		(MPT_REQUEST_AREA / MPT_RQSL(mpt))
910 
911 /*
912  * MPT_NSGL_FIRST- # of SG elements that can fit after
913  * an I/O request but still within the request frame.
914  * Do this safely based upon SGE_IO_UNION.
915  *
916  * Note that the first element is *within* the SCSI request.
917  */
918 #define	MPT_NSGL_FIRST(mpt)	\
919     ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \
920     sizeof (SGE_IO_UNION))
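
/*
 * Worked example (values hedged): if the IOC reports a request frame size
 * of 32 32-bit words, MPT_RQSL(mpt) is 32 << 2 = 128 bytes and
 * MPT_NRFM(mpt) is MPT_REQUEST_AREA / 128 = 512 / 128 = 4 request frames
 * per request allocation.  MPT_NSGL(mpt) and MPT_NSGL_FIRST(mpt) then
 * follow from sizeof (SGE_IO_UNION) and sizeof (MSG_SCSI_IO_REQUEST) for
 * the build in question.
 */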
921 
922 /***************************** IOC Initialization *****************************/
923 int mpt_reset(struct mpt_softc *, int /*reinit*/);
924 
925 /****************************** Debugging ************************************/
926 typedef struct mpt_decode_entry {
927 	char    *name;
928 	u_int	 value;
929 	u_int	 mask;
930 } mpt_decode_entry_t;
931 
932 int mpt_decode_value(mpt_decode_entry_t *table, u_int num_entries,
933 		     const char *name, u_int value, u_int *cur_column,
934 		     u_int wrap_point);
935 
936 enum {
937 	MPT_PRT_ALWAYS,
938 	MPT_PRT_FATAL,
939 	MPT_PRT_ERROR,
940 	MPT_PRT_WARN,
941 	MPT_PRT_INFO,
942 	MPT_PRT_DEBUG,
943 	MPT_PRT_DEBUG1,
944 	MPT_PRT_DEBUG2,
945 	MPT_PRT_DEBUG3,
946 	MPT_PRT_TRACE,
947 	MPT_PRT_NONE=100
948 };
949 
950 #if __FreeBSD_version > 500000
951 #define mpt_lprt(mpt, level, ...)		\
952 do {						\
953 	if (level <= (mpt)->verbose)		\
954 		mpt_prt(mpt, __VA_ARGS__);	\
955 } while (0)
956 
957 #define mpt_lprtc(mpt, level, ...)		 \
958 do {						 \
959 	if (level <= (mpt)->verbose)		 \
960 		mpt_prtc(mpt, __VA_ARGS__);	 \
961 } while (0)
962 #else
963 void mpt_lprt(struct mpt_softc *, int, const char *, ...)
964 	__printflike(3, 4);
965 void mpt_lprtc(struct mpt_softc *, int, const char *, ...)
966 	__printflike(3, 4);
967 #endif
968 void mpt_prt(struct mpt_softc *, const char *, ...)
969 	__printflike(2, 3);
970 void mpt_prtc(struct mpt_softc *, const char *, ...)
971 	__printflike(2, 3);
972 
973 /**************************** Target Mode Related ***************************/
974 static __inline int mpt_cdblen(uint8_t, int);
975 static __inline int
976 mpt_cdblen(uint8_t cdb0, int maxlen)
977 {
978 	int group = cdb0 >> 5;
979 	switch (group) {
980 	case 0:
981 		return (6);
982 	case 1:
983 		return (10);
984 	case 4:
985 	case 5:
986 		return (12);
987 	default:
988 		return (16);
989 	}
990 }
991 #ifdef	INVARIANTS
992 static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t);
993 static __inline request_t *
994 mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag)
995 {
996 	uint16_t rtg = (tag >> 18);
997 	KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d\n", tag));
998 	KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array"));
999 	KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer"));
1000 	return (mpt->tgt_cmd_ptrs[rtg]);
1001 }
1002 
1003 
1004 static __inline int
1005 mpt_req_on_free_list(struct mpt_softc *, request_t *);
1006 static __inline int
1007 mpt_req_on_pending_list(struct mpt_softc *, request_t *);
1008 
1009 static __inline void
1010 mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int);
1011 static __inline void
1012 mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int);
1013 
1014 
1015 /*
1016  * Is request on freelist?
1017  */
1018 static __inline int
1019 mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req)
1020 {
1021 	request_t *lrq;
1022 
1023 	TAILQ_FOREACH(lrq, &mpt->request_free_list, links) {
1024 		if (lrq == req) {
1025 			return (1);
1026 		}
1027 	}
1028 	return (0);
1029 }
1030 
1031 /*
1032  * Is request on pending list?
1033  */
1034 static __inline int
1035 mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req)
1036 {
1037 	request_t *lrq;
1038 
1039 	TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) {
1040 		if (lrq == req) {
1041 			return (1);
1042 		}
1043 	}
1044 	return (0);
1045 }
1046 
1047 /*
1048  * Make sure that req *is* part of one of the special lists
1049  */
1050 static __inline void
1051 mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
1052 {
1053 	int i;
1054 	for (i = 0; i < mpt->els_cmds_allocated; i++) {
1055 		if (req == mpt->els_cmd_ptrs[i]) {
1056 			return;
1057 		}
1058 	}
1059 	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
1060 		if (req == mpt->tgt_cmd_ptrs[i]) {
1061 			return;
1062 		}
1063 	}
1064 	panic("%s(%d): req %p:%u function %x not in els or tgt ptrs\n",
1065 	    s, line, req, req->serno,
1066 	    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function);
1067 }
1068 
1069 /*
1070  * Make sure that req is *not* part of one of the special lists.
1071  */
1072 static __inline void
1073 mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
1074 {
1075 	int i;
1076 	for (i = 0; i < mpt->els_cmds_allocated; i++) {
1077 		KASSERT(req != mpt->els_cmd_ptrs[i],
1078 		    ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d\n",
1079 		    s, line, req, req->serno,
1080 		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
1081 	}
1082 	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
1083 		KASSERT(req != mpt->tgt_cmd_ptrs[i],
1084 		    ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d\n",
1085 		    s, line, req, req->serno,
1086 		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
1087 	}
1088 }
1089 #endif
1090 
1091 typedef enum {
1092 	MPT_ABORT_TASK_SET=1234,
1093 	MPT_CLEAR_TASK_SET,
1094 	MPT_TARGET_RESET,
1095 	MPT_CLEAR_ACA,
1096 	MPT_TERMINATE_TASK,
1097 	MPT_NIL_TMT_VALUE=5678
1098 } mpt_task_mgmt_t;
1099 
1100 /**************************** Unclassified Routines ***************************/
1101 void		mpt_send_cmd(struct mpt_softc *mpt, request_t *req);
1102 int		mpt_recv_handshake_reply(struct mpt_softc *mpt,
1103 					 size_t reply_len, void *reply);
1104 int		mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1105 			     mpt_req_state_t state, mpt_req_state_t mask,
1106 			     int sleep_ok, int time_ms);
1107 void		mpt_enable_ints(struct mpt_softc *mpt);
1108 void		mpt_disable_ints(struct mpt_softc *mpt);
1109 int		mpt_attach(struct mpt_softc *mpt);
1110 int		mpt_shutdown(struct mpt_softc *mpt);
1111 int		mpt_detach(struct mpt_softc *mpt);
1112 int		mpt_send_handshake_cmd(struct mpt_softc *mpt,
1113 				       size_t len, void *cmd);
1114 request_t *	mpt_get_request(struct mpt_softc *mpt, int sleep_ok);
1115 void		mpt_free_request(struct mpt_softc *mpt, request_t *req);
1116 void		mpt_intr(void *arg);
1117 void		mpt_check_doorbell(struct mpt_softc *mpt);
1118 void		mpt_dump_reply_frame(struct mpt_softc *mpt,
1119 				     MSG_DEFAULT_REPLY *reply_frame);
1120 
1121 void		mpt_set_config_regs(struct mpt_softc *);
1122 int		mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/,
1123 				  u_int /*Action*/, u_int /*PageVersion*/,
1124 				  u_int /*PageLength*/, u_int /*PageNumber*/,
1125 				  u_int /*PageType*/, uint32_t /*PageAddress*/,
1126 				  bus_addr_t /*addr*/, bus_size_t/*len*/,
1127 				  int /*sleep_ok*/, int /*timeout_ms*/);
1128 int		mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/,
1129 				    int /*PageNumber*/,
1130 				    uint32_t /*PageAddress*/,
1131 				    CONFIG_PAGE_HEADER *,
1132 				    int /*sleep_ok*/, int /*timeout_ms*/);
1133 int		mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/,
1134 				  uint32_t /*PageAddress*/,
1135 				  CONFIG_PAGE_HEADER *, size_t /*len*/,
1136 				  int /*sleep_ok*/, int /*timeout_ms*/);
1137 int		mpt_write_cfg_page(struct mpt_softc *, int /*Action*/,
1138 				   uint32_t /*PageAddress*/,
1139 				   CONFIG_PAGE_HEADER *, size_t /*len*/,
1140 				   int /*sleep_ok*/, int /*timeout_ms*/);
1141 static __inline int
1142 mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
1143 		      CONFIG_PAGE_HEADER *hdr, size_t len,
1144 		      int sleep_ok, int timeout_ms)
1145 {
1146 	return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
1147 				  PageAddress, hdr, len, sleep_ok, timeout_ms));
1148 }
1149 
1150 static __inline int
1151 mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
1152 		       CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1153 		       int timeout_ms)
1154 {
1155 	return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT,
1156 				   PageAddress, hdr, len, sleep_ok,
1157 				   timeout_ms));
1158 }
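
/*
 * Usage sketch (illustrative): fetching a configuration page is a two-step
 * affair -- read the page header to learn its version and length, then read
 * the current page contents into a caller-supplied buffer.  The page type,
 * page address, timeout and helper name below are assumptions for the
 * example only.
 */
#if 0
static int
mpt_foo_fetch_port_page0(struct mpt_softc *mpt)
{
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT,
	    /*PageNumber*/0, /*PageAddress*/0, &mpt->mpt_port_page0.Header,
	    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv != 0)
		return (rv);

	return (mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
	    &mpt->mpt_port_page0.Header, sizeof(mpt->mpt_port_page0),
	    /*sleep_ok*/FALSE, /*timeout_ms*/5000));
}
#endif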
1159 
1160 /* mpt_debug.c functions */
1161 void mpt_print_reply(void *vmsg);
1162 void mpt_print_db(uint32_t mb);
1163 void mpt_print_config_reply(void *vmsg);
1164 char *mpt_ioc_diag(uint32_t diag);
1165 void mpt_req_state(mpt_req_state_t state);
1166 void mpt_print_config_request(void *vmsg);
1167 void mpt_print_request(void *vmsg);
1168 void mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *msg);
1169 void mpt_dump_sgl(SGE_IO_UNION *se, int offset);
1170 #endif /* _MPT_H_ */
1171