xref: /freebsd/sys/dev/mpt/mpt.h (revision 7dfd9569a2f0637fb9a48157b1c1bfe5709faee3)
1 /* $FreeBSD$ */
2 /*-
 3  * Generic defines for LSI '909 FC adapters.
4  * FreeBSD Version.
5  *
6  * Copyright (c)  2000, 2001 by Greg Ansley
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice immediately at the beginning of the file, without modification,
13  *    this list of conditions, and the following disclaimer.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 /*-
30  * Copyright (c) 2002, 2006 by Matthew Jacob
31  * All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions are
35  * met:
36  * 1. Redistributions of source code must retain the above copyright
37  *    notice, this list of conditions and the following disclaimer.
38  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
39  *    substantially similar to the "NO WARRANTY" disclaimer below
40  *    ("Disclaimer") and any redistribution must be conditioned upon including
41  *    a substantially similar Disclaimer requirement for further binary
42  *    redistribution.
43  * 3. Neither the names of the above listed copyright holders nor the names
44  *    of any contributors may be used to endorse or promote products derived
45  *    from this software without specific prior written permission.
46  *
47  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
48  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
51  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
52  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
53  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
54  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
55  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
56  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
57  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58  *
59  * Support from Chris Ellsworth in order to make SAS adapters work
60  * is gratefully acknowledged.
61  */
62 /*
63  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
64  * Copyright (c) 2004, 2005 Justin T. Gibbs
65  * Copyright (c) 2005, WHEEL Sp. z o.o.
66  * All rights reserved.
67  *
68  * Redistribution and use in source and binary forms, with or without
69  * modification, are permitted provided that the following conditions are
70  * met:
71  * 1. Redistributions of source code must retain the above copyright
72  *    notice, this list of conditions and the following disclaimer.
73  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
74  *    substantially similar to the "NO WARRANTY" disclaimer below
75  *    ("Disclaimer") and any redistribution must be conditioned upon including
76  *    a substantially similar Disclaimer requirement for further binary
77  *    redistribution.
78  * 3. Neither the names of the above listed copyright holders nor the names
79  *    of any contributors may be used to endorse or promote products derived
80  *    from this software without specific prior written permission.
81  *
82  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
83  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
84  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
85  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
86  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
87  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
88  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
89  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
90  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
91  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
92  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
93  */
94 
95 #ifndef _MPT_H_
96 #define _MPT_H_
97 
98 /********************************* OS Includes ********************************/
99 #include <sys/types.h>
100 #include <sys/param.h>
101 #include <sys/systm.h>
102 #include <sys/endian.h>
103 #include <sys/eventhandler.h>
104 #if __FreeBSD_version < 500000
105 #include <sys/kernel.h>
106 #include <sys/queue.h>
107 #include <sys/malloc.h>
108 #else
109 #include <sys/lock.h>
110 #include <sys/kernel.h>
111 #include <sys/queue.h>
112 #include <sys/malloc.h>
113 #include <sys/mutex.h>
114 #include <sys/condvar.h>
115 #endif
116 #include <sys/proc.h>
117 #include <sys/bus.h>
118 #include <sys/module.h>
119 
120 #include <machine/clock.h>
121 #include <machine/cpu.h>
122 #include <machine/resource.h>
123 
124 #if __FreeBSD_version < 500000
125 #include <machine/bus.h>
126 #endif
127 
128 #include <sys/rman.h>
129 
130 #if __FreeBSD_version < 500000
131 #include <pci/pcireg.h>
132 #include <pci/pcivar.h>
133 #else
134 #include <dev/pci/pcireg.h>
135 #include <dev/pci/pcivar.h>
136 #endif
137 
138 #include <machine/bus.h>
139 #include "opt_ddb.h"
140 
141 /**************************** Register Definitions ****************************/
142 #include <dev/mpt/mpt_reg.h>
143 
144 /******************************* MPI Definitions ******************************/
145 #include <dev/mpt/mpilib/mpi_type.h>
146 #include <dev/mpt/mpilib/mpi.h>
147 #include <dev/mpt/mpilib/mpi_cnfg.h>
148 #include <dev/mpt/mpilib/mpi_ioc.h>
149 #include <dev/mpt/mpilib/mpi_raid.h>
150 
151 /* XXX For mpt_debug.c */
152 #include <dev/mpt/mpilib/mpi_init.h>
153 
154 /****************************** Misc Definitions ******************************/
155 #define MPT_OK (0)
156 #define MPT_FAIL (0x10000)
157 
158 #define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array))
159 
160 #define	MPT_ROLE_NONE		0
161 #define	MPT_ROLE_INITIATOR	1
162 #define	MPT_ROLE_TARGET		2
163 #define	MPT_ROLE_BOTH		3
164 #define	MPT_ROLE_DEFAULT	MPT_ROLE_INITIATOR
165 
166 /**************************** Forward Declarations ****************************/
167 struct mpt_softc;
168 struct mpt_personality;
169 typedef struct req_entry request_t;
170 
171 /************************* Personality Module Support *************************/
172 typedef int mpt_load_handler_t(struct mpt_personality *);
173 typedef int mpt_probe_handler_t(struct mpt_softc *);
174 typedef int mpt_attach_handler_t(struct mpt_softc *);
175 typedef int mpt_enable_handler_t(struct mpt_softc *);
176 typedef int mpt_event_handler_t(struct mpt_softc *, request_t *,
177 				MSG_EVENT_NOTIFY_REPLY *);
178 typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/);
179 /* XXX Add return value and use for veto? */
180 typedef void mpt_shutdown_handler_t(struct mpt_softc *);
181 typedef void mpt_detach_handler_t(struct mpt_softc *);
182 typedef int mpt_unload_handler_t(struct mpt_personality *);
183 
184 struct mpt_personality
185 {
186 	const char		*name;
187 	uint32_t		 id;		/* Assigned identifier. */
 188 	u_int			 use_count;	/* Instances using personality */
 189 	mpt_load_handler_t	*load;		/* configure personality */
190 #define MPT_PERS_FIRST_HANDLER(pers) (&(pers)->load)
 191 	mpt_probe_handler_t	*probe;		/* probe for device support */
192 	mpt_attach_handler_t	*attach;	/* initialize device instance */
193 	mpt_enable_handler_t	*enable;	/* enable device */
194 	mpt_event_handler_t	*event;		/* Handle MPI event. */
195 	mpt_reset_handler_t	*reset;		/* Re-init after reset. */
196 	mpt_shutdown_handler_t	*shutdown;	/* Shutdown instance. */
197 	mpt_detach_handler_t	*detach;	/* release device instance */
198 	mpt_unload_handler_t	*unload;	/* Shutdown personality */
199 #define MPT_PERS_LAST_HANDLER(pers) (&(pers)->unload)
200 };
201 
202 int mpt_modevent(module_t, int, void *);
203 
204 /* Maximum supported number of personalities. */
205 #define MPT_MAX_PERSONALITIES	(15)
206 
207 #define MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \
208 	MODULE_DEPEND(name, dep, vmin, vpref, vmax)
209 
210 #define DECLARE_MPT_PERSONALITY(name, order)				  \
211 	static moduledata_t name##_mod = {				  \
212 		#name, mpt_modevent, &name##_personality		  \
213 	};								  \
214 	DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order);	  \
215 	MODULE_VERSION(name, 1);					  \
216 	MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1)
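/*
 * Illustrative sketch only (not part of the driver): a hypothetical
 * personality module could be wired up roughly as follows.  Every name
 * prefixed with "foo_" is an assumption made for this example.
 *
 *	static struct mpt_personality foo_personality = {
 *		.name	= "mpt_foo",
 *		.load	= foo_load,
 *		.probe	= foo_probe,
 *		.attach	= foo_attach,
 *		.event	= foo_event,
 *		.reset	= foo_reset,
 *		.detach	= foo_detach,
 *		.unload	= foo_unload,
 *	};
 *	DECLARE_MPT_PERSONALITY(foo, SI_ORDER_SECOND);
 *
 * mpt_modevent() then registers the personality with the core at module
 * load time, and the core walks the handlers between
 * MPT_PERS_FIRST_HANDLER() and MPT_PERS_LAST_HANDLER() as each
 * controller instance goes through its life cycle.
 */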
217 
218 /******************************* Bus DMA Support ******************************/
219 /* XXX Need to update bus_dmamap_sync to take a range argument. */
220 #define bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op)	\
221 	bus_dmamap_sync(dma_tag, dmamap, op)
222 
223 #if __FreeBSD_version >= 501102
224 #define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
225 			   lowaddr, highaddr, filter, filterarg,	\
226 			   maxsize, nsegments, maxsegsz, flags,		\
227 			   dma_tagp)					\
228 	bus_dma_tag_create(parent_tag, alignment, boundary,		\
229 			   lowaddr, highaddr, filter, filterarg,	\
230 			   maxsize, nsegments, maxsegsz, flags,		\
231 			   busdma_lock_mutex, &Giant,			\
232 			   dma_tagp)
233 #else
234 #define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary,	\
235 			   lowaddr, highaddr, filter, filterarg,	\
236 			   maxsize, nsegments, maxsegsz, flags,		\
237 			   dma_tagp)					\
238 	bus_dma_tag_create(parent_tag, alignment, boundary,		\
239 			   lowaddr, highaddr, filter, filterarg,	\
240 			   maxsize, nsegments, maxsegsz, flags,		\
241 			   dma_tagp)
242 #endif
243 
244 struct mpt_map_info {
245 	struct mpt_softc *mpt;
246 	int		  error;
247 	uint32_t	  phys;
248 };
249 
250 void mpt_map_rquest(void *, bus_dma_segment_t *, int, int);
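/*
 * Illustrative sketch only: one way to obtain DMA-able memory and learn
 * its bus address with these helpers, assuming mpt_map_rquest() records
 * the load's error status and bus address in the mpt_map_info handed to
 * it.  The sizes and flags below are example assumptions, not the
 * driver's actual allocation code.
 *
 *	struct mpt_map_info mi;
 *	size_t len = PAGE_SIZE;
 *
 *	mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
 *	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    len, 1, len, 0, &mpt->request_dmat);
 *	bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request,
 *	    BUS_DMA_NOWAIT, &mpt->request_dmap);
 *	mi.mpt = mpt;
 *	bus_dmamap_load(mpt->request_dmat, mpt->request_dmap,
 *	    mpt->request, len, mpt_map_rquest, &mi, 0);
 *	if (mi.error == 0)
 *		mpt->request_phys = mi.phys;
 */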
251 
252 /**************************** Kernel Thread Support ***************************/
253 #if __FreeBSD_version > 500005
254 #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
255 	kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
256 #else
257 #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
258 	kthread_create(func, farg, proc_ptr, fmtstr, arg)
259 #endif
260 
261 /****************************** Timer Facilities ******************************/
262 #if __FreeBSD_version > 500000
263 #define mpt_callout_init(c)	callout_init(c, /*mpsafe*/0);
264 #else
265 #define mpt_callout_init(c)	callout_init(c);
266 #endif
267 
 268 /********************************* Endianness *********************************/
269 static __inline uint64_t
270 u64toh(U64 s)
271 {
272 	uint64_t result;
273 
274 	result = le32toh(s.Low);
275 	result |= ((uint64_t)le32toh(s.High)) << 32;
276 	return (result);
277 }
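/*
 * The complementary host-to-wire conversion would look like the
 * following (hypothetical sketch, shown only to make the U64 layout
 * explicit; it is not part of this header):
 *
 *	static __inline U64
 *	htou64(uint64_t v)
 *	{
 *		U64 s;
 *
 *		s.Low = htole32(v & 0xffffffff);
 *		s.High = htole32(v >> 32);
 *		return (s);
 *	}
 */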
278 
279 /**************************** MPI Transaction State ***************************/
280 typedef enum {
281 	REQ_STATE_NIL		= 0x00,
282 	REQ_STATE_FREE		= 0x01,
283 	REQ_STATE_ALLOCATED	= 0x02,
284 	REQ_STATE_QUEUED	= 0x04,
285 	REQ_STATE_DONE		= 0x08,
286 	REQ_STATE_TIMEDOUT	= 0x10,
287 	REQ_STATE_NEED_WAKEUP	= 0x20,
288 	REQ_STATE_LOCKED	= 0x80,	/* can't be freed */
289 	REQ_STATE_MASK		= 0xFF
290 } mpt_req_state_t;
291 
292 struct req_entry {
293 	TAILQ_ENTRY(req_entry) links;	/* Pointer to next in list */
294 	mpt_req_state_t	state;		/* Request State Information */
295 	uint16_t	index;		/* Index of this entry */
296 	uint16_t	IOCStatus;	/* Completion status */
 297 	uint16_t	ResponseCode;	/* TMF Response Code */
298 	uint16_t	serno;		/* serial number */
299 	union ccb      *ccb;		/* CAM request */
300 	void	       *req_vbuf;	/* Virtual Address of Entry */
301 	void	       *sense_vbuf;	/* Virtual Address of sense data */
302 	bus_addr_t	req_pbuf;	/* Physical Address of Entry */
303 	bus_addr_t	sense_pbuf;	/* Physical Address of sense data */
304 	bus_dmamap_t	dmap;		/* DMA map for data buffer */
305 	struct req_entry *chain;	/* for SGE overallocations */
306 };
307 
308 /**************************** MPI Target State Info ***************************/
309 
310 typedef struct {
311 	uint32_t reply_desc;	/* current reply descriptor */
312 	uint32_t resid;		/* current data residual */
313 	uint32_t bytes_xfered;	/* current relative offset */
314 	union ccb *ccb;		/* pointer to currently active ccb */
315 	request_t *req;		/* pointer to currently active assist request */
316 	int	nxfers;
317 	uint32_t tag_id;
318 	enum {
319 		TGT_STATE_NIL,
320 		TGT_STATE_LOADING,
321 		TGT_STATE_LOADED,
322 		TGT_STATE_IN_CAM,
 323 		TGT_STATE_SETTING_UP_FOR_DATA,
 324 		TGT_STATE_MOVING_DATA,
 325 		TGT_STATE_MOVING_DATA_AND_STATUS,
 326 		TGT_STATE_SENDING_STATUS
327 	} state;
328 } mpt_tgt_state_t;
329 
330 /*
331  * When we get an incoming command it has its own tag which is called the
332  * IoIndex. This is the value we gave that particular command buffer when
333  * we originally assigned it. It's just a number, really. The FC card uses
334  * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which
 335  * contains pointers to the request_t structures related to that IoIndex.
336  *
337  * What *we* do is construct a tag out of the index for the target command
338  * which owns the incoming ATIO plus a rolling sequence number.
339  */
340 #define	MPT_MAKE_TAGID(mpt, req, ioindex)	\
341  ((ioindex << 18) | (((mpt->sequence++) & 0x3f) << 12) | (req->index & 0xfff))
342 
343 #ifdef	INVARIANTS
344 #define	MPT_TAG_2_REQ(a, b)		mpt_tag_2_req(a, (uint32_t) b)
345 #else
346 #define	MPT_TAG_2_REQ(mpt, tag)		mpt->tgt_cmd_ptrs[tag >> 18]
347 #endif
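/*
 * For illustration, the tag layout produced by MPT_MAKE_TAGID() (and
 * relied upon by MPT_TAG_2_REQ()) is:
 *
 *	bits 31..18	IoIndex of the owning target command buffer
 *	bits 17..12	6-bit rolling sequence number
 *	bits 11..0	index of the request_t that built the tag
 *
 * so a hypothetical decode of a tag would be:
 *
 *	ioindex = tag >> 18;
 *	seq	= (tag >> 12) & 0x3f;
 *	reqidx	= tag & 0xfff;
 */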
348 
349 #define	MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \
350     (&((uint8_t *)req->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)]))
351 
352 STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr);
353 #define	MPT_MAX_LUNS	256
354 typedef struct {
355 	struct mpt_hdr_stailq	atios;
356 	struct mpt_hdr_stailq	inots;
357 	int enabled;
358 } tgt_resource_t;
359 #define	MPT_MAX_ELS	64
360 
361 /**************************** Handler Registration ****************************/
362 /*
363  * Global table of registered reply handlers.  The
364  * handler is indicated by byte 3 of the request
365  * index submitted to the IOC.  This allows the
366  * driver core to perform generic processing without
367  * any knowledge of per-personality behavior.
368  *
369  * MPT_NUM_REPLY_HANDLERS must be a power of 2
370  * to allow the easy generation of a mask.
371  *
372  * The handler offsets used by the core are hard coded
373  * allowing faster code generation when assigning a handler
374  * to a request.  All "personalities" must use the
 375  * handler registration mechanism.
376  *
377  * The IOC handlers that are rarely executed are placed
378  * at the tail of the table to make it more likely that
379  * all commonly executed handlers fit in a single cache
380  * line.
381  */
382 #define MPT_NUM_REPLY_HANDLERS		(32)
383 #define MPT_REPLY_HANDLER_EVENTS	MPT_CBI_TO_HID(0)
384 #define MPT_REPLY_HANDLER_CONFIG	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1)
385 #define MPT_REPLY_HANDLER_HANDSHAKE	MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2)
386 typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request,
387     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame);
388 typedef union {
389 	mpt_reply_handler_t	*reply_handler;
390 } mpt_handler_t;
391 
392 typedef enum {
393 	MPT_HANDLER_REPLY,
394 	MPT_HANDLER_EVENT,
395 	MPT_HANDLER_RESET,
396 	MPT_HANDLER_SHUTDOWN
397 } mpt_handler_type;
398 
399 struct mpt_handler_record
400 {
401 	LIST_ENTRY(mpt_handler_record)	links;
402 	mpt_handler_t			handler;
403 };
404 
405 LIST_HEAD(mpt_handler_list, mpt_handler_record);
406 
407 /*
408  * The handler_id is currently unused but would contain the
409  * handler ID used in the MsgContext field to allow direction
410  * of replies to the handler.  Registrations that don't require
411  * a handler id can pass in NULL for the handler_id.
412  *
413  * Deregistrations for handlers without a handler id should
414  * pass in MPT_HANDLER_ID_NONE.
415  */
416 #define MPT_HANDLER_ID_NONE		(0xFFFFFFFF)
417 int mpt_register_handler(struct mpt_softc *, mpt_handler_type,
418 			 mpt_handler_t, uint32_t *);
419 int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type,
420 			   mpt_handler_t, uint32_t);
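/*
 * Illustrative sketch only: a personality typically registers its reply
 * handler once and keeps the returned handler id so it can later be
 * folded into the MsgContext of the requests it submits.  The "foo_"
 * names are assumptions made for this example.
 *
 *	mpt_handler_t handler;
 *	uint32_t foo_handler_id;
 *
 *	handler.reply_handler = foo_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &foo_handler_id);
 *	...
 *	error = mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    foo_handler_id);
 */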
421 
422 /******************* Per-Controller Instance Data Structures ******************/
423 TAILQ_HEAD(req_queue, req_entry);
424 
 425 /* Structure for saving proper values for modifiable PCI config registers */
426 struct mpt_pci_cfg {
427 	uint16_t Command;
428 	uint16_t LatencyTimer_LineSize;
429 	uint32_t IO_BAR;
430 	uint32_t Mem0_BAR[2];
431 	uint32_t Mem1_BAR[2];
432 	uint32_t ROM_BAR;
433 	uint8_t  IntLine;
434 	uint32_t PMCSR;
435 };
436 
437 typedef enum {
438 	MPT_RVF_NONE		= 0x0,
439 	MPT_RVF_ACTIVE		= 0x1,
440 	MPT_RVF_ANNOUNCED	= 0x2,
441 	MPT_RVF_UP2DATE		= 0x4,
442 	MPT_RVF_REFERENCED	= 0x8,
443 	MPT_RVF_WCE_CHANGED	= 0x10
444 } mpt_raid_volume_flags;
445 
446 struct mpt_raid_volume {
447 	CONFIG_PAGE_RAID_VOL_0	       *config_page;
448 	MPI_RAID_VOL_INDICATOR		sync_progress;
449 	mpt_raid_volume_flags		flags;
450 	u_int				quieced_disks;
451 };
452 
453 typedef enum {
454 	MPT_RDF_NONE		= 0x00,
455 	MPT_RDF_ACTIVE		= 0x01,
456 	MPT_RDF_ANNOUNCED	= 0x02,
457 	MPT_RDF_UP2DATE		= 0x04,
458 	MPT_RDF_REFERENCED	= 0x08,
459 	MPT_RDF_QUIESCING	= 0x10,
460 	MPT_RDF_QUIESCED	= 0x20
461 } mpt_raid_disk_flags;
462 
463 struct mpt_raid_disk {
464 	CONFIG_PAGE_RAID_PHYS_DISK_0	config_page;
465 	struct mpt_raid_volume	       *volume;
466 	u_int				member_number;
467 	u_int				pass_thru_active;
468 	mpt_raid_disk_flags		flags;
469 };
470 
471 struct mpt_evtf_record {
472 	MSG_EVENT_NOTIFY_REPLY		reply;
473 	uint32_t			context;
474 	LIST_ENTRY(mpt_evtf_record)	links;
475 };
476 
477 LIST_HEAD(mpt_evtf_list, mpt_evtf_record);
478 
479 struct mpt_softc {
480 	device_t		dev;
481 #if __FreeBSD_version < 500000
482 	uint32_t		mpt_islocked;
483 	int			mpt_splsaved;
484 #else
485 	struct mtx		mpt_lock;
486 	int			mpt_locksetup;
487 #endif
488 	uint32_t		mpt_pers_mask;
489 	uint32_t		: 8,
490 		unit		: 8,
491 				: 1,
492 		twildcard	: 1,
493 		tenabled	: 1,
494 				: 2,
495 		role		: 2,	/* none, ini, target, both */
496 		raid_mwce_set	: 1,
497 		getreqwaiter	: 1,
498 		shutdwn_raid    : 1,
499 		shutdwn_recovery: 1,
500 		outofbeer	: 1,
501 				: 1,
502 		disabled	: 1,
503 		is_sas		: 1,
504 		is_fc		: 1;
505 
506 	u_int			verbose;
507 
508 	/*
509 	 * IOC Facts
510 	 */
511 	uint16_t	mpt_global_credits;
512 	uint16_t	request_frame_size;
513 	uint8_t		mpt_max_devices;
514 	uint8_t		mpt_max_buses;
515 	uint8_t		ioc_facts_flags;
516 	uint8_t		padding0;
517 
518 	/*
519 	 * Port Facts
 520 	 * XXX - Add multi-port support!
521 	 */
522 	uint16_t	mpt_ini_id;
523 	uint16_t	mpt_port_type;
524 	uint16_t	mpt_proto_flags;
525 	uint16_t	mpt_max_tgtcmds;
526 
527 	/*
528 	 * Device Configuration Information
529 	 */
530 	union {
531 		struct mpt_spi_cfg {
532 			CONFIG_PAGE_SCSI_PORT_0		_port_page0;
533 			CONFIG_PAGE_SCSI_PORT_1		_port_page1;
534 			CONFIG_PAGE_SCSI_PORT_2		_port_page2;
535 			CONFIG_PAGE_SCSI_DEVICE_0	_dev_page0[16];
536 			CONFIG_PAGE_SCSI_DEVICE_1	_dev_page1[16];
537 			uint16_t			_tag_enable;
538 			uint16_t			_disc_enable;
539 		} spi;
540 #define	mpt_port_page0		cfg.spi._port_page0
541 #define	mpt_port_page1		cfg.spi._port_page1
542 #define	mpt_port_page2		cfg.spi._port_page2
543 #define	mpt_dev_page0		cfg.spi._dev_page0
544 #define	mpt_dev_page1		cfg.spi._dev_page1
545 #define	mpt_tag_enable		cfg.spi._tag_enable
546 #define	mpt_disc_enable		cfg.spi._disc_enable
547 		struct mpi_fc_cfg {
548 			CONFIG_PAGE_FC_PORT_0 _port_page0;
549 #define	mpt_fcport_page0	cfg.fc._port_page0
550 		} fc;
551 	} cfg;
552 
553 	/* Controller Info */
554 	CONFIG_PAGE_IOC_2 *	ioc_page2;
555 	CONFIG_PAGE_IOC_3 *	ioc_page3;
556 
557 	/* Raid Data */
558 	struct mpt_raid_volume* raid_volumes;
559 	struct mpt_raid_disk*	raid_disks;
560 	u_int			raid_max_volumes;
561 	u_int			raid_max_disks;
562 	u_int			raid_page0_len;
563 	u_int			raid_wakeup;
564 	u_int			raid_rescan;
565 	u_int			raid_resync_rate;
566 	u_int			raid_mwce_setting;
567 	u_int			raid_queue_depth;
568 	u_int			raid_nonopt_volumes;
569 	struct proc	       *raid_thread;
570 	struct callout		raid_timer;
571 
572 	/*
573 	 * PCI Hardware info
574 	 */
575 	struct resource *	pci_irq;	/* Interrupt map for chip */
 576 	void *			ih;		/* Interrupt handle */
577 	struct mpt_pci_cfg	pci_cfg;	/* saved PCI conf registers */
578 
579 	/*
580 	 * DMA Mapping Stuff
581 	 */
582 	struct resource *	pci_reg;	/* Register map for chip */
583 	int			pci_mem_rid;	/* Resource ID */
584 	bus_space_tag_t		pci_st;		/* Bus tag for registers */
585 	bus_space_handle_t	pci_sh;		/* Bus handle for registers */
586 	/* PIO versions of above. */
587 	int			pci_pio_rid;
588 	struct resource *	pci_pio_reg;
589 	bus_space_tag_t		pci_pio_st;
590 	bus_space_handle_t	pci_pio_sh;
591 
592 	bus_dma_tag_t		parent_dmat;	/* DMA tag for parent PCI bus */
593 	bus_dma_tag_t		reply_dmat;	/* DMA tag for reply memory */
594 	bus_dmamap_t		reply_dmap;	/* DMA map for reply memory */
595 	uint8_t		       *reply;		/* KVA of reply memory */
596 	bus_addr_t		reply_phys;	/* BusAddr of reply memory */
597 
598 	bus_dma_tag_t		buffer_dmat;	/* DMA tag for buffers */
 599 	bus_dma_tag_t		request_dmat;	/* DMA tag for request memory */
 600 	bus_dmamap_t		request_dmap;	/* DMA map for request memory */
601 	uint8_t		       *request;	/* KVA of Request memory */
602 	bus_addr_t		request_phys;	/* BusAddr of request memory */
603 
604 	uint32_t		max_seg_cnt;	/* calculated after IOC facts */
605 
606 	/*
607 	 * Hardware management
608 	 */
609 	u_int			reset_cnt;
610 
611 	/*
612 	 * CAM && Software Management
613 	 */
614 	request_t	       *request_pool;
615 	struct req_queue	request_free_list;
616 	struct req_queue	request_pending_list;
617 	struct req_queue	request_timeout_list;
618 
619 
620 	struct cam_sim	       *sim;
621 	struct cam_path	       *path;
622 
623 	struct cam_sim	       *phydisk_sim;
624 	struct cam_path	       *phydisk_path;
625 
626 	struct proc	       *recovery_thread;
627 	request_t	       *tmf_req;
628 
629 	/*
630 	 * Deferred frame acks due to resource shortage.
631 	 */
632 	struct mpt_evtf_list	ack_frames;
633 	/*
634 	 * Target Mode Support
635 	 */
636 	uint32_t		scsi_tgt_handler_id;
637 	request_t **		tgt_cmd_ptrs;
638 	request_t **		els_cmd_ptrs;	/* FC only */
639 
640 	/*
641 	 * *snork*- this is chosen to be here *just in case* somebody
642 	 * forgets to point to it exactly and we index off of trt with
643 	 * CAM_LUN_WILDCARD.
644 	 */
645 	tgt_resource_t		trt_wildcard;	/* wildcard luns */
646 	tgt_resource_t		trt[MPT_MAX_LUNS];
647 	uint16_t		tgt_cmds_allocated;
648 	uint16_t		els_cmds_allocated;	/* FC only */
649 
650 	uint16_t		timeouts;	/* timeout count */
 651 	uint16_t		success;	/* successes after timeout */
652 	uint16_t		sequence;	/* Sequence Number */
653 	uint16_t		pad3;
654 
655 
656 	/* Opposing port in a 929 or 1030, or NULL */
657 	struct mpt_softc *	mpt2;
658 
659 	/* FW Image management */
660 	uint32_t		fw_image_size;
661 	uint8_t		       *fw_image;
662 	bus_dma_tag_t		fw_dmat;	/* DMA tag for firmware image */
663 	bus_dmamap_t		fw_dmap;	/* DMA map for firmware image */
 664 	bus_addr_t		fw_phys;	/* BusAddr of firmware image */
665 
666 	/* Shutdown Event Handler. */
667 	eventhandler_tag         eh;
668 
669 	TAILQ_ENTRY(mpt_softc)	links;
670 };
671 
672 static __inline void mpt_assign_serno(struct mpt_softc *, request_t *);
673 
674 static __inline void
675 mpt_assign_serno(struct mpt_softc *mpt, request_t *req)
676 {
677 	if ((req->serno = mpt->sequence++) == 0) {
678 		req->serno = mpt->sequence++;
679 	}
680 }
681 
682 /***************************** Locking Primitives *****************************/
683 #if __FreeBSD_version < 500000
684 #define	MPT_IFLAGS		INTR_TYPE_CAM
685 #define	MPT_LOCK(mpt)		mpt_lockspl(mpt)
686 #define	MPT_UNLOCK(mpt)		mpt_unlockspl(mpt)
687 #define	MPT_OWNED(mpt)		mpt->mpt_islocked
688 #define	MPTLOCK_2_CAMLOCK	MPT_UNLOCK
689 #define	CAMLOCK_2_MPTLOCK	MPT_LOCK
690 #define	MPT_LOCK_SETUP(mpt)
691 #define	MPT_LOCK_DESTROY(mpt)
692 
693 static __inline void mpt_lockspl(struct mpt_softc *mpt);
694 static __inline void mpt_unlockspl(struct mpt_softc *mpt);
695 
696 static __inline void
697 mpt_lockspl(struct mpt_softc *mpt)
698 {
699        int s;
700 
701        s = splcam();
702        if (mpt->mpt_islocked++ == 0) {
703                mpt->mpt_splsaved = s;
704        } else {
705                splx(s);
706 	       panic("Recursed lock with mask: 0x%x\n", s);
707        }
708 }
709 
710 static __inline void
711 mpt_unlockspl(struct mpt_softc *mpt)
712 {
713        if (mpt->mpt_islocked) {
714                if (--mpt->mpt_islocked == 0) {
715                        splx(mpt->mpt_splsaved);
716                }
717        } else
718 	       panic("Negative lock count\n");
719 }
720 
721 static __inline int
722 mpt_sleep(struct mpt_softc *mpt, void *ident, int priority,
723 	   const char *wmesg, int timo)
724 {
725 	int saved_cnt;
726 	int saved_spl;
727 	int error;
728 
729 	KASSERT(mpt->mpt_islocked <= 1, ("Invalid lock count on tsleep"));
730 	saved_cnt = mpt->mpt_islocked;
731 	saved_spl = mpt->mpt_splsaved;
732 	mpt->mpt_islocked = 0;
733 	error = tsleep(ident, priority, wmesg, timo);
734 	KASSERT(mpt->mpt_islocked == 0, ("Invalid lock count on wakeup"));
735 	mpt->mpt_islocked = saved_cnt;
736 	mpt->mpt_splsaved = saved_spl;
737 	return (error);
738 }
739 
740 #else
741 #ifdef	LOCKING_WORKED_AS_IT_SHOULD
742 #error "Shouldn't Be Here!"
743 #define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE
744 #define	MPT_LOCK_SETUP(mpt)						\
745 		mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF);		\
746 		mpt->mpt_locksetup = 1
747 #define	MPT_LOCK_DESTROY(mpt)						\
748 	if (mpt->mpt_locksetup) {					\
749 		mtx_destroy(&mpt->mpt_lock);				\
750 		mpt->mpt_locksetup = 0;					\
751 	}
752 
753 #define	MPT_LOCK(mpt)		mtx_lock(&(mpt)->mpt_lock)
754 #define	MPT_UNLOCK(mpt)		mtx_unlock(&(mpt)->mpt_lock)
755 #define	MPT_OWNED(mpt)		mtx_owned(&(mpt)->mpt_lock)
756 #define	MPTLOCK_2_CAMLOCK(mpt)	\
757 	mtx_unlock(&(mpt)->mpt_lock); mtx_lock(&Giant)
758 #define	CAMLOCK_2_MPTLOCK(mpt)	\
759 	mtx_unlock(&Giant); mtx_lock(&(mpt)->mpt_lock)
760 #define mpt_sleep(mpt, ident, priority, wmesg, timo) \
761 	msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo)
762 
763 #else
764 
765 #define	MPT_IFLAGS		INTR_TYPE_CAM | INTR_ENTROPY
766 #define	MPT_LOCK_SETUP(mpt)	do { } while (0)
767 #define	MPT_LOCK_DESTROY(mpt)	do { } while (0)
768 #if	0
769 #define	MPT_LOCK(mpt)		\
770 	device_printf(mpt->dev, "LOCK %s:%d\n", __FILE__, __LINE__); 	\
771 	KASSERT(mpt->mpt_locksetup == 0,				\
772 	    ("recursive lock acquire at %s:%d", __FILE__, __LINE__));	\
773 	mpt->mpt_locksetup = 1
774 #define	MPT_UNLOCK(mpt)		\
775 	device_printf(mpt->dev, "UNLK %s:%d\n", __FILE__, __LINE__); 	\
776 	KASSERT(mpt->mpt_locksetup == 1,				\
777 	    ("release unowned lock at %s:%d", __FILE__, __LINE__));	\
778 	mpt->mpt_locksetup = 0
779 #else
780 #define	MPT_LOCK(mpt)							\
781 	KASSERT(mpt->mpt_locksetup == 0,				\
782 	    ("recursive lock acquire at %s:%d", __FILE__, __LINE__));	\
783 	mpt->mpt_locksetup = 1
784 #define	MPT_UNLOCK(mpt)							\
785 	KASSERT(mpt->mpt_locksetup == 1,				\
786 	    ("release unowned lock at %s:%d", __FILE__, __LINE__));	\
787 	mpt->mpt_locksetup = 0
788 #endif
789 #define	MPT_OWNED(mpt)		mpt->mpt_locksetup
790 #define	MPTLOCK_2_CAMLOCK(mpt)	MPT_UNLOCK(mpt)
791 #define	CAMLOCK_2_MPTLOCK(mpt)	MPT_LOCK(mpt)
792 
793 static __inline int
794 mpt_sleep(struct mpt_softc *, void *, int, const char *, int);
795 
796 static __inline int
797 mpt_sleep(struct mpt_softc *mpt, void *i, int p, const char *w, int t)
798 {
799 	int r;
800 	MPT_UNLOCK(mpt);
801 	r = tsleep(i, p, w, t);
802 	MPT_LOCK(mpt);
803 	return (r);
804 }
805 #endif
806 #endif
807 
808 /******************************* Register Access ******************************/
809 static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t);
810 static __inline uint32_t mpt_read(struct mpt_softc *, int);
811 static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t);
812 static __inline uint32_t mpt_pio_read(struct mpt_softc *, int);
813 
814 static __inline void
815 mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
816 {
817 	bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val);
818 }
819 
820 static __inline uint32_t
821 mpt_read(struct mpt_softc *mpt, int offset)
822 {
823 	return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset));
824 }
825 
826 /*
 827  * Some operations (e.g. diagnostic register writes while the ARM processor
 828  * is disabled) must be performed using "PCI pio" operations.  On non-PCI
829  * busses, these operations likely map to normal register accesses.
830  */
831 static __inline void
832 mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
833 {
834 	bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val);
835 }
836 
837 static __inline uint32_t
838 mpt_pio_read(struct mpt_softc *mpt, int offset)
839 {
840 	return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset));
841 }
842 /*********************** Reply Frame/Request Management ***********************/
843 /* Max MPT Reply we are willing to accept (must be power of 2) */
844 #define MPT_REPLY_SIZE   	256
845 
846 /*
847  * Must be less than 16384 in order for target mode to work
848  */
849 #define MPT_MAX_REQUESTS(mpt)	512
850 #define MPT_REQUEST_AREA	512
851 #define MPT_SENSE_SIZE		32	/* included in MPT_REQUEST_AREA */
852 #define MPT_REQ_MEM_SIZE(mpt)	(MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA)
853 
854 #define MPT_CONTEXT_CB_SHIFT	(16)
855 #define MPT_CBI(handle)		(handle >> MPT_CONTEXT_CB_SHIFT)
856 #define MPT_CBI_TO_HID(cbi)	((cbi) << MPT_CONTEXT_CB_SHIFT)
857 #define MPT_CONTEXT_TO_CBI(x)	\
858     (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1))
859 #define MPT_CONTEXT_REQI_MASK	0xFFFF
860 #define MPT_CONTEXT_TO_REQI(x)	((x) & MPT_CONTEXT_REQI_MASK)
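/*
 * Taken together (illustrative sketch): a request's MsgContext is the
 * handler id OR'ed with the request's pool index, assuming the handler
 * id is already in its shifted MPT_CBI_TO_HID() form as the
 * MPT_REPLY_HANDLER_* values above suggest:
 *
 *	msg->MsgContext = htole32(handler_id | req->index);
 *
 * The completion path can then recover both halves with
 * MPT_CONTEXT_TO_CBI() and MPT_CONTEXT_TO_REQI().
 */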
861 
862 /*
863  * Convert a 32bit physical address returned from IOC to an
864  * offset into our reply frame memory or the kvm address needed
865  * to access the data.  The returned address is only the low
866  * 32 bits, so mask our base physical address accordingly.
867  */
868 #define MPT_REPLY_BADDR(x)		\
869 	(x << 1)
870 #define MPT_REPLY_OTOV(m, i) 		\
871 	((void *)(&m->reply[i]))
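/*
 * Illustrative sketch of how these macros combine when handling an
 * address reply, where reply_addr is the address portion of the
 * descriptor posted by the IOC and the reply area is assumed to sit
 * below 4GB as the comment above implies:
 *
 *	reply_baddr = MPT_REPLY_BADDR(reply_addr);
 *	offset      = reply_baddr - (mpt->reply_phys & 0xffffffff);
 *	reply_frame = MPT_REPLY_OTOV(mpt, offset);
 */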
872 
873 #define	MPT_DUMP_REPLY_FRAME(mpt, reply_frame)		\
874 do {							\
875 	if (mpt->verbose >= MPT_PRT_DEBUG)		\
876 		mpt_dump_reply_frame(mpt, reply_frame);	\
877 } while(0)
878 
879 static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt);
880 static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr);
881 
882 /*
883  * Give the reply buffer back to the IOC after we have
884  * finished processing it.
885  */
886 static __inline void
887 mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr)
888 {
889      mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr);
890 }
891 
892 /* Get a reply from the IOC */
893 static __inline uint32_t
894 mpt_pop_reply_queue(struct mpt_softc *mpt)
895 {
896      return mpt_read(mpt, MPT_OFFSET_REPLY_Q);
897 }
898 
899 void
900 mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int);
901 
 902 /************************** Scatter Gather Management *************************/
903 /* MPT_RQSL- size of request frame, in bytes */
904 #define	MPT_RQSL(mpt)		(mpt->request_frame_size << 2)
905 
906 /* MPT_NSGL- how many SG entries can fit in a request frame size */
907 #define	MPT_NSGL(mpt)		(MPT_RQSL(mpt) / sizeof (SGE_IO_UNION))
908 
909 /* MPT_NRFM- how many request frames can fit in each request alloc we make */
910 #define	MPT_NRFM(mpt)		(MPT_REQUEST_AREA / MPT_RQSL(mpt))
911 
912 /*
913  * MPT_NSGL_FIRST- # of SG elements that can fit after
914  * an I/O request but still within the request frame.
915  * Do this safely based upon SGE_IO_UNION.
916  *
917  * Note that the first element is *within* the SCSI request.
918  */
919 #define	MPT_NSGL_FIRST(mpt)	\
920     ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \
921     sizeof (SGE_IO_UNION))
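/*
 * Worked example with purely illustrative numbers: if MPT_RQSL(mpt) were
 * 128 bytes, sizeof (MSG_SCSI_IO_REQUEST) 48 and sizeof (SGE_IO_UNION) 16,
 * then MPT_NSGL(mpt) = 128 / 16 = 8, MPT_NRFM(mpt) = 512 / 128 = 4, and
 * MPT_NSGL_FIRST(mpt) = (128 - 48 + 16) / 16 = 6, i.e. six SG elements
 * fit in the request frame itself before a chain SGE must be used.
 */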
922 
923 /***************************** IOC Initialization *****************************/
924 int mpt_reset(struct mpt_softc *, int /*reinit*/);
925 
926 /****************************** Debugging ************************************/
927 typedef struct mpt_decode_entry {
928 	char    *name;
929 	u_int	 value;
930 	u_int	 mask;
931 } mpt_decode_entry_t;
932 
933 int mpt_decode_value(mpt_decode_entry_t *table, u_int num_entries,
934 		     const char *name, u_int value, u_int *cur_column,
935 		     u_int wrap_point);
936 
937 enum {
938 	MPT_PRT_ALWAYS,
939 	MPT_PRT_FATAL,
940 	MPT_PRT_ERROR,
941 	MPT_PRT_WARN,
942 	MPT_PRT_INFO,
943 	MPT_PRT_DEBUG,
944 	MPT_PRT_DEBUG1,
945 	MPT_PRT_DEBUG2,
946 	MPT_PRT_DEBUG3,
947 	MPT_PRT_TRACE,
948 	MPT_PRT_NONE=100
949 };
950 
951 #if __FreeBSD_version > 500000
952 #define mpt_lprt(mpt, level, ...)		\
953 do {						\
954 	if (level <= (mpt)->verbose)		\
955 		mpt_prt(mpt, __VA_ARGS__);	\
956 } while (0)
957 
958 #define mpt_lprtc(mpt, level, ...)		 \
959 do {						 \
 960 	if (level <= (mpt)->verbose)		 \
961 		mpt_prtc(mpt, __VA_ARGS__);	 \
962 } while (0)
963 #else
964 void mpt_lprt(struct mpt_softc *, int, const char *, ...)
965 	__printflike(3, 4);
966 void mpt_lprtc(struct mpt_softc *, int, const char *, ...)
967 	__printflike(3, 4);
968 #endif
969 void mpt_prt(struct mpt_softc *, const char *, ...)
970 	__printflike(2, 3);
971 void mpt_prtc(struct mpt_softc *, const char *, ...)
972 	__printflike(2, 3);
973 
974 /**************************** Target Mode Related ***************************/
975 static __inline int mpt_cdblen(uint8_t, int);
976 static __inline int
977 mpt_cdblen(uint8_t cdb0, int maxlen)
978 {
979 	int group = cdb0 >> 5;
980 	switch (group) {
981 	case 0:
982 		return (6);
983 	case 1:
984 		return (10);
985 	case 4:
986 	case 5:
987 		return (12);
988 	default:
989 		return (16);
990 	}
991 }
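/*
 * Example: READ(10) has opcode 0x28; 0x28 >> 5 is group 1, so
 * mpt_cdblen(0x28, 16) returns 10.
 */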
992 #ifdef	INVARIANTS
993 static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t);
994 static __inline request_t *
995 mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag)
996 {
997 	uint16_t rtg = (tag >> 18);
998 	KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d\n", tag));
999 	KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array"));
1000 	KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer"));
1001 	return (mpt->tgt_cmd_ptrs[rtg]);
1002 }
1003 
1004 
1005 static __inline int
1006 mpt_req_on_free_list(struct mpt_softc *, request_t *);
1007 static __inline int
1008 mpt_req_on_pending_list(struct mpt_softc *, request_t *);
1009 
1010 static __inline void
1011 mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int);
1012 static __inline void
1013 mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int);
1014 
1015 
1016 /*
1017  * Is request on freelist?
1018  */
1019 static __inline int
1020 mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req)
1021 {
1022 	request_t *lrq;
1023 
1024 	TAILQ_FOREACH(lrq, &mpt->request_free_list, links) {
1025 		if (lrq == req) {
1026 			return (1);
1027 		}
1028 	}
1029 	return (0);
1030 }
1031 
1032 /*
1033  * Is request on pending list?
1034  */
1035 static __inline int
1036 mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req)
1037 {
1038 	request_t *lrq;
1039 
1040 	TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) {
1041 		if (lrq == req) {
1042 			return (1);
1043 		}
1044 	}
1045 	return (0);
1046 }
1047 
1048 /*
1049  * Make sure that req *is* part of one of the special lists
1050  */
1051 static __inline void
1052 mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
1053 {
1054 	int i;
1055 	for (i = 0; i < mpt->els_cmds_allocated; i++) {
1056 		if (req == mpt->els_cmd_ptrs[i]) {
1057 			return;
1058 		}
1059 	}
1060 	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
1061 		if (req == mpt->tgt_cmd_ptrs[i]) {
1062 			return;
1063 		}
1064 	}
1065 	panic("%s(%d): req %p:%u function %x not in els or tgt ptrs\n",
1066 	    s, line, req, req->serno,
1067 	    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function);
1068 }
1069 
1070 /*
1071  * Make sure that req is *not* part of one of the special lists.
1072  */
1073 static __inline void
1074 mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
1075 {
1076 	int i;
1077 	for (i = 0; i < mpt->els_cmds_allocated; i++) {
1078 		KASSERT(req != mpt->els_cmd_ptrs[i],
1079 		    ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d\n",
1080 		    s, line, req, req->serno,
1081 		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
1082 	}
1083 	for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
1084 		KASSERT(req != mpt->tgt_cmd_ptrs[i],
1085 		    ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d\n",
1086 		    s, line, req, req->serno,
1087 		    ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
1088 	}
1089 }
1090 #endif
1091 
1092 typedef enum {
1093 	MPT_ABORT_TASK_SET=1234,
1094 	MPT_CLEAR_TASK_SET,
1095 	MPT_TARGET_RESET,
1096 	MPT_CLEAR_ACA,
1097 	MPT_TERMINATE_TASK,
1098 	MPT_NIL_TMT_VALUE=5678
1099 } mpt_task_mgmt_t;
1100 
1101 /**************************** Unclassified Routines ***************************/
1102 void		mpt_send_cmd(struct mpt_softc *mpt, request_t *req);
1103 int		mpt_recv_handshake_reply(struct mpt_softc *mpt,
1104 					 size_t reply_len, void *reply);
1105 int		mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1106 			     mpt_req_state_t state, mpt_req_state_t mask,
1107 			     int sleep_ok, int time_ms);
1108 void		mpt_enable_ints(struct mpt_softc *mpt);
1109 void		mpt_disable_ints(struct mpt_softc *mpt);
1110 int		mpt_attach(struct mpt_softc *mpt);
1111 int		mpt_shutdown(struct mpt_softc *mpt);
1112 int		mpt_detach(struct mpt_softc *mpt);
1113 int		mpt_send_handshake_cmd(struct mpt_softc *mpt,
1114 				       size_t len, void *cmd);
1115 request_t *	mpt_get_request(struct mpt_softc *mpt, int sleep_ok);
1116 void		mpt_free_request(struct mpt_softc *mpt, request_t *req);
1117 void		mpt_intr(void *arg);
1118 void		mpt_check_doorbell(struct mpt_softc *mpt);
1119 void		mpt_dump_reply_frame(struct mpt_softc *mpt,
1120 				     MSG_DEFAULT_REPLY *reply_frame);
1121 
1122 void		mpt_set_config_regs(struct mpt_softc *);
1123 int		mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/,
1124 				  u_int /*Action*/, u_int /*PageVersion*/,
1125 				  u_int /*PageLength*/, u_int /*PageNumber*/,
1126 				  u_int /*PageType*/, uint32_t /*PageAddress*/,
1127 				  bus_addr_t /*addr*/, bus_size_t/*len*/,
1128 				  int /*sleep_ok*/, int /*timeout_ms*/);
1129 int		mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/,
1130 				    int /*PageNumber*/,
1131 				    uint32_t /*PageAddress*/,
1132 				    CONFIG_PAGE_HEADER *,
1133 				    int /*sleep_ok*/, int /*timeout_ms*/);
1134 int		mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/,
1135 				  uint32_t /*PageAddress*/,
1136 				  CONFIG_PAGE_HEADER *, size_t /*len*/,
1137 				  int /*sleep_ok*/, int /*timeout_ms*/);
1138 int		mpt_write_cfg_page(struct mpt_softc *, int /*Action*/,
1139 				   uint32_t /*PageAddress*/,
1140 				   CONFIG_PAGE_HEADER *, size_t /*len*/,
1141 				   int /*sleep_ok*/, int /*timeout_ms*/);
1142 static __inline int
1143 mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
1144 		      CONFIG_PAGE_HEADER *hdr, size_t len,
1145 		      int sleep_ok, int timeout_ms)
1146 {
1147 	return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
1148 				  PageAddress, hdr, len, sleep_ok, timeout_ms));
1149 }
1150 
1151 static __inline int
1152 mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
1153 		       CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1154 		       int timeout_ms)
1155 {
1156 	return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT,
1157 				   PageAddress, hdr, len, sleep_ok,
1158 				   timeout_ms));
1159 }
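/*
 * Illustrative sketch only: reading a configuration page is a two-step
 * operation -- fetch the page header, then read the current page using
 * that header.  The page chosen, sleep flag, and timeout below are
 * assumptions made for this example.
 *
 *	CONFIG_PAGE_IOC_2 page;
 *
 *	error = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0,
 *	    &page.Header, FALSE, 5000);
 *	if (error == 0)
 *		error = mpt_read_cur_cfg_page(mpt, 0, &page.Header,
 *		    sizeof(page), FALSE, 5000);
 */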
1160 
1161 /* mpt_debug.c functions */
1162 void mpt_print_reply(void *vmsg);
1163 void mpt_print_db(uint32_t mb);
1164 void mpt_print_config_reply(void *vmsg);
1165 char *mpt_ioc_diag(uint32_t diag);
1166 void mpt_req_state(mpt_req_state_t state);
1167 void mpt_print_config_request(void *vmsg);
1168 void mpt_print_request(void *vmsg);
1169 void mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *msg);
1170 void mpt_dump_sgl(SGE_IO_UNION *se, int offset);
1171 #endif /* _MPT_H_ */
1172