xref: /freebsd/sys/dev/mpt/mpt.c (revision 282a3889ebf826db9839be296ff1dd903f6d6d6e)
1 /*-
2  * Generic routines for LSI Fusion adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c) 2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  *
61  *
62  * Support from LSI-Logic has also gone a great deal toward making this a
63  * workable subsystem and is gratefully acknowledged.
64  */
65 /*-
66  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
67  * Copyright (c) 2005, WHEEL Sp. z o.o.
68  * Copyright (c) 2004, 2005 Justin T. Gibbs
69  * All rights reserved.
70  *
71  * Redistribution and use in source and binary forms, with or without
72  * modification, are permitted provided that the following conditions are
73  * met:
74  * 1. Redistributions of source code must retain the above copyright
75  *    notice, this list of conditions and the following disclaimer.
76  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
77  *    substantially similar to the "NO WARRANTY" disclaimer below
78  *    ("Disclaimer") and any redistribution must be conditioned upon including
79  *    a substantially similar Disclaimer requirement for further binary
80  *    redistribution.
81  * 3. Neither the names of the above listed copyright holders nor the names
82  *    of any contributors may be used to endorse or promote products derived
83  *    from this software without specific prior written permission.
84  *
85  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
86  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
89  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
90  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
91  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
92  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
93  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
94  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
95  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96  */
97 
98 #include <sys/cdefs.h>
99 __FBSDID("$FreeBSD$");
100 
101 #include <dev/mpt/mpt.h>
102 #include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
103 #include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */
104 
105 #include <dev/mpt/mpilib/mpi.h>
106 #include <dev/mpt/mpilib/mpi_ioc.h>
107 #include <dev/mpt/mpilib/mpi_fc.h>
108 #include <dev/mpt/mpilib/mpi_targ.h>
109 
110 #include <sys/sysctl.h>
111 
112 #define MPT_MAX_TRYS 3
113 #define MPT_MAX_WAIT 300000
114 
115 static int maxwait_ack = 0;
116 static int maxwait_int = 0;
117 static int maxwait_state = 0;
118 
119 static TAILQ_HEAD(, mpt_softc)	mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
120 mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];
121 
122 static mpt_reply_handler_t mpt_default_reply_handler;
123 static mpt_reply_handler_t mpt_config_reply_handler;
124 static mpt_reply_handler_t mpt_handshake_reply_handler;
125 static mpt_reply_handler_t mpt_event_reply_handler;
126 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
127 			       MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
128 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
129 static int mpt_soft_reset(struct mpt_softc *mpt);
130 static void mpt_hard_reset(struct mpt_softc *mpt);
131 static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
132 static int mpt_enable_ioc(struct mpt_softc *mpt, int);
133 
134 /************************* Personality Module Support *************************/
135 /*
136  * We include one extra entry that is guaranteed to be NULL
137  * to simplify our iterator.
138  */
139 static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
140 static __inline struct mpt_personality*
141 	mpt_pers_find(struct mpt_softc *, u_int);
142 static __inline struct mpt_personality*
143 	mpt_pers_find_reverse(struct mpt_softc *, u_int);
144 
145 static __inline struct mpt_personality *
146 mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
147 {
148 	KASSERT(start_at <= MPT_MAX_PERSONALITIES,
149 		("mpt_pers_find: starting position out of range\n"));
150 
151 	while (start_at < MPT_MAX_PERSONALITIES
152 	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
153 		start_at++;
154 	}
155 	return (mpt_personalities[start_at]);
156 }
157 
158 /*
159  * Used infrequently, so no need to optimize like the forward
160  * traversal, which exploits the guaranteed-NULL entry at index MAX.
161  * Termination here relies on the u_int start_at wrapping past zero.
162  */
163 static __inline struct mpt_personality *
164 mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
165 {
166 	while (start_at < MPT_MAX_PERSONALITIES
167 	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
168 		start_at--;
169 	}
170 	if (start_at < MPT_MAX_PERSONALITIES)
171 		return (mpt_personalities[start_at]);
172 	return (NULL);
173 }
174 
175 #define MPT_PERS_FOREACH(mpt, pers)				\
176 	for (pers = mpt_pers_find(mpt, /*start_at*/0);		\
177 	     pers != NULL;					\
178 	     pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))
179 
180 #define MPT_PERS_FOREACH_REVERSE(mpt, pers)				\
181 	for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
182 	     pers != NULL;						\
183 	     pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
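/*
 * Illustrative use of the iterator macros (a sketch mirroring
 * mpt_postattach() below, not additional driver code):
 *
 *	struct mpt_personality *pers;
 *
 *	MPT_PERS_FOREACH(mpt, pers)
 *		pers->ready(mpt);
 *
 * The forward form terminates when mpt_pers_find() walks off the end of
 * the table and returns the guaranteed-NULL sentinel entry.
 */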
184 
185 static mpt_load_handler_t      mpt_stdload;
186 static mpt_probe_handler_t     mpt_stdprobe;
187 static mpt_attach_handler_t    mpt_stdattach;
188 static mpt_enable_handler_t    mpt_stdenable;
189 static mpt_ready_handler_t     mpt_stdready;
190 static mpt_event_handler_t     mpt_stdevent;
191 static mpt_reset_handler_t     mpt_stdreset;
192 static mpt_shutdown_handler_t  mpt_stdshutdown;
193 static mpt_detach_handler_t    mpt_stddetach;
194 static mpt_unload_handler_t    mpt_stdunload;
195 static struct mpt_personality mpt_default_personality =
196 {
197 	.load		= mpt_stdload,
198 	.probe		= mpt_stdprobe,
199 	.attach		= mpt_stdattach,
200 	.enable		= mpt_stdenable,
201 	.ready		= mpt_stdready,
202 	.event		= mpt_stdevent,
203 	.reset		= mpt_stdreset,
204 	.shutdown	= mpt_stdshutdown,
205 	.detach		= mpt_stddetach,
206 	.unload		= mpt_stdunload
207 };
208 
209 static mpt_load_handler_t      mpt_core_load;
210 static mpt_attach_handler_t    mpt_core_attach;
211 static mpt_enable_handler_t    mpt_core_enable;
212 static mpt_reset_handler_t     mpt_core_ioc_reset;
213 static mpt_event_handler_t     mpt_core_event;
214 static mpt_shutdown_handler_t  mpt_core_shutdown;
215 static mpt_shutdown_handler_t  mpt_core_detach;
216 static mpt_unload_handler_t    mpt_core_unload;
217 static struct mpt_personality mpt_core_personality =
218 {
219 	.name		= "mpt_core",
220 	.load		= mpt_core_load,
221 	.attach		= mpt_core_attach,
222 	.enable		= mpt_core_enable,
223 	.event		= mpt_core_event,
224 	.reset		= mpt_core_ioc_reset,
225 	.shutdown	= mpt_core_shutdown,
226 	.detach		= mpt_core_detach,
227 	.unload		= mpt_core_unload,
228 };
229 
230 /*
231  * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
232  * ordering information.  We want the core to always register FIRST.
233  * Other modules are set to SI_ORDER_SECOND.
234  */
235 static moduledata_t mpt_core_mod = {
236 	"mpt_core", mpt_modevent, &mpt_core_personality
237 };
238 DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
239 MODULE_VERSION(mpt_core, 1);
240 
241 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
242 
243 int
244 mpt_modevent(module_t mod, int type, void *data)
245 {
246 	struct mpt_personality *pers;
247 	int error;
248 
249 	pers = (struct mpt_personality *)data;
250 
251 	error = 0;
252 	switch (type) {
253 	case MOD_LOAD:
254 	{
255 		mpt_load_handler_t **def_handler;
256 		mpt_load_handler_t **pers_handler;
257 		int i;
258 
259 		for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
260 			if (mpt_personalities[i] == NULL)
261 				break;
262 		}
263 		if (i >= MPT_MAX_PERSONALITIES) {
264 			error = ENOMEM;
265 			break;
266 		}
267 		pers->id = i;
268 		mpt_personalities[i] = pers;
269 
270 		/* Install standard/noop handlers for any NULL entries. */
271 		def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
272 		pers_handler = MPT_PERS_FIRST_HANDLER(pers);
273 		while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
274 			if (*pers_handler == NULL)
275 				*pers_handler = *def_handler;
276 			pers_handler++;
277 			def_handler++;
278 		}
279 
280 		error = (pers->load(pers));
281 		if (error != 0)
282 			mpt_personalities[i] = NULL;
283 		break;
284 	}
285 	case MOD_SHUTDOWN:
286 		break;
287 #if __FreeBSD_version >= 500000
288 	case MOD_QUIESCE:
289 		break;
290 #endif
291 	case MOD_UNLOAD:
292 		error = pers->unload(pers);
293 		mpt_personalities[pers->id] = NULL;
294 		break;
295 	default:
296 		error = EINVAL;
297 		break;
298 	}
299 	return (error);
300 }
301 
302 int
303 mpt_stdload(struct mpt_personality *pers)
304 {
305 	/* Load is always successful. */
306 	return (0);
307 }
308 
309 int
310 mpt_stdprobe(struct mpt_softc *mpt)
311 {
312 	/* Probe is always successful. */
313 	return (0);
314 }
315 
316 int
317 mpt_stdattach(struct mpt_softc *mpt)
318 {
319 	/* Attach is always successful. */
320 	return (0);
321 }
322 
323 int
324 mpt_stdenable(struct mpt_softc *mpt)
325 {
326 	/* Enable is always successful. */
327 	return (0);
328 }
329 
330 void
331 mpt_stdready(struct mpt_softc *mpt)
332 {
333 }
334 
335 
336 int
337 mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
338 {
339 	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
340 	/* Event was not for us. */
341 	return (0);
342 }
343 
344 void
345 mpt_stdreset(struct mpt_softc *mpt, int type)
346 {
347 }
348 
349 void
350 mpt_stdshutdown(struct mpt_softc *mpt)
351 {
352 }
353 
354 void
355 mpt_stddetach(struct mpt_softc *mpt)
356 {
357 }
358 
359 int
360 mpt_stdunload(struct mpt_personality *pers)
361 {
362 	/* Unload is always successful. */
363 	return (0);
364 }
365 
366 /*
367  * Post driver attachment, we may want to perform some global actions.
368  * Here is the hook to do so.
369  */
370 
371 static void
372 mpt_postattach(void *unused)
373 {
374 	struct mpt_softc *mpt;
375 	struct mpt_personality *pers;
376 
377 	TAILQ_FOREACH(mpt, &mpt_tailq, links) {
378 		MPT_PERS_FOREACH(mpt, pers)
379 			pers->ready(mpt);
380 	}
381 }
382 SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL);
383 
384 
385 /******************************* Bus DMA Support ******************************/
386 void
387 mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
388 {
389 	struct mpt_map_info *map_info;
390 
391 	map_info = (struct mpt_map_info *)arg;
392 	map_info->error = error;
393 	map_info->phys = segs->ds_addr;
394 }
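/*
 * mpt_map_rquest() is the callback handed to bus_dmamap_load() when the
 * request and reply areas are loaded; it simply latches the bus address
 * of the first (and only) segment.  Hedged sketch of the calling pattern
 * (the local names here are illustrative, not the softc fields used by
 * the bus front-ends):
 *
 *	struct mpt_map_info mi;
 *
 *	bus_dmamap_load(dmat, dmap, vaddr, len, mpt_map_rquest, &mi,
 *	    BUS_DMA_NOWAIT);
 *	if (mi.error == 0)
 *		bus_addr = mi.phys;
 */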
395 
396 /**************************** Reply/Event Handling ****************************/
397 int
398 mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
399 		     mpt_handler_t handler, uint32_t *phandler_id)
400 {
401 
402 	switch (type) {
403 	case MPT_HANDLER_REPLY:
404 	{
405 		u_int cbi;
406 		u_int free_cbi;
407 
408 		if (phandler_id == NULL)
409 			return (EINVAL);
410 
411 		free_cbi = MPT_HANDLER_ID_NONE;
412 		for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
413 			/*
414 			 * If the same handler is registered multiple
415 			 * times, don't error out.  Just return the
416 			 * index of the original registration.
417 			 */
418 			if (mpt_reply_handlers[cbi] == handler.reply_handler) {
419 				*phandler_id = MPT_CBI_TO_HID(cbi);
420 				return (0);
421 			}
422 
423 			/*
424 			 * Fill from the front in the hope that
425 			 * all registered handlers consume only a
426 			 * single cache line.
427 			 *
428 			 * We don't break on the first empty slot so
429 			 * that the full table is checked to see if
430 			 * this handler was previously registered.
431 			 */
432 			if (free_cbi == MPT_HANDLER_ID_NONE &&
433 			    (mpt_reply_handlers[cbi]
434 			  == mpt_default_reply_handler))
435 				free_cbi = cbi;
436 		}
437 		if (free_cbi == MPT_HANDLER_ID_NONE) {
438 			return (ENOMEM);
439 		}
440 		mpt_reply_handlers[free_cbi] = handler.reply_handler;
441 		*phandler_id = MPT_CBI_TO_HID(free_cbi);
442 		break;
443 	}
444 	default:
445 		mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
446 		return (EINVAL);
447 	}
448 	return (0);
449 }
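/*
 * Hedged example of how a personality registers a reply handler (the
 * handler name below is illustrative; see mpt_cam.c and mpt_raid.c for
 * the real registrations):
 *
 *	mpt_handler_t handler;
 *	uint32_t handler_id;
 *
 *	handler.reply_handler = my_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &handler_id);
 *
 * The returned handler_id is later OR'd into request MsgContext values so
 * that mpt_intr() can route completions back to the owning handler.
 */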
450 
451 int
452 mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
453 		       mpt_handler_t handler, uint32_t handler_id)
454 {
455 
456 	switch (type) {
457 	case MPT_HANDLER_REPLY:
458 	{
459 		u_int cbi;
460 
461 		cbi = MPT_CBI(handler_id);
462 		if (cbi >= MPT_NUM_REPLY_HANDLERS
463 		 || mpt_reply_handlers[cbi] != handler.reply_handler)
464 			return (ENOENT);
465 		mpt_reply_handlers[cbi] = mpt_default_reply_handler;
466 		break;
467 	}
468 	default:
469 		mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
470 		return (EINVAL);
471 	}
472 	return (0);
473 }
474 
475 static int
476 mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
477 	uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
478 {
479 	mpt_prt(mpt,
480 	    "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
481 	    req, req->serno, reply_desc, reply_frame);
482 
483 	if (reply_frame != NULL)
484 		mpt_dump_reply_frame(mpt, reply_frame);
485 
486 	mpt_prt(mpt, "Reply Frame Ignored\n");
487 
488 	return (/*free_reply*/TRUE);
489 }
490 
491 static int
492 mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
493  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
494 {
495 	if (req != NULL) {
496 
497 		if (reply_frame != NULL) {
498 			MSG_CONFIG *cfgp;
499 			MSG_CONFIG_REPLY *reply;
500 
501 			cfgp = (MSG_CONFIG *)req->req_vbuf;
502 			reply = (MSG_CONFIG_REPLY *)reply_frame;
503 			req->IOCStatus = le16toh(reply_frame->IOCStatus);
504 			bcopy(&reply->Header, &cfgp->Header,
505 			      sizeof(cfgp->Header));
506 			cfgp->ExtPageLength = reply->ExtPageLength;
507 			cfgp->ExtPageType = reply->ExtPageType;
508 		}
509 		req->state &= ~REQ_STATE_QUEUED;
510 		req->state |= REQ_STATE_DONE;
511 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
512 		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
513 			wakeup(req);
514 		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
515 			/*
516 			 * Whew- we can free this request (late completion)
517 			 */
518 			mpt_free_request(mpt, req);
519 		}
520 	}
521 
522 	return (TRUE);
523 }
524 
525 static int
526 mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
527  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
528 {
529 	/* Nothing to be done. */
530 	return (TRUE);
531 }
532 
533 static int
534 mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
535     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
536 {
537 	int free_reply;
538 
539 	KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
540 	KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));
541 
542 	free_reply = TRUE;
543 	switch (reply_frame->Function) {
544 	case MPI_FUNCTION_EVENT_NOTIFICATION:
545 	{
546 		MSG_EVENT_NOTIFY_REPLY *msg;
547 		struct mpt_personality *pers;
548 		u_int handled;
549 
550 		handled = 0;
551 		msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
552 		msg->EventDataLength = le16toh(msg->EventDataLength);
553 		msg->IOCStatus = le16toh(msg->IOCStatus);
554 		msg->IOCLogInfo = le32toh(msg->IOCLogInfo);
555 		msg->Event = le32toh(msg->Event);
556 		MPT_PERS_FOREACH(mpt, pers)
557 			handled += pers->event(mpt, req, msg);
558 
559 		if (handled == 0 && mpt->mpt_pers_mask == 0) {
560 			mpt_lprt(mpt, MPT_PRT_INFO,
561 				"No Handlers For Any Event Notify Frames. "
562 				"Event %#x (ACK %sequired).\n",
563 				msg->Event, msg->AckRequired? "r" : "not r");
564 		} else if (handled == 0) {
565 			mpt_lprt(mpt, MPT_PRT_WARN,
566 				"Unhandled Event Notify Frame. Event %#x "
567 				"(ACK %sequired).\n",
568 				msg->Event, msg->AckRequired? "r" : "not r");
569 		}
570 
571 		if (msg->AckRequired) {
572 			request_t *ack_req;
573 			uint32_t context;
574 
575 			context = req->index | MPT_REPLY_HANDLER_EVENTS;
576 			ack_req = mpt_get_request(mpt, FALSE);
577 			if (ack_req == NULL) {
578 				struct mpt_evtf_record *evtf;
579 
580 				evtf = (struct mpt_evtf_record *)reply_frame;
581 				evtf->context = context;
582 				LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
583 				free_reply = FALSE;
584 				break;
585 			}
586 			mpt_send_event_ack(mpt, ack_req, msg, context);
587 			/*
588 			 * Don't check for CONTINUATION_REPLY here
589 			 */
590 			return (free_reply);
591 		}
592 		break;
593 	}
594 	case MPI_FUNCTION_PORT_ENABLE:
595 		mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n");
596 		break;
597 	case MPI_FUNCTION_EVENT_ACK:
598 		break;
599 	default:
600 		mpt_prt(mpt, "unknown event function: %x\n",
601 			reply_frame->Function);
602 		break;
603 	}
604 
605 	/*
606 	 * I'm not sure that this continuation stuff works as it should.
607 	 *
608 	 * I've had FC async events occur that free the frame up because
609 	 * the continuation bit isn't set, and then additional async events
610 	 * then occur using the same context. As you might imagine, this
611 	 * leads to Very Bad Things.
612 	 *
613 	 * Let's just be safe for now and not free them up until we figure
614 	 * out what's actually happening here.
615 	 */
616 #if	0
617 	if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
618 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
619 		mpt_free_request(mpt, req);
620 		mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
621 		    reply_frame->Function, req, req->serno);
622 		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
623 			MSG_EVENT_NOTIFY_REPLY *msg =
624 			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
625 			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
626 			    msg->Event, msg->AckRequired);
627 		}
628 	} else {
629 		mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
630 		    reply_frame->Function, req, req->serno);
631 		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
632 			MSG_EVENT_NOTIFY_REPLY *msg =
633 			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
634 			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
635 			    msg->Event, msg->AckRequired);
636 		}
637 		mpt_prtc(mpt, "\n");
638 	}
639 #endif
640 	return (free_reply);
641 }
642 
643 /*
644  * Process an asynchronous event from the IOC.
645  */
646 static int
647 mpt_core_event(struct mpt_softc *mpt, request_t *req,
648 	       MSG_EVENT_NOTIFY_REPLY *msg)
649 {
650 	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
651                  msg->Event & 0xFF);
652 	switch(msg->Event & 0xFF) {
653 	case MPI_EVENT_NONE:
654 		break;
655 	case MPI_EVENT_LOG_DATA:
656 	{
657 		int i;
658 
659 		/* Some error occurred that LSI wants logged */
660 		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
661 			msg->IOCLogInfo);
662 		mpt_prt(mpt, "\tEvtLogData: Event Data:");
663 		for (i = 0; i < msg->EventDataLength; i++)
664 			mpt_prtc(mpt, "  %08x", msg->Data[i]);
665 		mpt_prtc(mpt, "\n");
666 		break;
667 	}
668 	case MPI_EVENT_EVENT_CHANGE:
669 		/*
670 		 * This is just an acknowledgement
671 		 * of our mpt_send_event_request.
672 		 */
673 		break;
674 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
675 		break;
676 	default:
677 		return (0);
678 		break;
679 	}
680 	return (1);
681 }
682 
683 static void
684 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
685 		   MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
686 {
687 	MSG_EVENT_ACK *ackp;
688 
689 	ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
690 	memset(ackp, 0, sizeof (*ackp));
691 	ackp->Function = MPI_FUNCTION_EVENT_ACK;
692 	ackp->Event = htole32(msg->Event);
693 	ackp->EventContext = htole32(msg->EventContext);
694 	ackp->MsgContext = htole32(context);
695 	mpt_check_doorbell(mpt);
696 	mpt_send_cmd(mpt, ack_req);
697 }
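/*
 * Note on acknowledgements: when an event with AckRequired arrives and no
 * request buffer is available, mpt_event_reply_handler() stashes the reply
 * frame on mpt->ack_frames instead of freeing it.  The deferred ack is
 * replayed from mpt_free_request() once a request is returned to the pool,
 * and only then is the reply frame handed back to the IOC.
 */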
698 
699 /***************************** Interrupt Handling *****************************/
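/*
 * Reply descriptors popped from the reply queue come in two flavors:
 * address replies (MPI_ADDRESS_REPLY_A_BIT set), which point at a full
 * reply frame in host memory, and context-only replies.  Either way the
 * context value is split to locate the callback and originating request,
 * roughly (sketch of the dispatch below):
 *
 *	cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
 *	req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
 *	req = &mpt->request_pool[req_index];
 *	free_rf = mpt_reply_handlers[cb_index](mpt, req, reply_desc,
 *	    reply_frame);
 */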
700 void
701 mpt_intr(void *arg)
702 {
703 	struct mpt_softc *mpt;
704 	uint32_t reply_desc;
705 	int ntrips = 0;
706 
707 	mpt = (struct mpt_softc *)arg;
708 	mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
709 	MPT_LOCK_ASSERT(mpt);
710 
711 	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
712 		request_t	  *req;
713 		MSG_DEFAULT_REPLY *reply_frame;
714 		uint32_t	   reply_baddr;
715 		uint32_t           ctxt_idx;
716 		u_int		   cb_index;
717 		u_int		   req_index;
718 		int		   free_rf;
719 
720 		req = NULL;
721 		reply_frame = NULL;
722 		reply_baddr = 0;
723 		if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
724 			u_int offset;
725 			/*
726 			 * Ensure that the reply frame is coherent.
727 			 */
728 			reply_baddr = MPT_REPLY_BADDR(reply_desc);
729 			offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
730 			bus_dmamap_sync_range(mpt->reply_dmat,
731 			    mpt->reply_dmap, offset, MPT_REPLY_SIZE,
732 			    BUS_DMASYNC_POSTREAD);
733 			reply_frame = MPT_REPLY_OTOV(mpt, offset);
734 			ctxt_idx = le32toh(reply_frame->MsgContext);
735 		} else {
736 			uint32_t type;
737 
738 			type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
739 			ctxt_idx = reply_desc;
740 			mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
741 				    reply_desc);
742 
743 			switch (type) {
744 			case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
745 				ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
746 				break;
747 			case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
748 				ctxt_idx = GET_IO_INDEX(reply_desc);
749 				if (mpt->tgt_cmd_ptrs == NULL) {
750 					mpt_prt(mpt,
751 					    "mpt_intr: no target cmd ptrs\n");
752 					reply_desc = MPT_REPLY_EMPTY;
753 					break;
754 				}
755 				if (ctxt_idx >= mpt->tgt_cmds_allocated) {
756 					mpt_prt(mpt,
757 					    "mpt_intr: bad tgt cmd ctxt %u\n",
758 					    ctxt_idx);
759 					reply_desc = MPT_REPLY_EMPTY;
760 					ntrips = 1000;
761 					break;
762 				}
763 				req = mpt->tgt_cmd_ptrs[ctxt_idx];
764 				if (req == NULL) {
765 					mpt_prt(mpt, "no request backpointer "
766 					    "at index %u", ctxt_idx);
767 					reply_desc = MPT_REPLY_EMPTY;
768 					ntrips = 1000;
769 					break;
770 				}
771 				/*
772 				 * Reformulate ctxt_idx to be just as if
773 				 * it were another type of context reply
774 				 * so the code below will find the request
775 				 * via indexing into the pool.
776 				 */
777 				ctxt_idx =
778 				    req->index | mpt->scsi_tgt_handler_id;
779 				req = NULL;
780 				break;
781 			case MPI_CONTEXT_REPLY_TYPE_LAN:
782 				mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
783 				    reply_desc);
784 				reply_desc = MPT_REPLY_EMPTY;
785 				break;
786 			default:
787 				mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
788 				reply_desc = MPT_REPLY_EMPTY;
789 				break;
790 			}
791 			if (reply_desc == MPT_REPLY_EMPTY) {
792 				if (ntrips++ > 1000) {
793 					break;
794 				}
795 				continue;
796 			}
797 		}
798 
799 		cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
800 		req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
801 		if (req_index < MPT_MAX_REQUESTS(mpt)) {
802 			req = &mpt->request_pool[req_index];
803 		} else {
804 			mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
805 			    " 0x%x)\n", req_index, reply_desc);
806 		}
807 
808 		free_rf = mpt_reply_handlers[cb_index](mpt, req,
809 		    reply_desc, reply_frame);
810 
811 		if (reply_frame != NULL && free_rf) {
812 			mpt_free_reply(mpt, reply_baddr);
813 		}
814 
815 		/*
816 		 * If we got ourselves disabled, don't get stuck in a loop
817 		 */
818 		if (mpt->disabled) {
819 			mpt_disable_ints(mpt);
820 			break;
821 		}
822 		if (ntrips++ > 1000) {
823 			break;
824 		}
825 	}
826 	mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
827 }
828 
829 /******************************* Error Recovery *******************************/
830 void
831 mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
832 			    u_int iocstatus)
833 {
834 	MSG_DEFAULT_REPLY  ioc_status_frame;
835 	request_t	  *req;
836 
837 	memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
838 	ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
839 	ioc_status_frame.IOCStatus = iocstatus;
840 	while((req = TAILQ_FIRST(chain)) != NULL) {
841 		MSG_REQUEST_HEADER *msg_hdr;
842 		u_int		    cb_index;
843 
844 		TAILQ_REMOVE(chain, req, links);
845 		msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
846 		ioc_status_frame.Function = msg_hdr->Function;
847 		ioc_status_frame.MsgContext = msg_hdr->MsgContext;
848 		cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
849 		mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
850 		    &ioc_status_frame);
851 	}
852 }
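/*
 * mpt_complete_request_chain() synthesizes a local reply frame carrying
 * the supplied IOCStatus and runs every queued request through its normal
 * reply handler.  mpt_core_ioc_reset() below uses it to fail the entire
 * pending list with MPI_IOCSTATUS_INVALID_STATE after an IOC reset.
 */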
853 
854 /********************************* Diagnostics ********************************/
855 /*
856  * Perform a diagnostic dump of a reply frame.
857  */
858 void
859 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
860 {
861 	mpt_prt(mpt, "Address Reply:\n");
862 	mpt_print_reply(reply_frame);
863 }
864 
865 /******************************* Doorbell Access ******************************/
866 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
867 static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);
868 
869 static __inline uint32_t
870 mpt_rd_db(struct mpt_softc *mpt)
871 {
872 	return mpt_read(mpt, MPT_OFFSET_DOORBELL);
873 }
874 
875 static __inline uint32_t
876 mpt_rd_intr(struct mpt_softc *mpt)
877 {
878 	return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
879 }
880 
881 /* Busy wait for a doorbell to be read by the IOC */
882 static int
883 mpt_wait_db_ack(struct mpt_softc *mpt)
884 {
885 	int i;
886 	for (i=0; i < MPT_MAX_WAIT; i++) {
887 		if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
888 			maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
889 			return (MPT_OK);
890 		}
891 		DELAY(200);
892 	}
893 	return (MPT_FAIL);
894 }
895 
896 /* Busy wait for a doorbell interrupt */
897 static int
898 mpt_wait_db_int(struct mpt_softc *mpt)
899 {
900 	int i;
901 	for (i = 0; i < MPT_MAX_WAIT; i++) {
902 		if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
903 			maxwait_int = i > maxwait_int ? i : maxwait_int;
904 			return MPT_OK;
905 		}
906 		DELAY(100);
907 	}
908 	return (MPT_FAIL);
909 }
910 
911 /* Warn if the IOC is not in the running state */
912 void
913 mpt_check_doorbell(struct mpt_softc *mpt)
914 {
915 	uint32_t db = mpt_rd_db(mpt);
916 	if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
917 		mpt_prt(mpt, "Device not running\n");
918 		mpt_print_db(db);
919 	}
920 }
921 
922 /* Wait for the IOC to transition to a given state */
923 static int
924 mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
925 {
926 	int i;
927 
928 	for (i = 0; i < MPT_MAX_WAIT; i++) {
929 		uint32_t db = mpt_rd_db(mpt);
930 		if (MPT_STATE(db) == state) {
931 			maxwait_state = i > maxwait_state ? i : maxwait_state;
932 			return (MPT_OK);
933 		}
934 		DELAY(100);
935 	}
936 	return (MPT_FAIL);
937 }
938 
939 
940 /************************ Initialization/Configuration ************************/
941 static int mpt_download_fw(struct mpt_softc *mpt);
942 
943 /* Issue the reset COMMAND to the IOC */
944 static int
945 mpt_soft_reset(struct mpt_softc *mpt)
946 {
947 	mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
948 
949 	/* Have to use hard reset if we are not in Running state */
950 	if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
951 		mpt_prt(mpt, "soft reset failed: device not running\n");
952 		return (MPT_FAIL);
953 	}
954 
955 	/* If the doorbell is in use we don't have a chance of getting
956 	 * a word in since the IOC probably crashed in message
957 	 * processing. So don't waste our time.
958 	 */
959 	if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
960 		mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
961 		return (MPT_FAIL);
962 	}
963 
964 	/* Send the reset request to the IOC */
965 	mpt_write(mpt, MPT_OFFSET_DOORBELL,
966 	    MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
967 	if (mpt_wait_db_ack(mpt) != MPT_OK) {
968 		mpt_prt(mpt, "soft reset failed: ack timeout\n");
969 		return (MPT_FAIL);
970 	}
971 
972 	/* Wait for the IOC to reload and come out of reset state */
973 	if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
974 		mpt_prt(mpt, "soft reset failed: device did not restart\n");
975 		return (MPT_FAIL);
976 	}
977 
978 	return MPT_OK;
979 }
980 
981 static int
982 mpt_enable_diag_mode(struct mpt_softc *mpt)
983 {
984 	int try;
985 
986 	try = 20;
987 	while (--try) {
988 
989 		if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
990 			break;
991 
992 		/* Enable diagnostic registers */
993 		mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
994 		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
995 		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
996 		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
997 		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
998 		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);
999 
1000 		DELAY(100000);
1001 	}
1002 	if (try == 0)
1003 		return (EIO);
1004 	return (0);
1005 }
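/*
 * The MPI_WRSEQ_*_KEY_VALUE writes above are the documented write-sequence
 * "key" that sets the diagnostic write enable (DRWE) bit; writing any other
 * value to MPT_OFFSET_SEQUENCE, as mpt_disable_diag_mode() below does,
 * relocks the diagnostic register.
 */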
1006 
1007 static void
1008 mpt_disable_diag_mode(struct mpt_softc *mpt)
1009 {
1010 	mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
1011 }
1012 
1013 /* This is a magic diagnostic reset that resets all the ARM
1014  * processors in the chip.
1015  */
1016 static void
1017 mpt_hard_reset(struct mpt_softc *mpt)
1018 {
1019 	int error;
1020 	int wait;
1021 	uint32_t diagreg;
1022 
1023 	mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");
1024 
1025 	error = mpt_enable_diag_mode(mpt);
1026 	if (error) {
1027 		mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
1028 		mpt_prt(mpt, "Trying to reset anyway.\n");
1029 	}
1030 
1031 	diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1032 
1033 	/*
1034 	 * This appears to be a workaround required for some
1035 	 * firmware or hardware revs.
1036 	 */
1037 	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
1038 	DELAY(1000);
1039 
1040 	/* Diag. port is now active so we can now hit the reset bit */
1041 	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);
1042 
1043         /*
1044          * Ensure that the reset has finished.  We delay 1ms
1045          * prior to reading the register to make sure the chip
1046          * has sufficiently completed its reset to handle register
1047          * accesses.
1048          */
1049 	wait = 5000;
1050 	do {
1051 		DELAY(1000);
1052 		diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1053 	} while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);
1054 
1055 	if (wait == 0) {
1056 		mpt_prt(mpt, "WARNING - Failed hard reset! "
1057 			"Trying to initialize anyway.\n");
1058 	}
1059 
1060 	/*
1061 	 * If we have firmware to download, it must be loaded before
1062 	 * the controller will become operational.  Do so now.
1063 	 */
1064 	if (mpt->fw_image != NULL) {
1065 
1066 		error = mpt_download_fw(mpt);
1067 
1068 		if (error) {
1069 			mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
1070 			mpt_prt(mpt, "Trying to initialize anyway.\n");
1071 		}
1072 	}
1073 
1074 	/*
1075 	 * Resetting the controller should have disabled write
1076 	 * access to the diagnostic registers, but disable
1077 	 * manually to be sure.
1078 	 */
1079 	mpt_disable_diag_mode(mpt);
1080 }
1081 
1082 static void
1083 mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
1084 {
1085 	/*
1086 	 * Complete all pending requests with a status
1087 	 * appropriate for an IOC reset.
1088 	 */
1089 	mpt_complete_request_chain(mpt, &mpt->request_pending_list,
1090 				   MPI_IOCSTATUS_INVALID_STATE);
1091 }
1092 
1093 
1094 /*
1095  * Reset the IOC when needed. Try the software command first, then if
1096  * needed poke at the magic diagnostic reset. Note that a hard reset
1097  * resets *both* IOCs on dual function chips (FC929 && LSI1030) and
1098  * also fouls up the PCI configuration registers.
1099  */
1100 int
1101 mpt_reset(struct mpt_softc *mpt, int reinit)
1102 {
1103 	struct	mpt_personality *pers;
1104 	int	ret;
1105 	int	retry_cnt = 0;
1106 
1107 	/*
1108 	 * Try a soft reset. If that fails, get out the big hammer.
1109 	 */
1110  again:
1111 	if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
1112 		int	cnt;
1113 		for (cnt = 0; cnt < 5; cnt++) {
1114 			/* Failed; do a hard reset */
1115 			mpt_hard_reset(mpt);
1116 
1117 			/*
1118 			 * Wait for the IOC to reload
1119 			 * and come out of reset state
1120 			 */
1121 			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1122 			if (ret == MPT_OK) {
1123 				break;
1124 			}
1125 			/*
1126 			 * Okay- try to check again...
1127 			 */
1128 			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1129 			if (ret == MPT_OK) {
1130 				break;
1131 			}
1132 			mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
1133 			    retry_cnt, cnt);
1134 		}
1135 	}
1136 
1137 	if (retry_cnt == 0) {
1138 		/*
1139 		 * Invoke reset handlers.  We bump the reset count so
1140 		 * that mpt_wait_req() understands that regardless of
1141 		 * the specified wait condition, it should stop its wait.
1142 		 */
1143 		mpt->reset_cnt++;
1144 		MPT_PERS_FOREACH(mpt, pers)
1145 			pers->reset(mpt, ret);
1146 	}
1147 
1148 	if (reinit) {
1149 		ret = mpt_enable_ioc(mpt, 1);
1150 		if (ret == MPT_OK) {
1151 			mpt_enable_ints(mpt);
1152 		}
1153 	}
1154 	if (ret != MPT_OK && retry_cnt++ < 2) {
1155 		goto again;
1156 	}
1157 	return ret;
1158 }
1159 
1160 /* Return a command buffer to the free queue */
1161 void
1162 mpt_free_request(struct mpt_softc *mpt, request_t *req)
1163 {
1164 	request_t *nxt;
1165 	struct mpt_evtf_record *record;
1166 	uint32_t reply_baddr;
1167 
1168 	if (req == NULL || req != &mpt->request_pool[req->index]) {
1169 		panic("mpt_free_request bad req ptr\n");
1170 		return;
1171 	}
1172 	if ((nxt = req->chain) != NULL) {
1173 		req->chain = NULL;
1174 		mpt_free_request(mpt, nxt);	/* NB: recursion */
1175 	}
1176 	KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
1177 	KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
1178 	MPT_LOCK_ASSERT(mpt);
1179 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1180 	    ("mpt_free_request: req %p:%u func %x already on freelist",
1181 	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1182 	KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1183 	    ("mpt_free_request: req %p:%u func %x on pending list",
1184 	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1185 #ifdef	INVARIANTS
1186 	mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
1187 #endif
1188 
1189 	req->ccb = NULL;
1190 	if (LIST_EMPTY(&mpt->ack_frames)) {
1191 		/*
1192 		 * Insert free ones at the tail
1193 		 */
1194 		req->serno = 0;
1195 		req->state = REQ_STATE_FREE;
1196 #ifdef	INVARIANTS
1197 		memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
1198 #endif
1199 		TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
1200 		if (mpt->getreqwaiter != 0) {
1201 			mpt->getreqwaiter = 0;
1202 			wakeup(&mpt->request_free_list);
1203 		}
1204 		return;
1205 	}
1206 
1207 	/*
1208 	 * Process an ack frame deferred due to resource shortage.
1209 	 */
1210 	record = LIST_FIRST(&mpt->ack_frames);
1211 	LIST_REMOVE(record, links);
1212 	req->state = REQ_STATE_ALLOCATED;
1213 	mpt_assign_serno(mpt, req);
1214 	mpt_send_event_ack(mpt, req, &record->reply, record->context);
1215 	reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
1216 		    + (mpt->reply_phys & 0xFFFFFFFF);
1217 	mpt_free_reply(mpt, reply_baddr);
1218 }
1219 
1220 /* Get a command buffer from the free queue */
1221 request_t *
1222 mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
1223 {
1224 	request_t *req;
1225 
1226 retry:
1227 	MPT_LOCK_ASSERT(mpt);
1228 	req = TAILQ_FIRST(&mpt->request_free_list);
1229 	if (req != NULL) {
1230 		KASSERT(req == &mpt->request_pool[req->index],
1231 		    ("mpt_get_request: corrupted request free list\n"));
1232 		KASSERT(req->state == REQ_STATE_FREE,
1233 		    ("req %p:%u not free on free list %x index %d function %x",
1234 		    req, req->serno, req->state, req->index,
1235 		    ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1236 		TAILQ_REMOVE(&mpt->request_free_list, req, links);
1237 		req->state = REQ_STATE_ALLOCATED;
1238 		req->chain = NULL;
1239 		mpt_assign_serno(mpt, req);
1240 	} else if (sleep_ok != 0) {
1241 		mpt->getreqwaiter = 1;
1242 		mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
1243 		goto retry;
1244 	}
1245 	return (req);
1246 }
1247 
1248 /* Pass the command to the IOC */
1249 void
1250 mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
1251 {
1252 	if (mpt->verbose > MPT_PRT_DEBUG2) {
1253 		mpt_dump_request(mpt, req);
1254 	}
1255 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1256 	    BUS_DMASYNC_PREWRITE);
1257 	req->state |= REQ_STATE_QUEUED;
1258 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1259 	    ("req %p:%u func %x on freelist list in mpt_send_cmd",
1260 	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1261 	KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1262 	    ("req %p:%u func %x already on pending list in mpt_send_cmd",
1263 	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1264 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
1265 	mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
1266 }
1267 
1268 /*
1269  * Wait for a request to complete.
1270  *
1271  * Inputs:
1272  *	mpt		softc of controller executing request
1273  *	req		request to wait for
1274  *	sleep_ok	nonzero implies may sleep in this context
1275  *	time_ms		timeout in ms.  0 implies no timeout.
1276  *
1277  * Return Values:
1278  *	0		Request completed
1279  *	non-0		Timeout fired before request completion.
1280  */
1281 int
1282 mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1283 	     mpt_req_state_t state, mpt_req_state_t mask,
1284 	     int sleep_ok, int time_ms)
1285 {
1286 	int   error;
1287 	int   timeout;
1288 	u_int saved_cnt;
1289 
1290 	/*
1291 	 * timeout is in ms.  0 indicates infinite wait.
1292 	 * Convert to ticks or 500us units depending on
1293 	 * our sleep mode.
1294 	 */
1295 	if (sleep_ok != 0) {
1296 		timeout = (time_ms * hz) / 1000;
1297 	} else {
1298 		timeout = time_ms * 2;
1299 	}
1300 	req->state |= REQ_STATE_NEED_WAKEUP;
1301 	mask &= ~REQ_STATE_NEED_WAKEUP;
1302 	saved_cnt = mpt->reset_cnt;
1303 	while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
1304 		if (sleep_ok != 0) {
1305 			error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
1306 			if (error == EWOULDBLOCK) {
1307 				timeout = 0;
1308 				break;
1309 			}
1310 		} else {
1311 			if (time_ms != 0 && --timeout == 0) {
1312 				break;
1313 			}
1314 			DELAY(500);
1315 			mpt_intr(mpt);
1316 		}
1317 	}
1318 	req->state &= ~REQ_STATE_NEED_WAKEUP;
1319 	if (mpt->reset_cnt != saved_cnt) {
1320 		return (EIO);
1321 	}
1322 	if (time_ms && timeout <= 0) {
1323 		MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
1324 		req->state |= REQ_STATE_TIMEDOUT;
1325 		mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
1326 		return (ETIMEDOUT);
1327 	}
1328 	return (0);
1329 }
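/*
 * Typical calling pattern (a sketch; mpt_issue_cfg_req() below is a real
 * example):
 *
 *	mpt_check_doorbell(mpt);
 *	mpt_send_cmd(mpt, req);
 *	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 *	    sleep_ok, timeout_ms);
 *
 * EIO means an IOC reset occurred while waiting; ETIMEDOUT means the
 * timeout fired, in which case the request is marked REQ_STATE_TIMEDOUT
 * and is only freed when (and if) its completion eventually arrives.
 */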
1330 
1331 /*
1332  * Send a command to the IOC via the handshake register.
1333  *
1334  * Only done at initialization time and for certain unusual
1335  * commands such as device/bus reset as specified by LSI.
1336  */
1337 int
1338 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
1339 {
1340 	int i;
1341 	uint32_t data, *data32;
1342 
1343 	/* Check condition of the IOC */
1344 	data = mpt_rd_db(mpt);
1345 	if ((MPT_STATE(data) != MPT_DB_STATE_READY
1346 	  && MPT_STATE(data) != MPT_DB_STATE_RUNNING
1347 	  && MPT_STATE(data) != MPT_DB_STATE_FAULT)
1348 	 || MPT_DB_IS_IN_USE(data)) {
1349 		mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
1350 		mpt_print_db(data);
1351 		return (EBUSY);
1352 	}
1353 
1354 	/* We move things in 32 bit chunks */
1355 	len = (len + 3) >> 2;
1356 	data32 = cmd;
1357 
1358 	/* Clear any leftover pending doorbell interrupts */
1359 	if (MPT_DB_INTR(mpt_rd_intr(mpt)))
1360 		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1361 
1362 	/*
1363 	 * Tell the handshake reg. we are going to send a command
1364          * and how long it is going to be.
1365 	 */
1366 	data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
1367 	    (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
1368 	mpt_write(mpt, MPT_OFFSET_DOORBELL, data);
1369 
1370 	/* Wait for the chip to notice */
1371 	if (mpt_wait_db_int(mpt) != MPT_OK) {
1372 		mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
1373 		return (ETIMEDOUT);
1374 	}
1375 
1376 	/* Clear the interrupt */
1377 	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1378 
1379 	if (mpt_wait_db_ack(mpt) != MPT_OK) {
1380 		mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
1381 		return (ETIMEDOUT);
1382 	}
1383 
1384 	/* Send the command */
1385 	for (i = 0; i < len; i++) {
1386 		mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++));
1387 		if (mpt_wait_db_ack(mpt) != MPT_OK) {
1388 			mpt_prt(mpt,
1389 			    "mpt_send_handshake_cmd: timeout @ index %d\n", i);
1390 			return (ETIMEDOUT);
1391 		}
1392 	}
1393 	return MPT_OK;
1394 }
1395 
1396 /* Get the response from the handshake register */
1397 int
1398 mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
1399 {
1400 	int left, reply_left;
1401 	u_int16_t *data16;
1402 	uint32_t data;
1403 	MSG_DEFAULT_REPLY *hdr;
1404 
1405 	/* We move things out in 16 bit chunks */
1406 	reply_len >>= 1;
1407 	data16 = (u_int16_t *)reply;
1408 
1409 	hdr = (MSG_DEFAULT_REPLY *)reply;
1410 
1411 	/* Get first word */
1412 	if (mpt_wait_db_int(mpt) != MPT_OK) {
1413 		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
1414 		return ETIMEDOUT;
1415 	}
1416 	data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1417 	*data16++ = le16toh(data & MPT_DB_DATA_MASK);
1418 	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1419 
1420 	/* Get Second Word */
1421 	if (mpt_wait_db_int(mpt) != MPT_OK) {
1422 		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
1423 		return ETIMEDOUT;
1424 	}
1425 	data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1426 	*data16++ = le16toh(data & MPT_DB_DATA_MASK);
1427 	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1428 
1429 	/*
1430 	 * With the second word, we can now look at the length.
1431 	 * Warn about a reply that's too short (except for IOC FACTS REPLY)
1432 	 */
1433 	if ((reply_len >> 1) != hdr->MsgLength &&
1434 	    (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
1435 #if __FreeBSD_version >= 500000
1436 		mpt_prt(mpt, "reply length does not match message length: "
1437 			"got %x; expected %zx for function %x\n",
1438 			hdr->MsgLength << 2, reply_len << 1, hdr->Function);
1439 #else
1440 		mpt_prt(mpt, "reply length does not match message length: "
1441 			"got %x; expected %x for function %x\n",
1442 			hdr->MsgLength << 2, reply_len << 1, hdr->Function);
1443 #endif
1444 	}
1445 
1446 	/* Get rest of the reply; but don't overflow the provided buffer */
1447 	left = (hdr->MsgLength << 1) - 2;
1448 	reply_left =  reply_len - 2;
1449 	while (left--) {
1450 		u_int16_t datum;
1451 
1452 		if (mpt_wait_db_int(mpt) != MPT_OK) {
1453 			mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
1454 			return ETIMEDOUT;
1455 		}
1456 		data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1457 		datum = le16toh(data & MPT_DB_DATA_MASK);
1458 
1459 		if (reply_left-- > 0)
1460 			*data16++ = datum;
1461 
1462 		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1463 	}
1464 
1465 	/* One more wait & clear at the end */
1466 	if (mpt_wait_db_int(mpt) != MPT_OK) {
1467 		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
1468 		return ETIMEDOUT;
1469 	}
1470 	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1471 
1472 	if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1473 		if (mpt->verbose >= MPT_PRT_TRACE)
1474 			mpt_print_reply(hdr);
1475 		return (MPT_FAIL | hdr->IOCStatus);
1476 	}
1477 
1478 	return (0);
1479 }
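/*
 * Handshake protocol summary: mpt_send_handshake_cmd() announces the
 * message length via the doorbell and feeds the request to the IOC one
 * 32-bit word at a time, waiting for an acknowledgement after each write;
 * mpt_recv_handshake_reply() then drains the reply from the doorbell 16
 * bits at a time, clearing the interrupt status between reads.  Both
 * directions are polled, which is why this path is reserved for init time
 * and the few commands LSI requires to be handshaken.
 */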
1480 
1481 static int
1482 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
1483 {
1484 	MSG_IOC_FACTS f_req;
1485 	int error;
1486 
1487 	memset(&f_req, 0, sizeof f_req);
1488 	f_req.Function = MPI_FUNCTION_IOC_FACTS;
1489 	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1490 	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1491 	if (error) {
1492 		return(error);
1493 	}
1494 	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1495 	return (error);
1496 }
1497 
1498 static int
1499 mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
1500 {
1501 	MSG_PORT_FACTS f_req;
1502 	int error;
1503 
1504 	memset(&f_req, 0, sizeof f_req);
1505 	f_req.Function = MPI_FUNCTION_PORT_FACTS;
1506 	f_req.PortNumber = port;
1507 	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1508 	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1509 	if (error) {
1510 		return(error);
1511 	}
1512 	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1513 	return (error);
1514 }
1515 
1516 /*
1517  * Send the initialization request. This is where we specify how many
1518  * SCSI busses and how many devices per bus we wish to emulate.
1519  * This is also the command that specifies the max size of the reply
1520  * frames from the IOC that we will be allocating.
1521  */
1522 static int
1523 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
1524 {
1525 	int error = 0;
1526 	MSG_IOC_INIT init;
1527 	MSG_IOC_INIT_REPLY reply;
1528 
1529 	memset(&init, 0, sizeof init);
1530 	init.WhoInit = who;
1531 	init.Function = MPI_FUNCTION_IOC_INIT;
1532 	init.MaxDevices = 0;	/* at least 256 devices per bus */
1533 	init.MaxBuses = 16;	/* at least 16 busses */
1534 
1535 	init.MsgVersion = htole16(MPI_VERSION);
1536 	init.HeaderVersion = htole16(MPI_HEADER_VERSION);
1537 	init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
1538 	init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1539 
1540 	if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
1541 		return(error);
1542 	}
1543 
1544 	error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
1545 	return (error);
1546 }
1547 
1548 
1549 /*
1550  * Utility routine to read configuration headers and pages
1551  */
1552 int
1553 mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params,
1554 		  bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms)
1555 {
1556 	MSG_CONFIG *cfgp;
1557 	SGE_SIMPLE32 *se;
1558 
1559 	cfgp = req->req_vbuf;
1560 	memset(cfgp, 0, sizeof *cfgp);
1561 	cfgp->Action = params->Action;
1562 	cfgp->Function = MPI_FUNCTION_CONFIG;
1563 	cfgp->Header.PageVersion = params->PageVersion;
1564 	cfgp->Header.PageNumber = params->PageNumber;
1565 	cfgp->PageAddress = htole32(params->PageAddress);
1566 	if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
1567 	    MPI_CONFIG_PAGETYPE_EXTENDED) {
1568 		cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1569 		cfgp->Header.PageLength = 0;
1570 		cfgp->ExtPageLength = htole16(params->ExtPageLength);
1571 		cfgp->ExtPageType = params->ExtPageType;
1572 	} else {
1573 		cfgp->Header.PageType = params->PageType;
1574 		cfgp->Header.PageLength = params->PageLength;
1575 	}
1576 	se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
1577 	se->Address = htole32(addr);
1578 	MPI_pSGE_SET_LENGTH(se, len);
1579 	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1580 	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1581 	    MPI_SGE_FLAGS_END_OF_LIST |
1582 	    ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
1583 	  || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
1584 	   ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
1585 	se->FlagsLength = htole32(se->FlagsLength);
1586 	cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1587 
1588 	mpt_check_doorbell(mpt);
1589 	mpt_send_cmd(mpt, req);
1590 	return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1591 			     sleep_ok, timeout_ms));
1592 }
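/*
 * Callers fill a cfgparms_t and hand mpt_issue_cfg_req() the bus address
 * of a scratch area; mpt_read_cfg_header() and mpt_read_cfg_page() below
 * show the usual pattern, with page data DMA'd into the space following
 * the request itself (req->req_pbuf + MPT_RQSL(mpt)).
 */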
1593 
1594 int
1595 mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber,
1596 		       uint32_t PageAddress, int ExtPageType,
1597 		       CONFIG_EXTENDED_PAGE_HEADER *rslt,
1598 		       int sleep_ok, int timeout_ms)
1599 {
1600 	request_t  *req;
1601 	cfgparms_t params;
1602 	MSG_CONFIG_REPLY *cfgp;
1603 	int	    error;
1604 
1605 	req = mpt_get_request(mpt, sleep_ok);
1606 	if (req == NULL) {
1607 		mpt_prt(mpt, "mpt_read_extcfg_header: Get request failed!\n");
1608 		return (ENOMEM);
1609 	}
1610 
1611 	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
1612 	params.PageVersion = PageVersion;
1613 	params.PageLength = 0;
1614 	params.PageNumber = PageNumber;
1615 	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1616 	params.PageAddress = PageAddress;
1617 	params.ExtPageType = ExtPageType;
1618 	params.ExtPageLength = 0;
1619 	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
1620 				  sleep_ok, timeout_ms);
1621 	if (error != 0) {
1622 		/*
1623 		 * Leave the request. Without resetting the chip, it's
1624 		 * still owned by it and we'll just get into trouble
1625 		 * freeing it now. Mark it as abandoned so that if it
1626 		 * shows up later it can be freed.
1627 		 */
1628 		mpt_prt(mpt, "read_extcfg_header timed out\n");
1629 		return (ETIMEDOUT);
1630 	}
1631 
1632         switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1633 	case MPI_IOCSTATUS_SUCCESS:
1634 		cfgp = req->req_vbuf;
1635 		rslt->PageVersion = cfgp->Header.PageVersion;
1636 		rslt->PageNumber = cfgp->Header.PageNumber;
1637 		rslt->PageType = cfgp->Header.PageType;
1638 		rslt->ExtPageLength = cfgp->ExtPageLength;
1639 		rslt->ExtPageType = cfgp->ExtPageType;
1640 		error = 0;
1641 		break;
1642 	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1643 		mpt_lprt(mpt, MPT_PRT_DEBUG,
1644 		    "Invalid Page Type %d Number %d Addr 0x%0x\n",
1645 		    MPI_CONFIG_PAGETYPE_EXTENDED, PageNumber, PageAddress);
1646 		error = EINVAL;
1647 		break;
1648 	default:
1649 		mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n",
1650 			req->IOCStatus);
1651 		error = EIO;
1652 		break;
1653 	}
1654 	mpt_free_request(mpt, req);
1655 	return (error);
1656 }
1657 
1658 int
1659 mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1660 		     CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len,
1661 		     int sleep_ok, int timeout_ms)
1662 {
1663 	request_t    *req;
1664 	cfgparms_t    params;
1665 	int	      error;
1666 
1667 	req = mpt_get_request(mpt, sleep_ok);
1668 	if (req == NULL) {
1669 		mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
1670 		return (-1);
1671 	}
1672 
1673 	params.Action = Action;
1674 	params.PageVersion = hdr->PageVersion;
1675 	params.PageLength = 0;
1676 	params.PageNumber = hdr->PageNumber;
1677 	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1678 	params.PageAddress = PageAddress;
1679 	params.ExtPageType = hdr->ExtPageType;
1680 	params.ExtPageLength = hdr->ExtPageLength;
1681 	error = mpt_issue_cfg_req(mpt, req, &params,
1682 				  req->req_pbuf + MPT_RQSL(mpt),
1683 				  len, sleep_ok, timeout_ms);
1684 	if (error != 0) {
1685 		mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action);
1686 		return (-1);
1687 	}
1688 
1689 	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1690 		mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n",
1691 			req->IOCStatus);
1692 		mpt_free_request(mpt, req);
1693 		return (-1);
1694 	}
1695 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1696 	    BUS_DMASYNC_POSTREAD);
1697 	memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1698 	mpt_free_request(mpt, req);
1699 	return (0);
1700 }
1701 
1702 int
1703 mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
1704 		    uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
1705 		    int sleep_ok, int timeout_ms)
1706 {
1707 	request_t  *req;
1708 	cfgparms_t params;
1709 	MSG_CONFIG *cfgp;
1710 	int	    error;
1711 
1712 	req = mpt_get_request(mpt, sleep_ok);
1713 	if (req == NULL) {
1714 		mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
1715 		return (ENOMEM);
1716 	}
1717 
1718 	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
1719 	params.PageVersion = 0;
1720 	params.PageLength = 0;
1721 	params.PageNumber = PageNumber;
1722 	params.PageType = PageType;
1723 	params.PageAddress = PageAddress;
1724 	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
1725 				  sleep_ok, timeout_ms);
1726 	if (error != 0) {
1727 		/*
1728 		 * Leave the request. Without resetting the chip, it's
1729 		 * still owned by it and we'll just get into trouble
1730 		 * freeing it now. Mark it as abandoned so that if it
1731 		 * shows up later it can be freed.
1732 		 */
1733 		mpt_prt(mpt, "read_cfg_header timed out\n");
1734 		return (ETIMEDOUT);
1735 	}
1736 
1737         switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1738 	case MPI_IOCSTATUS_SUCCESS:
1739 		cfgp = req->req_vbuf;
1740 		bcopy(&cfgp->Header, rslt, sizeof(*rslt));
1741 		error = 0;
1742 		break;
1743 	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1744 		mpt_lprt(mpt, MPT_PRT_DEBUG,
1745 		    "Invalid Page Type %d Number %d Addr 0x%x\n",
1746 		    PageType, PageNumber, PageAddress);
1747 		error = EINVAL;
1748 		break;
1749 	default:
1750 		mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
1751 			req->IOCStatus);
1752 		error = EIO;
1753 		break;
1754 	}
1755 	mpt_free_request(mpt, req);
1756 	return (error);
1757 }
1758 
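/*
 * Read a full configuration page into the buffer beginning at 'hdr',
 * using a header previously fetched with mpt_read_cfg_header().
 */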
1759 int
1760 mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1761 		  CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1762 		  int timeout_ms)
1763 {
1764 	request_t    *req;
1765 	cfgparms_t    params;
1766 	int	      error;
1767 
1768 	req = mpt_get_request(mpt, sleep_ok);
1769 	if (req == NULL) {
1770 		mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
1771 		return (-1);
1772 	}
1773 
1774 	params.Action = Action;
1775 	params.PageVersion = hdr->PageVersion;
1776 	params.PageLength = hdr->PageLength;
1777 	params.PageNumber = hdr->PageNumber;
1778 	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
1779 	params.PageAddress = PageAddress;
1780 	error = mpt_issue_cfg_req(mpt, req, &params,
1781 				  req->req_pbuf + MPT_RQSL(mpt),
1782 				  len, sleep_ok, timeout_ms);
1783 	if (error != 0) {
1784 		mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
1785 		return (-1);
1786 	}
1787 
1788 	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1789 		mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
1790 			req->IOCStatus);
1791 		mpt_free_request(mpt, req);
1792 		return (-1);
1793 	}
1794 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1795 	    BUS_DMASYNC_POSTREAD);
1796 	memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1797 	mpt_free_request(mpt, req);
1798 	return (0);
1799 }
1800 
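/*
 * Write a configuration page.  Only pages whose attributes mark them
 * changeable or persistent may be written.
 */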
1801 int
1802 mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1803 		   CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1804 		   int timeout_ms)
1805 {
1806 	request_t    *req;
1807 	cfgparms_t    params;
1808 	u_int	      hdr_attr;
1809 	int	      error;
1810 
1811 	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
1812 	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
1813 	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
1814 		mpt_prt(mpt, "page type 0x%x not changeable\n",
1815 			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
1816 		return (-1);
1817 	}
1818 
1819 #if	0
1820 	/*
1821 	 * We shouldn't mask off other bits here.
1822 	 */
1823 	hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
1824 #endif
1825 
1826 	req = mpt_get_request(mpt, sleep_ok);
1827 	if (req == NULL)
1828 		return (-1);
1829 
1830 	memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);
1831 
1832 	/*
1833 	 * There isn't any point in restoring stripped out attributes
1834 	 * if you then mask them going down to issue the request.
1835 	 */
1836 
1837 	params.Action = Action;
1838 	params.PageVersion = hdr->PageVersion;
1839 	params.PageLength = hdr->PageLength;
1840 	params.PageNumber = hdr->PageNumber;
1841 	params.PageAddress = PageAddress;
1842 #if	0
1843 	/* Restore stripped out attributes */
1844 	hdr->PageType |= hdr_attr;
1845 	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
1846 #else
1847 	params.PageType = hdr->PageType;
1848 #endif
1849 	error = mpt_issue_cfg_req(mpt, req, &params,
1850 				  req->req_pbuf + MPT_RQSL(mpt),
1851 				  len, sleep_ok, timeout_ms);
1852 	if (error != 0) {
1853 		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
1854 		return (-1);
1855 	}
1856 
1857 	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1858 		mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
1859 			req->IOCStatus);
1860 		mpt_free_request(mpt, req);
1861 		return (-1);
1862 	}
1863 	mpt_free_request(mpt, req);
1864 	return (0);
1865 }
1866 
1867 /*
1868  * Read IOC configuration information
1869  */
1870 static int
1871 mpt_read_config_info_ioc(struct mpt_softc *mpt)
1872 {
1873 	CONFIG_PAGE_HEADER hdr;
1874 	struct mpt_raid_volume *mpt_raid;
1875 	int rv;
1876 	int i;
1877 	size_t len;
1878 
1879 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1880 		2, 0, &hdr, FALSE, 5000);
1881 	/*
1882 	 * If it's an invalid page, so what? Not a supported function....
1883 	 */
1884 	if (rv == EINVAL) {
1885 		return (0);
1886 	}
1887 	if (rv) {
1888 		return (rv);
1889 	}
1890 
1891 	mpt_lprt(mpt, MPT_PRT_DEBUG,
1892 	    "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
1893 	    hdr.PageVersion, hdr.PageLength << 2,
1894 	    hdr.PageNumber, hdr.PageType);
1895 
1896 	len = hdr.PageLength * sizeof(uint32_t);
1897 	mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1898 	if (mpt->ioc_page2 == NULL) {
1899 		mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
1900 		mpt_raid_free_mem(mpt);
1901 		return (ENOMEM);
1902 	}
1903 	memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
1904 	rv = mpt_read_cur_cfg_page(mpt, 0,
1905 	    &mpt->ioc_page2->Header, len, FALSE, 5000);
1906 	if (rv) {
1907 		mpt_prt(mpt, "failed to read IOC Page 2\n");
1908 		mpt_raid_free_mem(mpt);
1909 		return (EIO);
1910 	}
1911 	mpt2host_config_page_ioc2(mpt->ioc_page2);
1912 
1913 	if (mpt->ioc_page2->CapabilitiesFlags != 0) {
1914 		uint32_t mask;
1915 
1916 		mpt_prt(mpt, "Capabilities: (");
1917 		for (mask = 1; mask != 0; mask <<= 1) {
1918 			if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
1919 				continue;
1920 			}
1921 			switch (mask) {
1922 			case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
1923 				mpt_prtc(mpt, " RAID-0");
1924 				break;
1925 			case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
1926 				mpt_prtc(mpt, " RAID-1E");
1927 				break;
1928 			case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
1929 				mpt_prtc(mpt, " RAID-1");
1930 				break;
1931 			case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
1932 				mpt_prtc(mpt, " SES");
1933 				break;
1934 			case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
1935 				mpt_prtc(mpt, " SAFTE");
1936 				break;
1937 			case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
1938 				mpt_prtc(mpt, " Multi-Channel-Arrays");
				break;
1939 			default:
1940 				break;
1941 			}
1942 		}
1943 		mpt_prtc(mpt, " )\n");
1944 		if ((mpt->ioc_page2->CapabilitiesFlags
1945 		   & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
1946 		    | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
1947 		    | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
1948 			mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
1949 				mpt->ioc_page2->NumActiveVolumes,
1950 				mpt->ioc_page2->NumActiveVolumes != 1
1951 			      ? "s " : " ",
1952 				mpt->ioc_page2->MaxVolumes);
1953 			mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
1954 				mpt->ioc_page2->NumActivePhysDisks,
1955 				mpt->ioc_page2->NumActivePhysDisks != 1
1956 			      ? "s " : " ",
1957 				mpt->ioc_page2->MaxPhysDisks);
1958 		}
1959 	}
1960 
1961 	len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
1962 	mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1963 	if (mpt->raid_volumes == NULL) {
1964 		mpt_prt(mpt, "Could not allocate RAID volume data\n");
1965 		mpt_raid_free_mem(mpt);
1966 		return (ENOMEM);
1967 	}
1968 
1969 	/*
1970 	 * Copy critical data out of ioc_page2 so that we can
1971 	 * safely refresh the page without windows of unreliable
1972 	 * data.
1973 	 */
1974 	mpt->raid_max_volumes =  mpt->ioc_page2->MaxVolumes;
1975 
1976 	len = sizeof(*mpt->raid_volumes->config_page) +
1977 	    (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
1978 	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1979 		mpt_raid = &mpt->raid_volumes[i];
1980 		mpt_raid->config_page =
1981 		    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1982 		if (mpt_raid->config_page == NULL) {
1983 			mpt_prt(mpt, "Could not allocate RAID page data\n");
1984 			mpt_raid_free_mem(mpt);
1985 			return (ENOMEM);
1986 		}
1987 	}
1988 	mpt->raid_page0_len = len;
1989 
1990 	len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
1991 	mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1992 	if (mpt->raid_disks == NULL) {
1993 		mpt_prt(mpt, "Could not allocate RAID disk data\n");
1994 		mpt_raid_free_mem(mpt);
1995 		return (ENOMEM);
1996 	}
1997 	mpt->raid_max_disks =  mpt->ioc_page2->MaxPhysDisks;
1998 
1999 	/*
2000 	 * Load page 3.
2001 	 */
2002 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
2003 	    3, 0, &hdr, FALSE, 5000);
2004 	if (rv) {
2005 		mpt_raid_free_mem(mpt);
2006 		return (EIO);
2007 	}
2008 
2009 	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
2010 	    hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
2011 
2012 	len = hdr.PageLength * sizeof(uint32_t);
2013 	mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2014 	if (mpt->ioc_page3 == NULL) {
2015 		mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
2016 		mpt_raid_free_mem(mpt);
2017 		return (ENOMEM);
2018 	}
2019 	memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
2020 	rv = mpt_read_cur_cfg_page(mpt, 0,
2021 	    &mpt->ioc_page3->Header, len, FALSE, 5000);
2022 	if (rv) {
2023 		mpt_raid_free_mem(mpt);
2024 		return (EIO);
2025 	}
2026 	mpt_raid_wakeup(mpt);
2027 	return (0);
2028 }
2029 
2030 /*
2031  * Enable IOC port
2032  */
2033 static int
2034 mpt_send_port_enable(struct mpt_softc *mpt, int port)
2035 {
2036 	request_t	*req;
2037 	MSG_PORT_ENABLE *enable_req;
2038 	int		 error;
2039 
2040 	req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
2041 	if (req == NULL)
2042 		return (-1);
2043 
2044 	enable_req = req->req_vbuf;
2045 	memset(enable_req, 0,  MPT_RQSL(mpt));
2046 
2047 	enable_req->Function   = MPI_FUNCTION_PORT_ENABLE;
2048 	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
2049 	enable_req->PortNumber = port;
2050 
2051 	mpt_check_doorbell(mpt);
2052 	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
2053 
2054 	mpt_send_cmd(mpt, req);
2055 	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
2056 	    FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000);
2057 	if (error != 0) {
2058 		mpt_prt(mpt, "port %d enable timed out\n", port);
2059 		return (-1);
2060 	}
2061 	mpt_free_request(mpt, req);
2062 	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
2063 	return (0);
2064 }
2065 
2066 /*
2067  * Enable/Disable asynchronous event reporting.
2068  */
2069 static int
2070 mpt_send_event_request(struct mpt_softc *mpt, int onoff)
2071 {
2072 	request_t *req;
2073 	MSG_EVENT_NOTIFY *enable_req;
2074 
2075 	req = mpt_get_request(mpt, FALSE);
2076 	if (req == NULL) {
2077 		return (ENOMEM);
2078 	}
2079 	enable_req = req->req_vbuf;
2080 	memset(enable_req, 0, sizeof *enable_req);
2081 
2082 	enable_req->Function   = MPI_FUNCTION_EVENT_NOTIFICATION;
2083 	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
2084 	enable_req->Switch     = onoff;
2085 
2086 	mpt_check_doorbell(mpt);
2087 	mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
2088 	    onoff ? "en" : "dis");
2089 	/*
2090 	 * Send the command off, but don't wait for it.
2091 	 */
2092 	mpt_send_cmd(mpt, req);
2093 	return (0);
2094 }
2095 
2096 /*
2097  * Un-mask the interrupts on the chip.
2098  */
2099 void
2100 mpt_enable_ints(struct mpt_softc *mpt)
2101 {
2102 	/* Unmask everything except the doorbell interrupt */
2103 	mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
2104 }
2105 
2106 /*
2107  * Mask the interrupts on the chip.
2108  */
2109 void
2110 mpt_disable_ints(struct mpt_softc *mpt)
2111 {
2112 	/* Mask all interrupts */
2113 	mpt_write(mpt, MPT_OFFSET_INTR_MASK,
2114 	    MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
2115 }
2116 
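/*
 * Register the driver's sysctl nodes: debug verbosity, HBA role and,
 * with MPT_TEST_MULTIPATH, the next target to fail.
 */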
2117 static void
2118 mpt_sysctl_attach(struct mpt_softc *mpt)
2119 {
2120 #if __FreeBSD_version >= 500000
2121 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
2122 	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
2123 
2124 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2125 		       "debug", CTLFLAG_RW, &mpt->verbose, 0,
2126 		       "Debugging/Verbose level");
2127 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2128 		       "role", CTLFLAG_RD, &mpt->role, 0,
2129 		       "HBA role");
2130 #ifdef	MPT_TEST_MULTIPATH
2131 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2132 		       "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
2133 		       "Next Target to Fail");
2134 #endif
2135 #endif
2136 }
2137 
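/*
 * Probe and attach every registered personality that claims this
 * controller, then run the enable method of each attached personality.
 */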
2138 int
2139 mpt_attach(struct mpt_softc *mpt)
2140 {
2141 	struct mpt_personality *pers;
2142 	int i;
2143 	int error;
2144 
2145 	TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
2146 	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2147 		pers = mpt_personalities[i];
2148 		if (pers == NULL) {
2149 			continue;
2150 		}
2151 		if (pers->probe(mpt) == 0) {
2152 			error = pers->attach(mpt);
2153 			if (error != 0) {
2154 				mpt_detach(mpt);
2155 				return (error);
2156 			}
2157 			mpt->mpt_pers_mask |= (0x1 << pers->id);
2158 			pers->use_count++;
2159 		}
2160 	}
2161 
2162 	/*
2163 	 * Now that everything is attached, run the enable method for
2164 	 * each personality. This allows each personality to perform
2165 	 * whatever setup is appropriate for it prior to enabling
2166 	 * any ports.
2167 	 */
2168 	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2169 		pers = mpt_personalities[i];
2170 		if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
2171 			error = pers->enable(mpt);
2172 			if (error != 0) {
2173 				mpt_prt(mpt, "personality %s attached but would"
2174 				    " not enable (%d)\n", pers->name, error);
2175 				mpt_detach(mpt);
2176 				return (error);
2177 			}
2178 		}
2179 	}
2180 	return (0);
2181 }
2182 
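/*
 * Run the personality shutdown methods in reverse attach order.
 */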
2183 int
2184 mpt_shutdown(struct mpt_softc *mpt)
2185 {
2186 	struct mpt_personality *pers;
2187 
2188 	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2189 		pers->shutdown(mpt);
2190 	}
2191 	return (0);
2192 }
2193 
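/*
 * Detach the personalities in reverse attach order and remove the
 * controller from the global list.
 */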
2194 int
2195 mpt_detach(struct mpt_softc *mpt)
2196 {
2197 	struct mpt_personality *pers;
2198 
2199 	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2200 		pers->detach(mpt);
2201 		mpt->mpt_pers_mask &= ~(0x1 << pers->id);
2202 		pers->use_count--;
2203 	}
2204 	TAILQ_REMOVE(&mpt_tailq, mpt, links);
2205 	return (0);
2206 }
2207 
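/*
 * Core personality load method: point every reply handler slot at the
 * default handler, then register the event, config and handshake handlers.
 */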
2208 int
2209 mpt_core_load(struct mpt_personality *pers)
2210 {
2211 	int i;
2212 
2213 	/*
2214 	 * Set up the core reply handlers and install the default
2215 	 * handler in all remaining "empty slots".
2216 	 */
2217 	for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
2218 		mpt_reply_handlers[i] = mpt_default_reply_handler;
2219 	}
2220 
2221 	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
2222 	    mpt_event_reply_handler;
2223 	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
2224 	    mpt_config_reply_handler;
2225 	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
2226 	    mpt_handshake_reply_handler;
2227 	return (0);
2228 }
2229 
2230 /*
2231  * Initialize per-instance driver data and perform
2232  * initial controller configuration.
2233  */
2234 int
2235 mpt_core_attach(struct mpt_softc *mpt)
2236 {
2237 	int val, error;
2238 
2239 	LIST_INIT(&mpt->ack_frames);
2240 	/* Put all request buffers on the free list */
2241 	TAILQ_INIT(&mpt->request_pending_list);
2242 	TAILQ_INIT(&mpt->request_free_list);
2243 	TAILQ_INIT(&mpt->request_timeout_list);
2244 	MPT_LOCK(mpt);
2245 	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
2246 		request_t *req = &mpt->request_pool[val];
2247 		req->state = REQ_STATE_ALLOCATED;
2248 		mpt_free_request(mpt, req);
2249 	}
2250 	MPT_UNLOCK(mpt);
2251 	for (val = 0; val < MPT_MAX_LUNS; val++) {
2252 		STAILQ_INIT(&mpt->trt[val].atios);
2253 		STAILQ_INIT(&mpt->trt[val].inots);
2254 	}
2255 	STAILQ_INIT(&mpt->trt_wildcard.atios);
2256 	STAILQ_INIT(&mpt->trt_wildcard.inots);
2257 #ifdef	MPT_TEST_MULTIPATH
2258 	mpt->failure_id = -1;
2259 #endif
2260 	mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
2261 	mpt_sysctl_attach(mpt);
2262 	mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
2263 	    mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
2264 
2265 	MPT_LOCK(mpt);
2266 	error = mpt_configure_ioc(mpt, 0, 0);
2267 	MPT_UNLOCK(mpt);
2268 
2269 	return (error);
2270 }
2271 
2272 int
2273 mpt_core_enable(struct mpt_softc *mpt)
2274 {
2275 	/*
2276 	 * We enter with the IOC enabled, but with async events,
2277 	 * ports and interrupts not yet enabled.
2279 	 */
2280 	MPT_LOCK(mpt);
2281 
2282 	/*
2283 	 * Enable asynchronous event reporting. All personalities
2284 	 * have attached, so they should now be able to field
2285 	 * async events.
2286 	 */
2287 	mpt_send_event_request(mpt, 1);
2288 
2289 	/*
2290 	 * Catch any pending interrupts
2291 	 *
2292 	 * This seems to be crucial; otherwise
2293 	 * the port enable below times out.
2294 	 */
2295 	mpt_intr(mpt);
2296 
2297 	/*
2298 	 * Enable Interrupts
2299 	 */
2300 	mpt_enable_ints(mpt);
2301 
2302 	/*
2303 	 * Catch any pending interrupts
2304 	 *
2305 	 * This seems to be crucial; otherwise
2306 	 * the port enable below times out.
2307 	 */
2308 	mpt_intr(mpt);
2309 
2310 	/*
2311 	 * Enable the port.
2312 	 */
2313 	if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2314 		mpt_prt(mpt, "failed to enable port 0\n");
2315 		MPT_UNLOCK(mpt);
2316 		return (ENXIO);
2317 	}
2318 	MPT_UNLOCK(mpt);
2319 	return (0);
2320 }
2321 
2322 void
2323 mpt_core_shutdown(struct mpt_softc *mpt)
2324 {
2325 	mpt_disable_ints(mpt);
2326 }
2327 
2328 void
2329 mpt_core_detach(struct mpt_softc *mpt)
2330 {
2331 	/*
2332 	 * XXX: FREE MEMORY
2333 	 */
2334 	mpt_disable_ints(mpt);
2335 }
2336 
2337 int
2338 mpt_core_unload(struct mpt_personality *pers)
2339 {
2340 	/* Unload is always successful. */
2341 	return (0);
2342 }
2343 
2344 #define FW_UPLOAD_REQ_SIZE				\
2345 	(sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION)	\
2346        + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
2347 
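/*
 * Upload the IOC's firmware image into the host buffer at mpt->fw_phys
 * via a handshake FW_UPLOAD request, so that it can be re-downloaded
 * after a hard reset.
 */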
2348 static int
2349 mpt_upload_fw(struct mpt_softc *mpt)
2350 {
2351 	uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
2352 	MSG_FW_UPLOAD_REPLY fw_reply;
2353 	MSG_FW_UPLOAD *fw_req;
2354 	FW_UPLOAD_TCSGE *tsge;
2355 	SGE_SIMPLE32 *sge;
2356 	uint32_t flags;
2357 	int error;
2358 
2359 	memset(&fw_req_buf, 0, sizeof(fw_req_buf));
2360 	fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
2361 	fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
2362 	fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
2363 	fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
2364 	tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
2365 	tsge->DetailsLength = 12;
2366 	tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
2367 	tsge->ImageSize = htole32(mpt->fw_image_size);
2368 	sge = (SGE_SIMPLE32 *)(tsge + 1);
2369 	flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
2370 	      | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
2371 	      | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
2372 	flags <<= MPI_SGE_FLAGS_SHIFT;
2373 	sge->FlagsLength = htole32(flags | mpt->fw_image_size);
2374 	sge->Address = htole32(mpt->fw_phys);
2375 	error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
2376 	if (error)
2377 		return(error);
2378 	error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
2379 	return (error);
2380 }
2381 
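/*
 * Write a buffer to the chip through the diagnostic address/data
 * register pair, one 32-bit word at a time (PCI I/O space is
 * temporarily enabled on SAS parts).
 */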
2382 static void
2383 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
2384 	       uint32_t *data, bus_size_t len)
2385 {
2386 	uint32_t *data_end;
2387 
2388 	data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
2389 	if (mpt->is_sas) {
2390 		pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2391 	}
2392 	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
2393 	while (data != data_end) {
2394 		mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
2395 		data++;
2396 	}
2397 	if (mpt->is_sas) {
2398 		pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2399 	}
2400 }
2401 
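/*
 * Download the previously uploaded firmware image back to the IOC
 * through the diagnostic interface and re-enable its processor.
 */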
2402 static int
2403 mpt_download_fw(struct mpt_softc *mpt)
2404 {
2405 	MpiFwHeader_t *fw_hdr;
2406 	int error;
2407 	uint32_t ext_offset;
2408 	uint32_t data;
2409 
2410 	mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
2411 		mpt->fw_image_size);
2412 
2413 	error = mpt_enable_diag_mode(mpt);
2414 	if (error != 0) {
2415 		mpt_prt(mpt, "Could not enter diagnostic mode!\n");
2416 		return (EIO);
2417 	}
2418 
2419 	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
2420 		  MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);
2421 
2422 	fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
2423 	mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
2424 		       fw_hdr->ImageSize);
2425 
2426 	ext_offset = fw_hdr->NextImageHeaderOffset;
2427 	while (ext_offset != 0) {
2428 		MpiExtImageHeader_t *ext;
2429 
2430 		ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
2431 		ext_offset = ext->NextImageHeaderOffset;
2432 
2433 		mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
2434 			       ext->ImageSize);
2435 	}
2436 
2437 	if (mpt->is_sas) {
2438 		pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2439 	}
2440 	/* Setup the address to jump to on reset. */
2441 	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
2442 	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
2443 
2444 	/*
2445 	 * The controller sets the "flash bad" status after attempting
2446 	 * to auto-boot from flash.  Clear the status so that the controller
2447 	 * will continue the boot process with our newly installed firmware.
2448 	 */
2449 	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2450 	data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
2451 	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2452 	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
2453 
2454 	if (mpt->is_sas) {
2455 		pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2456 	}
2457 
2458 	/*
2459 	 * Re-enable the processor and clear the boot halt flag.
2460 	 */
2461 	data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
2462 	data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
2463 	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
2464 
2465 	mpt_disable_diag_mode(mpt);
2466 	return (0);
2467 }
2468 
2469 /*
2470  * Allocate/Initialize data structures for the controller.  Called
2471  * once at instance startup.
2472  */
2473 static int
2474 mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
2475 {
2476 	PTR_MSG_PORT_FACTS_REPLY pfp;
2477 	int error,  port;
2478 	size_t len;
2479 
2480 	if (tn == MPT_MAX_TRYS) {
2481 		return (-1);
2482 	}
2483 
2484 	/*
2485 	 * No need to reset if the IOC is already in the READY state.
2486 	 *
2487 	 * Force reset if initialization failed previously.
2488 	 * Note that a hard_reset of the second channel of a '929
2489 	 * will stop operation of the first channel.  Hopefully, if the
2490 	 * first channel is ok, the second will not require a hard
2491 	 * reset.
2492 	 */
2493 	if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) {
2494 		if (mpt_reset(mpt, FALSE) != MPT_OK) {
2495 			return (mpt_configure_ioc(mpt, tn + 1, 1));
2496 		}
2497 		needreset = 0;
2498 	}
2499 
2500 	if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) {
2501 		mpt_prt(mpt, "mpt_get_iocfacts failed\n");
2502 		return (mpt_configure_ioc(mpt, tn + 1, 1));
2503 	}
2504 	mpt2host_iocfacts_reply(&mpt->ioc_facts);
2505 
2506 	mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
2507 	    mpt->ioc_facts.MsgVersion >> 8,
2508 	    mpt->ioc_facts.MsgVersion & 0xFF,
2509 	    mpt->ioc_facts.HeaderVersion >> 8,
2510 	    mpt->ioc_facts.HeaderVersion & 0xFF);
2511 
2512 	/*
2513 	 * Now that we know request frame size, we can calculate
2514 	 * the actual (reasonable) segment limit for read/write I/O.
2515 	 *
2516 	 * This limit is constrained by:
2517 	 *
2518 	 *  + The size of each area we allocate per command (and how
2519 	 *    many chain segments we can fit into it).
2520 	 *  + The total number of areas we've set up.
2521 	 *  + The actual chain depth the card will allow.
2522 	 *
2523 	 * The first area's segment count is limited by the I/O request
2524 	 * at the head of it. We cannot realistically allocate more
2525 	 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
2526 	 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
2527 	 *
2528 	 */
2529 	/* total number of request areas we (can) allocate */
2530 	mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
2531 
2532 	/* converted to the number of chain areas possible */
2533 	mpt->max_seg_cnt *= MPT_NRFM(mpt);
2534 
2535 	/* limited by the number of chain areas the card will support */
2536 	if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
2537 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2538 		    "chain depth limited to %u (from %u)\n",
2539 		    mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
2540 		mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
2541 	}
2542 
2543 	/* converted to the number of simple sges in chain segments. */
2544 	mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
2545 
2546 	mpt_lprt(mpt, MPT_PRT_DEBUG, "Maximum Segment Count: %u\n",
2547 	    mpt->max_seg_cnt);
2548 	mpt_lprt(mpt, MPT_PRT_DEBUG, "MsgLength=%u IOCNumber = %d\n",
2549 	    mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
2550 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2551 	    "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
2552 	    "Request Frame Size %u bytes Max Chain Depth %u\n",
2553 	    mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
2554 	    mpt->ioc_facts.RequestFrameSize << 2,
2555 	    mpt->ioc_facts.MaxChainDepth);
2556 	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOCFACTS: Num Ports %d, FWImageSize %d, "
2557 	    "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
2558 	    mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);
2559 
2560 	len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY);
2561 	mpt->port_facts = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2562 	if (mpt->port_facts == NULL) {
2563 		mpt_prt(mpt, "unable to allocate memory for port facts\n");
2564 		return (ENOMEM);
2565 	}
2566 
2567 
2568 	if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) &&
2569 	    (mpt->fw_uploaded == 0)) {
2570 		struct mpt_map_info mi;
2571 
2572 		/*
2573 		 * In some configurations, the IOC's firmware is
2574 		 * stored in a shared piece of system NVRAM that
2575 		 * is only accessible via the BIOS.  In this
2576 		 * case, the IOC keeps a copy of the firmware in
2577 		 * RAM until the OS driver retrieves it.  Once
2578 		 * retrieved, we are responsible for re-downloading
2579 		 * the firmware after any hard-reset.
2580 		 */
2581 		mpt->fw_image_size = mpt->ioc_facts.FWImageSize;
2582 		error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
2583 		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2584 		    mpt->fw_image_size, 1, mpt->fw_image_size, 0,
2585 		    &mpt->fw_dmat);
2586 		if (error != 0) {
2587 			mpt_prt(mpt, "cannot create firmware dma tag\n");
2588 			return (ENOMEM);
2589 		}
2590 		error = bus_dmamem_alloc(mpt->fw_dmat,
2591 		    (void **)&mpt->fw_image, BUS_DMA_NOWAIT, &mpt->fw_dmap);
2592 		if (error != 0) {
2593 			mpt_prt(mpt, "cannot allocate firmware memory\n");
2594 			bus_dma_tag_destroy(mpt->fw_dmat);
2595 			return (ENOMEM);
2596 		}
2597 		mi.mpt = mpt;
2598 		mi.error = 0;
2599 		bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
2600 		    mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0);
2601 		mpt->fw_phys = mi.phys;
2602 
2603 		error = mpt_upload_fw(mpt);
2604 		if (error != 0) {
2605 			mpt_prt(mpt, "firmware upload failed.\n");
2606 			bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
2607 			bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
2608 			    mpt->fw_dmap);
2609 			bus_dma_tag_destroy(mpt->fw_dmat);
2610 			mpt->fw_image = NULL;
2611 			return (EIO);
2612 		}
2613 		mpt->fw_uploaded = 1;
2614 	}
2615 
2616 	for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) {
2617 		pfp = &mpt->port_facts[port];
2618 		error = mpt_get_portfacts(mpt, 0, pfp);
2619 		if (error != MPT_OK) {
2620 			mpt_prt(mpt,
2621 			    "mpt_get_portfacts on port %d failed\n", port);
2622 			free(mpt->port_facts, M_DEVBUF);
2623 			mpt->port_facts = NULL;
2624 			return (mpt_configure_ioc(mpt, tn + 1, 1));
2625 		}
2626 		mpt2host_portfacts_reply(pfp);
2627 
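		/*
		 * Report ports beyond the first at INFO rather than
		 * DEBUG priority.
		 */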
2628 		if (port > 0) {
2629 			error = MPT_PRT_INFO;
2630 		} else {
2631 			error = MPT_PRT_DEBUG;
2632 		}
2633 		mpt_lprt(mpt, error,
2634 		    "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n",
2635 		    port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID,
2636 		    pfp->MaxDevices);
2637 
2638 	}
2639 
2640 	/*
2641 	 * XXX: Not yet supporting more than port 0
2642 	 */
2643 	pfp = &mpt->port_facts[0];
2644 	if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) {
2645 		mpt->is_fc = 1;
2646 		mpt->is_sas = 0;
2647 		mpt->is_spi = 0;
2648 	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
2649 		mpt->is_fc = 0;
2650 		mpt->is_sas = 1;
2651 		mpt->is_spi = 0;
2652 	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) {
2653 		mpt->is_fc = 0;
2654 		mpt->is_sas = 0;
2655 		mpt->is_spi = 1;
2656 	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) {
2657 		mpt_prt(mpt, "iSCSI not supported yet\n");
2658 		return (ENXIO);
2659 	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) {
2660 		mpt_prt(mpt, "Inactive Port\n");
2661 		return (ENXIO);
2662 	} else {
2663 		mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType);
2664 		return (ENXIO);
2665 	}
2666 
2667 	/*
2668 	 * Set our role with what this port supports.
2669 	 *
2670 	 * Note this might be changed later in different modules
2671 	 * if this is different from what is wanted.
2672 	 */
2673 	mpt->role = MPT_ROLE_NONE;
2674 	if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
2675 		mpt->role |= MPT_ROLE_INITIATOR;
2676 	}
2677 	if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2678 		mpt->role |= MPT_ROLE_TARGET;
2679 	}
2680 
2681 	/*
2682 	 * Enable the IOC
2683 	 */
2684 	if (mpt_enable_ioc(mpt, 0) != MPT_OK) {
2685 		mpt_prt(mpt, "unable to initialize IOC\n");
2686 		return (ENXIO);
2687 	}
2688 
2689 	/*
2690 	 * Read IOC configuration information.
2691 	 *
2692 	 * We need this to determine whether or not we have certain
2693 	 * settings for Integrated Mirroring (e.g.).
2694 	 */
2695 	mpt_read_config_info_ioc(mpt);
2696 
2697 	return (0);
2698 }
2699 
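/*
 * Send the IOC init message, wait for the IOC to reach the RUN state and
 * post reply buffers (without exceeding the global credit count).  If
 * 'portenable' is set, also re-enable async events and port 0.
 */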
2700 static int
2701 mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
2702 {
2703 	uint32_t pptr;
2704 	int val;
2705 
2706 	if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2707 		mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2708 		return (EIO);
2709 	}
2710 
2711 	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
2712 
2713 	if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
2714 		mpt_prt(mpt, "IOC failed to go to run state\n");
2715 		return (ENXIO);
2716 	}
2717 	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
2718 
2719 	/*
2720 	 * Give it reply buffers
2721 	 *
2722 	 * Do *not* exceed global credits.
2723 	 */
2724 	for (val = 0, pptr = mpt->reply_phys;
2725 	    (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2726 	     pptr += MPT_REPLY_SIZE) {
2727 		mpt_free_reply(mpt, pptr);
2728 		if (++val == mpt->ioc_facts.GlobalCredits - 1)
2729 			break;
2730 	}
2731 
2732 
2733 	/*
2734 	 * Enable the port if asked. This is only done if we're resetting
2735 	 * the IOC after initial startup.
2736 	 */
2737 	if (portenable) {
2738 		/*
2739 		 * Enable asynchronous event reporting
2740 		 */
2741 		mpt_send_event_request(mpt, 1);
2742 
2743 		if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2744 			mpt_prt(mpt, "failed to enable port 0\n");
2745 			return (ENXIO);
2746 		}
2747 	}
2748 	return (MPT_OK);
2749 }
2750 
2751 /*
2752  * Endian Conversion Functions, only used on Big Endian machines
2753  */
2754 #if	_BYTE_ORDER == _BIG_ENDIAN
2755 void
2756 mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
2757 {
2758 	MPT_2_HOST32(sge, FlagsLength);
2759 	MPT_2_HOST32(sge, u.Address64.Low);
2760 	MPT_2_HOST32(sge, u.Address64.High);
2761 }
2762 
2763 void
2764 mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp)
2765 {
2766 	MPT_2_HOST16(rp, MsgVersion);
2767 	MPT_2_HOST16(rp, HeaderVersion);
2768 	MPT_2_HOST32(rp, MsgContext);
2769 	MPT_2_HOST16(rp, IOCExceptions);
2770 	MPT_2_HOST16(rp, IOCStatus);
2771 	MPT_2_HOST32(rp, IOCLogInfo);
2772 	MPT_2_HOST16(rp, ReplyQueueDepth);
2773 	MPT_2_HOST16(rp, RequestFrameSize);
2774 	MPT_2_HOST16(rp, Reserved_0101_FWVersion);
2775 	MPT_2_HOST16(rp, ProductID);
2776 	MPT_2_HOST32(rp, CurrentHostMfaHighAddr);
2777 	MPT_2_HOST16(rp, GlobalCredits);
2778 	MPT_2_HOST32(rp, CurrentSenseBufferHighAddr);
2779 	MPT_2_HOST16(rp, CurReplyFrameSize);
2780 	MPT_2_HOST32(rp, FWImageSize);
2781 	MPT_2_HOST32(rp, IOCCapabilities);
2782 	MPT_2_HOST32(rp, FWVersion.Word);
2783 	MPT_2_HOST16(rp, HighPriorityQueueDepth);
2784 	MPT_2_HOST16(rp, Reserved2);
2785 	mpt2host_sge_simple_union(&rp->HostPageBufferSGE);
2786 	MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr);
2787 }
2788 
2789 void
2790 mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp)
2791 {
2792 	MPT_2_HOST16(pfp, Reserved);
2793 	MPT_2_HOST16(pfp, Reserved1);
2794 	MPT_2_HOST32(pfp, MsgContext);
2795 	MPT_2_HOST16(pfp, Reserved2);
2796 	MPT_2_HOST16(pfp, IOCStatus);
2797 	MPT_2_HOST32(pfp, IOCLogInfo);
2798 	MPT_2_HOST16(pfp, MaxDevices);
2799 	MPT_2_HOST16(pfp, PortSCSIID);
2800 	MPT_2_HOST16(pfp, ProtocolFlags);
2801 	MPT_2_HOST16(pfp, MaxPostedCmdBuffers);
2802 	MPT_2_HOST16(pfp, MaxPersistentIDs);
2803 	MPT_2_HOST16(pfp, MaxLanBuckets);
2804 	MPT_2_HOST16(pfp, Reserved4);
2805 	MPT_2_HOST32(pfp, Reserved5);
2806 }

2807 void
2808 mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2)
2809 {
2810 	int i;
2811 	ioc2->CapabilitiesFlags = htole32(ioc2->CapabilitiesFlags);
2812 	for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) {
2813 		MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3);
2814 	}
2815 }
2816 
2817 void
2818 mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp)
2819 {
2820 	int i;
2821 	MPT_2_HOST16(volp, VolumeStatus.Reserved);
2822 	MPT_2_HOST16(volp, VolumeSettings.Settings);
2823 	MPT_2_HOST32(volp, MaxLBA);
2824 	MPT_2_HOST32(volp, MaxLBAHigh);
2825 	MPT_2_HOST32(volp, StripeSize);
2826 	MPT_2_HOST32(volp, Reserved2);
2827 	MPT_2_HOST32(volp, Reserved3);
2828 	for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) {
2829 		MPT_2_HOST16(volp, PhysDisk[i].Reserved);
2830 	}
2831 }
2832 
2833 void
2834 mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi)
2835 {
2836 	MPT_2_HOST16(vi, TotalBlocks.High);
2837 	MPT_2_HOST16(vi, TotalBlocks.Low);
2838 	MPT_2_HOST16(vi, BlocksRemaining.High);
2839 	MPT_2_HOST16(vi, BlocksRemaining.Low);
2840 }
2841 #endif
2842