/*-
 * Generic routines for LSI Fusion adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */

#include <dev/mpt/mpilib/mpi.h>
#include <dev/mpt/mpilib/mpi_ioc.h>
#include <dev/mpt/mpilib/mpi_fc.h>
#include <dev/mpt/mpilib/mpi_targ.h>

#include <sys/sysctl.h>

#define MPT_MAX_TRYS 3
#define MPT_MAX_WAIT 300000

static int maxwait_ack = 0;
static int maxwait_int = 0;
static int maxwait_state = 0;

TAILQ_HEAD(, mpt_softc)	mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];

static mpt_reply_handler_t mpt_default_reply_handler;
static mpt_reply_handler_t mpt_config_reply_handler;
static mpt_reply_handler_t mpt_handshake_reply_handler;
static mpt_reply_handler_t mpt_event_reply_handler;
static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
			       MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
static int mpt_soft_reset(struct mpt_softc *mpt);
static void mpt_hard_reset(struct mpt_softc *mpt);
static int mpt_configure_ioc(struct mpt_softc *mpt);
static int mpt_enable_ioc(struct mpt_softc *mpt, int);

/************************* Personality Module Support *************************/
/*
 * We include one extra entry that is guaranteed to be NULL
 * to simplify our iterator.
 */
static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
static __inline struct mpt_personality*
	mpt_pers_find(struct mpt_softc *, u_int);
static __inline struct mpt_personality*
	mpt_pers_find_reverse(struct mpt_softc *, u_int);

static __inline struct mpt_personality *
mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
{
	KASSERT(start_at <= MPT_MAX_PERSONALITIES,
		("mpt_pers_find: starting position out of range\n"));

	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at++;
	}
	return (mpt_personalities[start_at]);
}

/*
 * Used infrequently, so no need to optimize like a forward
 * traversal, where we rely on the MAX+1 entry being
 * guaranteed to be NULL.
 */
static __inline struct mpt_personality *
mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
{
	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at--;
	}
	if (start_at < MPT_MAX_PERSONALITIES)
		return (mpt_personalities[start_at]);
	return (NULL);
}

#define MPT_PERS_FOREACH(mpt, pers)				\
	for (pers = mpt_pers_find(mpt, /*start_at*/0);		\
	     pers != NULL;					\
	     pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))

#define MPT_PERS_FOREACH_REVERSE(mpt, pers)				\
	for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
	     pers != NULL;						\
	     pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))

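/*
 * Example (illustrative only): iterating over all attached
 * personalities with the macro above, just as the event and reset
 * paths later in this file do:
 *
 *	struct mpt_personality *pers;
 *
 *	MPT_PERS_FOREACH(mpt, pers)
 *		pers->shutdown(mpt);
 */
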
static mpt_load_handler_t      mpt_stdload;
static mpt_probe_handler_t     mpt_stdprobe;
static mpt_attach_handler_t    mpt_stdattach;
static mpt_enable_handler_t    mpt_stdenable;
static mpt_event_handler_t     mpt_stdevent;
static mpt_reset_handler_t     mpt_stdreset;
static mpt_shutdown_handler_t  mpt_stdshutdown;
static mpt_detach_handler_t    mpt_stddetach;
static mpt_unload_handler_t    mpt_stdunload;
static struct mpt_personality mpt_default_personality =
{
	.load		= mpt_stdload,
	.probe		= mpt_stdprobe,
	.attach		= mpt_stdattach,
	.enable		= mpt_stdenable,
	.event		= mpt_stdevent,
	.reset		= mpt_stdreset,
	.shutdown	= mpt_stdshutdown,
	.detach		= mpt_stddetach,
	.unload		= mpt_stdunload
};

static mpt_load_handler_t      mpt_core_load;
static mpt_attach_handler_t    mpt_core_attach;
static mpt_enable_handler_t    mpt_core_enable;
static mpt_reset_handler_t     mpt_core_ioc_reset;
static mpt_event_handler_t     mpt_core_event;
static mpt_shutdown_handler_t  mpt_core_shutdown;
static mpt_shutdown_handler_t  mpt_core_detach;
static mpt_unload_handler_t    mpt_core_unload;
static struct mpt_personality mpt_core_personality =
{
	.name		= "mpt_core",
	.load		= mpt_core_load,
	.attach		= mpt_core_attach,
	.enable		= mpt_core_enable,
	.event		= mpt_core_event,
	.reset		= mpt_core_ioc_reset,
	.shutdown	= mpt_core_shutdown,
	.detach		= mpt_core_detach,
	.unload		= mpt_core_unload,
};

/*
 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
 * ordering information.  We want the core to always register FIRST.
 * Other modules are set to SI_ORDER_SECOND.
 */
static moduledata_t mpt_core_mod = {
	"mpt_core", mpt_modevent, &mpt_core_personality
};
DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(mpt_core, 1);
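
/*
 * A non-core personality module would follow the same pattern at
 * SI_ORDER_SECOND.  Hypothetical sketch (the "mpt_foo" names are
 * invented for illustration only):
 *
 *	static moduledata_t mpt_foo_mod = {
 *		"mpt_foo", mpt_modevent, &mpt_foo_personality
 *	};
 *	DECLARE_MODULE(mpt_foo, mpt_foo_mod, SI_SUB_DRIVERS,
 *	    SI_ORDER_SECOND);
 *	MODULE_VERSION(mpt_foo, 1);
 *	MODULE_DEPEND(mpt_foo, mpt_core, 1, 1, 1);
 */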

#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))


int
mpt_modevent(module_t mod, int type, void *data)
{
	struct mpt_personality *pers;
	int error;

	pers = (struct mpt_personality *)data;

	error = 0;
	switch (type) {
	case MOD_LOAD:
	{
		mpt_load_handler_t **def_handler;
		mpt_load_handler_t **pers_handler;
		int i;

		for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
			if (mpt_personalities[i] == NULL)
				break;
		}
		if (i >= MPT_MAX_PERSONALITIES) {
			error = ENOMEM;
			break;
		}
		pers->id = i;
		mpt_personalities[i] = pers;

		/* Install standard/noop handlers for any NULL entries. */
		def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
		pers_handler = MPT_PERS_FIRST_HANDLER(pers);
		while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
			if (*pers_handler == NULL)
				*pers_handler = *def_handler;
			pers_handler++;
			def_handler++;
		}

		error = (pers->load(pers));
		if (error != 0)
			mpt_personalities[i] = NULL;
		break;
	}
	case MOD_SHUTDOWN:
		break;
#if __FreeBSD_version >= 500000
	case MOD_QUIESCE:
		break;
#endif
	case MOD_UNLOAD:
		error = pers->unload(pers);
		mpt_personalities[pers->id] = NULL;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
mpt_stdload(struct mpt_personality *pers)
{
	/* Load is always successful. */
	return (0);
}

int
mpt_stdprobe(struct mpt_softc *mpt)
{
	/* Probe is always successful. */
	return (0);
}

int
mpt_stdattach(struct mpt_softc *mpt)
{
	/* Attach is always successful. */
	return (0);
}

int
mpt_stdenable(struct mpt_softc *mpt)
{
	/* Enable is always successful. */
	return (0);
}

int
mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
	/* Event was not for us. */
	return (0);
}

void
mpt_stdreset(struct mpt_softc *mpt, int type)
{
}

void
mpt_stdshutdown(struct mpt_softc *mpt)
{
}

void
mpt_stddetach(struct mpt_softc *mpt)
{
}

int
mpt_stdunload(struct mpt_personality *pers)
{
	/* Unload is always successful. */
	return (0);
}

/******************************* Bus DMA Support ******************************/
void
mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mpt_map_info *map_info;

	map_info = (struct mpt_map_info *)arg;
	map_info->error = error;
	map_info->phys = segs->ds_addr;
}

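/*
 * mpt_map_rquest() is the bus_dma(9) callback used for single-segment
 * map loads.  An illustrative call site (tag, map, vaddr and len are
 * placeholders):
 *
 *	struct mpt_map_info mi;
 *
 *	mi.error = 0;
 *	error = bus_dmamap_load(tag, map, vaddr, len, mpt_map_rquest,
 *	    &mi, 0);
 *	if (error == 0 && mi.error == 0)
 *		paddr = mi.phys;
 */
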
/**************************** Reply/Event Handling ****************************/
int
mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
		     mpt_handler_t handler, uint32_t *phandler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;
		u_int free_cbi;

		if (phandler_id == NULL)
			return (EINVAL);

		free_cbi = MPT_HANDLER_ID_NONE;
		for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
			/*
			 * If the same handler is registered multiple
			 * times, don't error out.  Just return the
			 * index of the original registration.
			 */
			if (mpt_reply_handlers[cbi] == handler.reply_handler) {
				*phandler_id = MPT_CBI_TO_HID(cbi);
				return (0);
			}

			/*
			 * Fill from the front in the hope that
			 * all registered handlers consume only a
			 * single cache line.
			 *
			 * We don't break on the first empty slot so
			 * that the full table is checked to see if
			 * this handler was previously registered.
			 */
			if (free_cbi == MPT_HANDLER_ID_NONE &&
			    (mpt_reply_handlers[cbi]
			  == mpt_default_reply_handler))
				free_cbi = cbi;
		}
		if (free_cbi == MPT_HANDLER_ID_NONE) {
			return (ENOMEM);
		}
		mpt_reply_handlers[free_cbi] = handler.reply_handler;
		*phandler_id = MPT_CBI_TO_HID(free_cbi);
		break;
	}
	default:
		mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}

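/*
 * Illustrative use of the registration interface above; this mirrors
 * how the personalities obtain their handler ids ("my_reply_handler"
 * is a placeholder name):
 *
 *	mpt_handler_t handler;
 *	uint32_t handler_id;
 *
 *	handler.reply_handler = my_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &handler_id);
 */
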
int
mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
		       mpt_handler_t handler, uint32_t handler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;

		cbi = MPT_CBI(handler_id);
		if (cbi >= MPT_NUM_REPLY_HANDLERS
		 || mpt_reply_handlers[cbi] != handler.reply_handler)
			return (ENOENT);
		mpt_reply_handlers[cbi] = mpt_default_reply_handler;
		break;
	}
	default:
		mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}

static int
mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
	uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	mpt_prt(mpt,
	    "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
	    req, req->serno, reply_desc, reply_frame);

	if (reply_frame != NULL)
		mpt_dump_reply_frame(mpt, reply_frame);

	mpt_prt(mpt, "Reply Frame Ignored\n");

	return (/*free_reply*/TRUE);
}

static int
mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	if (req != NULL) {
		if (reply_frame != NULL) {
			MSG_CONFIG *cfgp;
			MSG_CONFIG_REPLY *reply;

			cfgp = (MSG_CONFIG *)req->req_vbuf;
			reply = (MSG_CONFIG_REPLY *)reply_frame;
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
			bcopy(&reply->Header, &cfgp->Header,
			      sizeof(cfgp->Header));
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		}
	}

	return (TRUE);
}

static int
mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	/* Nothing to be done. */
	return (TRUE);
}

static int
mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_reply;

	KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
	KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));

	free_reply = TRUE;
	switch (reply_frame->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
	{
		MSG_EVENT_NOTIFY_REPLY *msg;
		struct mpt_personality *pers;
		u_int handled;

		handled = 0;
		msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
		MPT_PERS_FOREACH(mpt, pers)
			handled += pers->event(mpt, req, msg);

		if (handled == 0 && mpt->mpt_pers_mask == 0) {
			mpt_lprt(mpt, MPT_PRT_INFO,
				"No Handlers For Any Event Notify Frames. "
				"Event %#x (ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		} else if (handled == 0) {
			mpt_lprt(mpt, MPT_PRT_WARN,
				"Unhandled Event Notify Frame. Event %#x "
				"(ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		}

		if (msg->AckRequired) {
			request_t *ack_req;
			uint32_t context;

			context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS);
			ack_req = mpt_get_request(mpt, FALSE);
			if (ack_req == NULL) {
				struct mpt_evtf_record *evtf;

				evtf = (struct mpt_evtf_record *)reply_frame;
				evtf->context = context;
				LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
				free_reply = FALSE;
				break;
			}
			mpt_send_event_ack(mpt, ack_req, msg, context);
			/*
			 * Don't check for CONTINUATION_REPLY here
			 */
			return (free_reply);
		}
		break;
	}
	case MPI_FUNCTION_PORT_ENABLE:
		mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n");
		break;
	case MPI_FUNCTION_EVENT_ACK:
		break;
	default:
		mpt_prt(mpt, "unknown event function: %x\n",
			reply_frame->Function);
		break;
	}

	/*
	 * I'm not sure that this continuation stuff works as it should.
	 *
	 * I've had FC async events occur that free the frame up because
	 * the continuation bit isn't set, and then additional async events
	 * occur using the same context. As you might imagine, this leads
	 * to Very Bad Things.
	 *
	 * Let's just be safe for now and not free them up until we figure
	 * out what's actually happening here.
	 */
#if	0
	if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
		    reply_frame->Function, req, req->serno);
		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
			MSG_EVENT_NOTIFY_REPLY *msg =
			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
			    msg->Event, msg->AckRequired);
		}
	} else {
		mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
		    reply_frame->Function, req, req->serno);
		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
			MSG_EVENT_NOTIFY_REPLY *msg =
			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
			    msg->Event, msg->AckRequired);
		}
		mpt_prtc(mpt, "\n");
	}
#endif
	return (free_reply);
}

/*
 * Process an asynchronous event from the IOC.
 */
static int
mpt_core_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
	    msg->Event & 0xFF);
	switch (msg->Event & 0xFF) {
	case MPI_EVENT_NONE:
		break;
	case MPI_EVENT_LOG_DATA:
	{
		int i;

		/* Some error occurred that LSI wants logged */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
			msg->IOCLogInfo);
		mpt_prt(mpt, "\tEvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++)
			mpt_prtc(mpt, "  %08x", msg->Data[i]);
		mpt_prtc(mpt, "\n");
		break;
	}
	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement
		 * of our mpt_send_event_request.
		 */
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	default:
		return (0);
		break;
	}
	return (1);
}

static void
mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
		   MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
{
	MSG_EVENT_ACK *ackp;

	ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
	memset(ackp, 0, sizeof (*ackp));
	ackp->Function = MPI_FUNCTION_EVENT_ACK;
	ackp->Event = msg->Event;
	ackp->EventContext = msg->EventContext;
	ackp->MsgContext = context;
	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, ack_req);
}

/***************************** Interrupt Handling *****************************/
void
mpt_intr(void *arg)
{
	struct mpt_softc *mpt;
	uint32_t reply_desc;
	uint32_t last_reply_desc = MPT_REPLY_EMPTY;
	int ntrips = 0;

	mpt = (struct mpt_softc *)arg;
	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
		request_t	  *req;
		MSG_DEFAULT_REPLY *reply_frame;
		uint32_t	   reply_baddr;
		uint32_t	   ctxt_idx;
		u_int		   cb_index;
		u_int		   req_index;
		int		   free_rf;

		if (reply_desc == last_reply_desc) {
			mpt_prt(mpt, "debounce reply_desc 0x%x\n", reply_desc);
			if (ntrips++ == 1000) {
				break;
			}
			continue;
		}
		last_reply_desc = reply_desc;

		req = NULL;
		reply_frame = NULL;
		reply_baddr = 0;
		if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
			u_int offset;
			/*
			 * Ensure that the reply frame is coherent.
			 */
			reply_baddr = MPT_REPLY_BADDR(reply_desc);
			offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
			bus_dmamap_sync_range(mpt->reply_dmat,
			    mpt->reply_dmap, offset, MPT_REPLY_SIZE,
			    BUS_DMASYNC_POSTREAD);
			reply_frame = MPT_REPLY_OTOV(mpt, offset);
			ctxt_idx = le32toh(reply_frame->MsgContext);
		} else {
			uint32_t type;

			type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
			ctxt_idx = reply_desc;
			mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
				    reply_desc);

			switch (type) {
			case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
				ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
				break;
			case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
				ctxt_idx = GET_IO_INDEX(reply_desc);
				if (mpt->tgt_cmd_ptrs == NULL) {
					mpt_prt(mpt,
					    "mpt_intr: no target cmd ptrs\n");
					reply_desc = MPT_REPLY_EMPTY;
					break;
				}
				if (ctxt_idx >= mpt->tgt_cmds_allocated) {
					mpt_prt(mpt,
					    "mpt_intr: bad tgt cmd ctxt %u\n",
					    ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				req = mpt->tgt_cmd_ptrs[ctxt_idx];
				if (req == NULL) {
					mpt_prt(mpt, "no request backpointer "
					    "at index %u", ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				/*
				 * Reformulate ctxt_idx to be just as if
				 * it were another type of context reply
				 * so the code below will find the request
				 * via indexing into the pool.
				 */
				ctxt_idx =
				    req->index | mpt->scsi_tgt_handler_id;
				req = NULL;
				break;
			case MPI_CONTEXT_REPLY_TYPE_LAN:
				mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
				    reply_desc);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			default:
				mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			}
			if (reply_desc == MPT_REPLY_EMPTY) {
				if (ntrips++ > 1000) {
					break;
				}
				continue;
			}
		}

		cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
		req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
		if (req_index < MPT_MAX_REQUESTS(mpt)) {
			req = &mpt->request_pool[req_index];
		} else {
			mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
			    " 0x%x)\n", req_index, reply_desc);
		}

		free_rf = mpt_reply_handlers[cb_index](mpt, req,
		    reply_desc, reply_frame);

		if (reply_frame != NULL && free_rf) {
			mpt_free_reply(mpt, reply_baddr);
		}

		/*
		 * If we got ourselves disabled, don't get stuck in a loop
		 */
		if (mpt->disabled) {
			mpt_disable_ints(mpt);
			break;
		}
		if (ntrips++ > 1000) {
			break;
		}
	}
}

/******************************* Error Recovery *******************************/
void
mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
			    u_int iocstatus)
{
	MSG_DEFAULT_REPLY  ioc_status_frame;
	request_t	  *req;

	memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
	ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
	ioc_status_frame.IOCStatus = iocstatus;
	while ((req = TAILQ_FIRST(chain)) != NULL) {
		MSG_REQUEST_HEADER *msg_hdr;
		u_int		    cb_index;

		TAILQ_REMOVE(chain, req, links);
		msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
		ioc_status_frame.Function = msg_hdr->Function;
		ioc_status_frame.MsgContext = msg_hdr->MsgContext;
		cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
		mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
		    &ioc_status_frame);
	}
}

/********************************* Diagnostics ********************************/
/*
 * Perform a diagnostic dump of a reply frame.
 */
void
mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
{
	mpt_prt(mpt, "Address Reply:\n");
	mpt_print_reply(reply_frame);
}

/******************************* Doorbell Access ******************************/
static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);

static __inline uint32_t
mpt_rd_db(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_DOORBELL);
}

static __inline uint32_t
mpt_rd_intr(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
}

/* Busy wait for a door bell to be read by IOC */
static int
mpt_wait_db_ack(struct mpt_softc *mpt)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
			maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
			return (MPT_OK);
		}
		DELAY(200);
	}
	return (MPT_FAIL);
}

/* Busy wait for a door bell interrupt */
static int
mpt_wait_db_int(struct mpt_softc *mpt)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
			maxwait_int = i > maxwait_int ? i : maxwait_int;
			return MPT_OK;
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}

/* Check that the doorbell indicates the IOC is running; complain if not */
void
mpt_check_doorbell(struct mpt_softc *mpt)
{
	uint32_t db = mpt_rd_db(mpt);

	if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "Device not running\n");
		mpt_print_db(db);
	}
}

/* Wait for the IOC to transition to a given state */
static int
mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		uint32_t db = mpt_rd_db(mpt);

		if (MPT_STATE(db) == state) {
			maxwait_state = i > maxwait_state ? i : maxwait_state;
			return (MPT_OK);
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}


/************************ Initialization/Configuration ************************/
static int mpt_download_fw(struct mpt_softc *mpt);

/* Issue the reset COMMAND to the IOC */
static int
mpt_soft_reset(struct mpt_softc *mpt)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");

	/* Have to use hard reset if we are not in Running state */
	if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "soft reset failed: device not running\n");
		return (MPT_FAIL);
	}

	/*
	 * If the door bell is in use we don't have a chance of getting
	 * a word in since the IOC probably crashed in message
	 * processing.  So don't waste our time.
	 */
	if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
		mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
		return (MPT_FAIL);
	}

	/* Send the reset request to the IOC */
	mpt_write(mpt, MPT_OFFSET_DOORBELL,
	    MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: ack timeout\n");
		return (MPT_FAIL);
	}

	/* Wait for the IOC to reload and come out of reset state */
	if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: device did not restart\n");
		return (MPT_FAIL);
	}

	return MPT_OK;
}

static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
	int try;

	try = 20;
	while (--try) {
		if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
			break;

		/* Enable diagnostic registers */
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

		DELAY(100000);
	}
	if (try == 0)
		return (EIO);
	return (0);
}

static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{
	mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}

/* This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
	int error;
	int wait;
	uint32_t diagreg;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

	error = mpt_enable_diag_mode(mpt);
	if (error) {
		mpt_prt(mpt, "WARNING - Could not enter diagnostic mode!\n");
		mpt_prt(mpt, "Trying to reset anyway.\n");
	}

	diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

	/*
	 * This appears to be a workaround required for some
	 * firmware or hardware revs.
	 */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
	DELAY(1000);

	/* Diag. port is now active so we can now hit the reset bit */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

	/*
	 * Ensure that the reset has finished.  We delay 1ms
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 5000;
	do {
		DELAY(1000);
		diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	} while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

	if (wait == 0) {
		mpt_prt(mpt, "WARNING - Failed hard reset! "
			"Trying to initialize anyway.\n");
	}

	/*
	 * If we have firmware to download, it must be loaded before
	 * the controller will become operational.  Do so now.
	 */
	if (mpt->fw_image != NULL) {
		error = mpt_download_fw(mpt);
		if (error) {
			mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
			mpt_prt(mpt, "Trying to initialize anyway.\n");
		}
	}

	/*
	 * Resetting the controller should have disabled write
	 * access to the diagnostic registers, but disable
	 * manually to be sure.
	 */
	mpt_disable_diag_mode(mpt);
}
1046 
1047 static void
1048 mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
1049 {
1050 	/*
1051 	 * Complete all pending requests with a status
1052 	 * appropriate for an IOC reset.
1053 	 */
1054 	mpt_complete_request_chain(mpt, &mpt->request_pending_list,
1055 				   MPI_IOCSTATUS_INVALID_STATE);
1056 }
1057 

/*
 * Reset the IOC when needed.  Try a software command first; if that
 * fails, poke at the magic diagnostic reset.  Note that a hard reset
 * resets *both* IOCs on dual-function chips (FC929 and LSI1030), and
 * also fouls up the PCI configuration registers.
 */
int
mpt_reset(struct mpt_softc *mpt, int reinit)
{
	struct	mpt_personality *pers;
	int	ret;
	int	retry_cnt = 0;

	/*
	 * Try a soft reset. If that fails, get out the big hammer.
	 */
 again:
	if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
		int	cnt;
		for (cnt = 0; cnt < 5; cnt++) {
			/* Failed; do a hard reset */
			mpt_hard_reset(mpt);

			/*
			 * Wait for the IOC to reload
			 * and come out of reset state
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			/*
			 * Okay- try to check again...
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
			    retry_cnt, cnt);
		}
	}

	if (retry_cnt == 0) {
		/*
		 * Invoke reset handlers.  We bump the reset count so
		 * that mpt_wait_req() understands that regardless of
		 * the specified wait condition, it should stop its wait.
		 */
		mpt->reset_cnt++;
		MPT_PERS_FOREACH(mpt, pers)
			pers->reset(mpt, ret);
	}

	if (reinit) {
		ret = mpt_enable_ioc(mpt, 1);
		if (ret == MPT_OK) {
			mpt_enable_ints(mpt);
		}
	}
	if (ret != MPT_OK && retry_cnt++ < 2) {
		goto again;
	}
	return ret;
}

/* Return a command buffer to the free queue */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
	request_t *nxt;
	struct mpt_evtf_record *record;
	uint32_t reply_baddr;

	if (req == NULL || req != &mpt->request_pool[req->index]) {
		panic("mpt_free_request bad req ptr\n");
		return;
	}
	if ((nxt = req->chain) != NULL) {
		req->chain = NULL;
		mpt_free_request(mpt, nxt);	/* NB: recursion */
	}

	KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
	KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));

	req->ccb = NULL;

	if (LIST_EMPTY(&mpt->ack_frames)) {
		/*
		 * Insert free ones at the tail
		 */
		req->serno = 0;
		req->state = REQ_STATE_FREE;
		TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
		if (mpt->getreqwaiter != 0) {
			mpt->getreqwaiter = 0;
			wakeup(&mpt->request_free_list);
		}
		return;
	}

	/*
	 * Process an ack frame deferred due to resource shortage.
	 */
	record = LIST_FIRST(&mpt->ack_frames);
	LIST_REMOVE(record, links);
	req->state = REQ_STATE_ALLOCATED;
	if ((req->serno = mpt->sequence++) == 0) {
		req->serno = mpt->sequence++;
	}
	mpt_send_event_ack(mpt, req, &record->reply, record->context);
	reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
		    + (mpt->reply_phys & 0xFFFFFFFF);
	mpt_free_reply(mpt, reply_baddr);
}

/* Get a command buffer from the free queue */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
	request_t *req;

retry:
	req = TAILQ_FIRST(&mpt->request_free_list);
	if (req != NULL) {
		KASSERT(req == &mpt->request_pool[req->index],
		    ("mpt_get_request: corrupted request free list\n"));
		KASSERT(req->state == REQ_STATE_FREE,
		    ("req not free on free list %x", req->state));
		TAILQ_REMOVE(&mpt->request_free_list, req, links);
		req->state = REQ_STATE_ALLOCATED;
		req->chain = NULL;
		if ((req->serno = mpt->sequence++) == 0) {
			req->serno = mpt->sequence++;
		}
	} else if (sleep_ok != 0) {
		mpt->getreqwaiter = 1;
		mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
		goto retry;
	}
	return (req);
}

/* Pass the command to the IOC */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{
	uint32_t *pReq;

	pReq = req->req_vbuf;
	if (mpt->verbose > MPT_PRT_TRACE) {
		int offset;
#if __FreeBSD_version >= 500000
		mpt_prt(mpt, "Send Request %d (%jx):",
		    req->index, (uintmax_t) req->req_pbuf);
#else
		mpt_prt(mpt, "Send Request %d (%llx):",
		    req->index, (unsigned long long) req->req_pbuf);
#endif
		for (offset = 0; offset < mpt->request_frame_size; offset++) {
			if ((offset & 0x7) == 0) {
				mpt_prtc(mpt, "\n");
				mpt_prt(mpt, " ");
			}
			mpt_prtc(mpt, " %08x", pReq[offset]);
		}
		mpt_prtc(mpt, "\n");
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_PREWRITE);
	req->state |= REQ_STATE_QUEUED;
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
	mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
}

/*
 * Wait for a request to complete.
 *
 * Inputs:
 *	mpt		softc of controller executing request
 *	req		request to wait for
 *	state		desired request state
 *	mask		state bits to compare (NEED_WAKEUP is ignored)
 *	sleep_ok	nonzero implies may sleep in this context
 *	time_ms		timeout in ms.  0 implies no timeout.
 *
 * Return Values:
 *	0		Request completed
 *	non-0		Timeout fired before request completion,
 *			or the IOC was reset while waiting.
 */
int
mpt_wait_req(struct mpt_softc *mpt, request_t *req,
	     mpt_req_state_t state, mpt_req_state_t mask,
	     int sleep_ok, int time_ms)
{
	int   error;
	int   timeout;
	u_int saved_cnt;

	/*
	 * timeout is in ms.  0 indicates infinite wait.
	 * Convert to ticks or 500us units depending on
	 * our sleep mode.
	 */
	if (sleep_ok != 0) {
		timeout = (time_ms * hz) / 1000;
	} else {
		timeout = time_ms * 2;
	}
	req->state |= REQ_STATE_NEED_WAKEUP;
	mask &= ~REQ_STATE_NEED_WAKEUP;
	saved_cnt = mpt->reset_cnt;
	while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
		if (sleep_ok != 0) {
			error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
			if (error == EWOULDBLOCK) {
				timeout = 0;
				break;
			}
		} else {
			if (time_ms != 0 && --timeout == 0) {
				break;
			}
			DELAY(500);
			mpt_intr(mpt);
		}
	}
	req->state &= ~REQ_STATE_NEED_WAKEUP;
	if (mpt->reset_cnt != saved_cnt) {
		return (EIO);
	}
	if (time_ms && timeout <= 0) {
		MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;

		mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
		return (ETIMEDOUT);
	}
	return (0);
}

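/*
 * Typical (illustrative) use, as in the config and port-enable paths
 * below: queue a request, then poll for its completion with a timeout.
 *
 *	mpt_check_doorbell(mpt);
 *	mpt_send_cmd(mpt, req);
 *	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 *	    FALSE, 2000);
 */
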
/*
 * Send a command to the IOC via the handshake register.
 *
 * Only done at initialization time and for certain unusual
 * commands such as device/bus reset as specified by LSI.
 */
int
mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
{
	int i;
	uint32_t data, *data32;

	/* Check condition of the IOC */
	data = mpt_rd_db(mpt);
	if ((MPT_STATE(data) != MPT_DB_STATE_READY
	  && MPT_STATE(data) != MPT_DB_STATE_RUNNING
	  && MPT_STATE(data) != MPT_DB_STATE_FAULT)
	 || MPT_DB_IS_IN_USE(data)) {
		mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
		mpt_print_db(data);
		return (EBUSY);
	}

	/* We move things in 32 bit chunks */
	len = (len + 3) >> 2;
	data32 = cmd;

	/* Clear any leftover pending doorbell interrupts */
	if (MPT_DB_INTR(mpt_rd_intr(mpt)))
		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * Tell the handshake reg. we are going to send a command
	 * and how long it is going to be.
	 */
	data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
	    (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
	mpt_write(mpt, MPT_OFFSET_DOORBELL, data);

	/* Wait for the chip to notice */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n");
		return (ETIMEDOUT);
	}

	/* Clear the interrupt */
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n");
		return (ETIMEDOUT);
	}

	/* Send the command */
	for (i = 0; i < len; i++) {
		mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++);
		if (mpt_wait_db_ack(mpt) != MPT_OK) {
			mpt_prt(mpt,
				"mpt_send_handshake_cmd timeout! index = %d\n",
				i);
			return (ETIMEDOUT);
		}
	}
	return MPT_OK;
}

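/*
 * To summarize the handshake sequence above: the function and dword
 * count are written to the doorbell; we wait for the doorbell
 * interrupt and clear it; we wait for the IOC to acknowledge; then
 * each dword of the command is written, waiting for an ack after
 * every write.
 */
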
/* Get the response from the handshake register */
int
mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
{
	int left, reply_left;
	u_int16_t *data16;
	MSG_DEFAULT_REPLY *hdr;

	/* We move things out in 16 bit chunks */
	reply_len >>= 1;
	data16 = (u_int16_t *)reply;

	hdr = (MSG_DEFAULT_REPLY *)reply;

	/* Get first word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
		return ETIMEDOUT;
	}
	*data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/* Get second word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
		return ETIMEDOUT;
	}
	*data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * With the second word, we can now look at the length.
	 * Warn about a reply that's too short (except for IOC FACTS REPLY)
	 */
	if ((reply_len >> 1) != hdr->MsgLength &&
	    (hdr->Function != MPI_FUNCTION_IOC_FACTS)) {
#if __FreeBSD_version >= 500000
		mpt_prt(mpt, "reply length does not match message length: "
			"got %x; expected %zx for function %x\n",
			hdr->MsgLength << 2, reply_len << 1, hdr->Function);
#else
		mpt_prt(mpt, "reply length does not match message length: "
			"got %x; expected %x for function %x\n",
			hdr->MsgLength << 2, reply_len << 1, hdr->Function);
#endif
	}

	/* Get rest of the reply; but don't overflow the provided buffer */
	left = (hdr->MsgLength << 1) - 2;
	reply_left = reply_len - 2;
	while (left--) {
		u_int16_t datum;

		if (mpt_wait_db_int(mpt) != MPT_OK) {
			mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
			return ETIMEDOUT;
		}
		datum = mpt_read(mpt, MPT_OFFSET_DOORBELL);

		if (reply_left-- > 0)
			*data16++ = datum & MPT_DB_DATA_MASK;

		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
	}

	/* One more wait & clear at the end */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
		return ETIMEDOUT;
	}
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		if (mpt->verbose >= MPT_PRT_TRACE)
			mpt_print_reply(hdr);
		return (MPT_FAIL | hdr->IOCStatus);
	}

	return (0);
}

static int
mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
{
	MSG_IOC_FACTS f_req;
	int error;

	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_IOC_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error)
		return (error);
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

static int
mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp)
{
	MSG_PORT_FACTS f_req;
	int error;

	/* XXX: Only getting PORT FACTS for Port 0 */
	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_PORT_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error)
		return (error);
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

/*
 * Send the initialization request. This is where we specify how many
 * SCSI busses and how many devices per bus we wish to emulate.
 * This is also the command that specifies the max size of the reply
 * frames from the IOC that we will be allocating.
 */
static int
mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
{
	int error = 0;
	MSG_IOC_INIT init;
	MSG_IOC_INIT_REPLY reply;

	memset(&init, 0, sizeof init);
	init.WhoInit = who;
	init.Function = MPI_FUNCTION_IOC_INIT;
	if (mpt->is_fc) {
		init.MaxDevices = 255;
	} else if (mpt->is_sas) {
		init.MaxDevices = mpt->mpt_max_devices;
	} else {
		init.MaxDevices = 16;
	}
	init.MaxBuses = 1;

	init.MsgVersion = htole16(MPI_VERSION);
	init.HeaderVersion = htole16(MPI_HEADER_VERSION);
	init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
	init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);

	if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
		return (error);
	}

	error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
	return (error);
}


/*
 * Utility routine to read and write configuration headers and pages
 */
int
mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action,
		  u_int PageVersion, u_int PageLength, u_int PageNumber,
		  u_int PageType, uint32_t PageAddress, bus_addr_t addr,
		  bus_size_t len, int sleep_ok, int timeout_ms)
{
	MSG_CONFIG *cfgp;
	SGE_SIMPLE32 *se;

	cfgp = req->req_vbuf;
	memset(cfgp, 0, sizeof *cfgp);
	cfgp->Action = Action;
	cfgp->Function = MPI_FUNCTION_CONFIG;
	cfgp->Header.PageVersion = PageVersion;
	cfgp->Header.PageLength = PageLength;
	cfgp->Header.PageNumber = PageNumber;
	cfgp->Header.PageType = PageType;
	cfgp->PageAddress = PageAddress;
	se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
	se->Address = addr;
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
	  || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
	   ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
			     sleep_ok, timeout_ms));
}


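/*
 * The usual access pattern (see mpt_read_cfg_header() and
 * mpt_read_cfg_page() below) is a two step read: fetch the page
 * header first, then use the returned header to read the full page
 * into the request's DMA buffer at req->req_pbuf + MPT_RQSL(mpt).
 */
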
int
mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
		    uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
		    int sleep_ok, int timeout_ms)
{
	request_t  *req;
	MSG_CONFIG *cfgp;
	int	    error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER,
				  /*PageVersion*/0, /*PageLength*/0, PageNumber,
				  PageType, PageAddress, /*addr*/0, /*len*/0,
				  sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_free_request(mpt, req);
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, rslt, sizeof(*rslt));
		error = 0;
		break;
	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Invalid Page Type %d Number %d Addr 0x%0x\n",
		    PageType, PageNumber, PageAddress);
		error = EINVAL;
		break;
	default:
		mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
			req->IOCStatus);
		error = EIO;
		break;
	}
	mpt_free_request(mpt, req);
	return (error);
}

int
mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		  CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		  int timeout_ms)
{
	request_t    *req;
	int	      error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
		return (-1);
	}

	error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
				  hdr->PageLength, hdr->PageNumber,
				  hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
				  PageAddress, req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_POSTREAD);
	memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
	mpt_free_request(mpt, req);
	return (0);
}

int
mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		   CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		   int timeout_ms)
{
	request_t    *req;
	u_int	      hdr_attr;
	int	      error;

	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (-1);
	}
	hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL)
		return (-1);

	memcpy(((caddr_t)req->req_vbuf)+MPT_RQSL(mpt), hdr, len);
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;

	error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
				  hdr->PageLength, hdr->PageNumber,
				  hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
				  PageAddress, req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	mpt_free_request(mpt, req);
	return (0);
}

/*
 * Read IOC configuration information
 */
static int
mpt_read_config_info_ioc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_HEADER hdr;
	struct mpt_raid_volume *mpt_raid;
	int rv;
	int i;
	size_t len;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
				 /*PageNumber*/2, /*PageAddress*/0, &hdr,
				 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	/*
	 * If it's an invalid page, so what? Not a supported function....
	 */
	if (rv == EINVAL)
		return (0);
	if (rv)
		return (rv);

#if __FreeBSD_version >= 500000
	mpt_lprt(mpt, MPT_PRT_DEBUG,  "IOC Page 2 Header: ver %x, len %zx, "
		 "num %x, type %x\n", hdr.PageVersion,
		 hdr.PageLength * sizeof(uint32_t),
		 hdr.PageNumber, hdr.PageType);
#else
	mpt_lprt(mpt, MPT_PRT_DEBUG,  "IOC Page 2 Header: ver %x, len %lx, "
		 "num %x, type %x\n", hdr.PageVersion,
		 (u_long)(hdr.PageLength * sizeof(uint32_t)),
		 hdr.PageNumber, hdr.PageType);
#endif

	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page2 == NULL)
		return (ENOMEM);
	memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
				   &mpt->ioc_page2->Header, len,
				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "failed to read IOC Page 2\n");
	} else if (mpt->ioc_page2->CapabilitiesFlags != 0) {
		uint32_t mask;

		mpt_prt(mpt, "Capabilities: (");
		for (mask = 1; mask != 0; mask <<= 1) {
			if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0)
				continue;

			switch (mask) {
			case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
				mpt_prtc(mpt, " RAID-0");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
				mpt_prtc(mpt, " RAID-1E");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
				mpt_prtc(mpt, " RAID-1");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
				mpt_prtc(mpt, " SES");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
				mpt_prtc(mpt, " SAFTE");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
				mpt_prtc(mpt, " Multi-Channel-Arrays");
				break;
			default:
				break;
			}
1759 		}
1760 		mpt_prtc(mpt, " )\n");
1761 		if ((mpt->ioc_page2->CapabilitiesFlags
1762 		   & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
1763 		    | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
1764 		    | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
1765 			mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
1766 				mpt->ioc_page2->NumActiveVolumes,
1767 				mpt->ioc_page2->NumActiveVolumes != 1
1768 			      ? "s " : " ",
1769 				mpt->ioc_page2->MaxVolumes);
1770 			mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
1771 				mpt->ioc_page2->NumActivePhysDisks,
1772 				mpt->ioc_page2->NumActivePhysDisks != 1
1773 			      ? "s " : " ",
1774 				mpt->ioc_page2->MaxPhysDisks);
1775 		}
1776 	}
1777 
1778 	len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
	mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->raid_volumes == NULL) {
		mpt_prt(mpt, "Could not allocate RAID volume data\n");
	}
1785 
1786 	/*
1787 	 * Copy critical data out of ioc_page2 so that we can
1788 	 * safely refresh the page without windows of unreliable
1789 	 * data.
1790 	 */
	mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
1792 
1793 	len = sizeof(*mpt->raid_volumes->config_page)
1794 	    + (sizeof(RAID_VOL0_PHYS_DISK)*(mpt->ioc_page2->MaxPhysDisks - 1));
	for (i = 0; mpt->raid_volumes != NULL &&
	    i < mpt->ioc_page2->MaxVolumes; i++) {
		mpt_raid = &mpt->raid_volumes[i];
		mpt_raid->config_page = malloc(len, M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (mpt_raid->config_page == NULL) {
			mpt_prt(mpt, "Could not allocate RAID page data\n");
			break;
		}
	}
1804 	mpt->raid_page0_len = len;
1805 
1806 	len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
	mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->raid_disks == NULL) {
		mpt_prt(mpt, "Could not allocate RAID disk data\n");
	}
1813 
	mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
1815 
1816 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1817 				 /*PageNumber*/3, /*PageAddress*/0, &hdr,
1818 				 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1819 	if (rv)
1820 		return (EIO);
1821 
1822 	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
1823 		 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
1824 
1825 	if (mpt->ioc_page3 != NULL)
1826 		free(mpt->ioc_page3, M_DEVBUF);
1827 	len = hdr.PageLength * sizeof(uint32_t);
1828 	mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1829 	if (mpt->ioc_page3 == NULL)
		return (ENOMEM);
1831 	memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
1832 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1833 				   &mpt->ioc_page3->Header, len,
1834 				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1835 	if (rv) {
1836 		mpt_prt(mpt, "failed to read IOC Page 3\n");
1837 	}
1838 
1839 	mpt_raid_wakeup(mpt);
1840 
1841 	return (0);
1842 }
1843 
1844 /*
1845  * Enable IOC port
1846  */
1847 static int
1848 mpt_send_port_enable(struct mpt_softc *mpt, int port)
1849 {
1850 	request_t	*req;
1851 	MSG_PORT_ENABLE *enable_req;
1852 	int		 error;
1853 
1854 	req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
1855 	if (req == NULL)
1856 		return (-1);
1857 
1858 	enable_req = req->req_vbuf;
	memset(enable_req, 0, MPT_RQSL(mpt));
1860 
1861 	enable_req->Function   = MPI_FUNCTION_PORT_ENABLE;
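	/*
	 * MsgContext carries the request index plus a reply-handler
	 * cookie; the interrupt path uses it to route this completion.
	 */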
1862 	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1863 	enable_req->PortNumber = port;
1864 
1865 	mpt_check_doorbell(mpt);
1866 	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
1867 
1868 	mpt_send_cmd(mpt, req);
1869 	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1870 	    /*sleep_ok*/FALSE,
1871 	    /*time_ms*/(mpt->is_sas || mpt->is_fc)? 30000 : 3000);
1872 	if (error != 0) {
1873 		mpt_prt(mpt, "port %d enable timed out\n", port);
1874 		return (-1);
1875 	}
1876 	mpt_free_request(mpt, req);
1877 	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
1878 	return (0);
1879 }
1880 
1881 /*
1882  * Enable/Disable asynchronous event reporting.
1883  */
1884 static int
1885 mpt_send_event_request(struct mpt_softc *mpt, int onoff)
1886 {
1887 	request_t *req;
1888 	MSG_EVENT_NOTIFY *enable_req;
1889 
1890 	req = mpt_get_request(mpt, FALSE);
1891 	if (req == NULL) {
1892 		return (ENOMEM);
1893 	}
1894 	enable_req = req->req_vbuf;
1895 	memset(enable_req, 0, sizeof *enable_req);
1896 
1897 	enable_req->Function   = MPI_FUNCTION_EVENT_NOTIFICATION;
1898 	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
1899 	enable_req->Switch     = onoff;
1900 
1901 	mpt_check_doorbell(mpt);
1902 	mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
1903 	    onoff ? "en" : "dis");
1904 	/*
1905 	 * Send the command off, but don't wait for it.
1906 	 */
1907 	mpt_send_cmd(mpt, req);
1908 	return (0);
1909 }
1910 
1911 /*
 * Unmask the interrupts on the chip.
1913  */
1914 void
1915 mpt_enable_ints(struct mpt_softc *mpt)
1916 {
	/* Unmask everything except the doorbell interrupt */
1918 	mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
1919 }
1920 
1921 /*
 * Mask the interrupts on the chip.
1923  */
1924 void
1925 mpt_disable_ints(struct mpt_softc *mpt)
1926 {
1927 	/* Mask all interrupts */
1928 	mpt_write(mpt, MPT_OFFSET_INTR_MASK,
1929 	    MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
1930 }
1931 
1932 static void
1933 mpt_sysctl_attach(struct mpt_softc *mpt)
1934 {
1935 #if __FreeBSD_version >= 500000
1936 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1937 	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1938 
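	/* Exposed at runtime as the dev.mpt.<unit>.debug sysctl. */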
1939 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1940 		       "debug", CTLFLAG_RW, &mpt->verbose, 0,
1941 		       "Debugging/Verbose level");
1942 #endif
1943 }
1944 
1945 int
1946 mpt_attach(struct mpt_softc *mpt)
1947 {
1948 	struct mpt_personality *pers;
1949 	int i;
1950 	int error;
1951 
1952 	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
1953 		pers = mpt_personalities[i];
1954 		if (pers == NULL) {
1955 			continue;
1956 		}
1957 		if (pers->probe(mpt) == 0) {
1958 			error = pers->attach(mpt);
1959 			if (error != 0) {
1960 				mpt_detach(mpt);
1961 				return (error);
1962 			}
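			/*
			 * Record the successful attach so the enable loop
			 * below and mpt_detach() can find this personality.
			 */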
1963 			mpt->mpt_pers_mask |= (0x1 << pers->id);
1964 			pers->use_count++;
1965 		}
1966 	}
1967 
1968 	/*
1969 	 * Now that we've attached everything, do the enable function
1970 	 * for all of the personalities. This allows the personalities
1971 	 * to do setups that are appropriate for them prior to enabling
1972 	 * any ports.
1973 	 */
1974 	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
1975 		pers = mpt_personalities[i];
		if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
1977 			error = pers->enable(mpt);
1978 			if (error != 0) {
				mpt_prt(mpt, "personality %s attached but "
				    "failed to enable (%d)\n", pers->name,
				    error);
1981 				mpt_detach(mpt);
1982 				return (error);
1983 			}
1984 		}
1985 	}
1986 	return (0);
1987 }
1988 
1989 int
1990 mpt_shutdown(struct mpt_softc *mpt)
1991 {
1992 	struct mpt_personality *pers;
1993 
1994 	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
1995 		pers->shutdown(mpt);
1996 	}
1997 	return (0);
1998 }
1999 
2000 int
2001 mpt_detach(struct mpt_softc *mpt)
2002 {
2003 	struct mpt_personality *pers;
2004 
2005 	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2006 		pers->detach(mpt);
2007 		mpt->mpt_pers_mask &= ~(0x1 << pers->id);
2008 		pers->use_count--;
2009 	}
2010 
2011 	return (0);
2012 }
2013 
2014 int
2015 mpt_core_load(struct mpt_personality *pers)
2016 {
2017 	int i;
2018 
2019 	/*
2020 	 * Setup core handlers and insert the default handler
2021 	 * into all "empty slots".
2022 	 */
2023 	for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
2024 		mpt_reply_handlers[i] = mpt_default_reply_handler;
2025 	}
2026 
2027 	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
2028 	    mpt_event_reply_handler;
2029 	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
2030 	    mpt_config_reply_handler;
2031 	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
2032 	    mpt_handshake_reply_handler;
2033 	return (0);
2034 }
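
/*
 * Sketch (hypothetical names): a personality that wants private
 * completion routing would install its handler the same way, with its
 * own reply-handler cookie:
 *
 *	mpt_reply_handlers[MPT_CBI(MY_HANDLER_COOKIE)] = my_reply_handler;
 */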
2035 
2036 /*
2037  * Initialize per-instance driver data and perform
2038  * initial controller configuration.
2039  */
2040 int
2041 mpt_core_attach(struct mpt_softc *mpt)
2042 {
	int val;
	int error;

2047 	LIST_INIT(&mpt->ack_frames);
2048 
2049 	/* Put all request buffers on the free list */
2050 	TAILQ_INIT(&mpt->request_pending_list);
2051 	TAILQ_INIT(&mpt->request_free_list);
2052 	TAILQ_INIT(&mpt->request_timeout_list);
2053 	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
2054 		request_t *req = &mpt->request_pool[val];
2055 		req->state = REQ_STATE_ALLOCATED;
2056 		mpt_free_request(mpt, req);
2057 	}
2058 
2059 	for (val = 0; val < MPT_MAX_LUNS; val++) {
2060 		STAILQ_INIT(&mpt->trt[val].atios);
2061 		STAILQ_INIT(&mpt->trt[val].inots);
2062 	}
2063 	STAILQ_INIT(&mpt->trt_wildcard.atios);
2064 	STAILQ_INIT(&mpt->trt_wildcard.inots);
2065 
2066 	mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
2067 
2068 	mpt_sysctl_attach(mpt);
2069 
	mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell reg = %s\n",
2071 	    mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
2072 
2073 	error = mpt_configure_ioc(mpt);
2074 
2075 	return (error);
2076 }
2077 
2078 int
2079 mpt_core_enable(struct mpt_softc *mpt)
2080 {
2081 	/*
2082 	 * We enter with the IOC enabled, but async events
2083 	 * not enabled, ports not enabled and interrupts
2084 	 * not enabled.
2085 	 */
2086 
2087 	/*
	 * Enable asynchronous event reporting. All personalities have
	 * attached by now, so they should be able to field async
	 * events.
2091 	 */
2092 	mpt_send_event_request(mpt, 1);
2093 
2094 	/*
	 * Catch any pending interrupts.
	 *
	 * This seems to be crucial; otherwise
	 * the port enable below times out.
2099 	 */
2100 	mpt_intr(mpt);
2101 
2102 	/*
2103 	 * Enable Interrupts
2104 	 */
2105 	mpt_enable_ints(mpt);
2106 
2107 	/*
	 * Catch any pending interrupts.
	 *
	 * This seems to be crucial; otherwise
	 * the port enable below times out.
2112 	 */
2113 	mpt_intr(mpt);
2114 
2115 	/*
	 * Enable the port. mpt_configure_ioc() already rejected ports
	 * that support neither the initiator nor the target role.
2117 	 */
2118 	if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2119 		mpt_prt(mpt, "failed to enable port 0\n");
2120 		return (ENXIO);
2121 	}
2122 	return (0);
2123 }
2124 
2125 void
2126 mpt_core_shutdown(struct mpt_softc *mpt)
2127 {
2128 	mpt_disable_ints(mpt);
2129 }
2130 
2131 void
2132 mpt_core_detach(struct mpt_softc *mpt)
2133 {
2134 	mpt_disable_ints(mpt);
2135 }
2136 
2137 int
2138 mpt_core_unload(struct mpt_personality *pers)
2139 {
	/* Unload is always successful. */
2141 	return (0);
2142 }
2143 
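/*
 * Size of a firmware upload request: the fixed MSG_FW_UPLOAD layout
 * minus its variable SGL union, plus one transaction-context element
 * and a single simple 32-bit SGE describing the host buffer.
 */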
2144 #define FW_UPLOAD_REQ_SIZE				\
2145 	(sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION)	\
2146        + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
2147 
2148 static int
2149 mpt_upload_fw(struct mpt_softc *mpt)
2150 {
2151 	uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
2152 	MSG_FW_UPLOAD_REPLY fw_reply;
2153 	MSG_FW_UPLOAD *fw_req;
2154 	FW_UPLOAD_TCSGE *tsge;
2155 	SGE_SIMPLE32 *sge;
2156 	uint32_t flags;
2157 	int error;
2158 
2159 	memset(&fw_req_buf, 0, sizeof(fw_req_buf));
2160 	fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
2161 	fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
2162 	fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
2163 	fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
2164 	tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
2165 	tsge->DetailsLength = 12;
2166 	tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
2167 	tsge->ImageSize = htole32(mpt->fw_image_size);
2168 	sge = (SGE_SIMPLE32 *)(tsge + 1);
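	/*
	 * The SGE FlagsLength word carries the flag bits above
	 * MPI_SGE_FLAGS_SHIFT and the transfer length in the low bits.
	 */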
2169 	flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
2170 	      | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
2171 	      | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
2172 	flags <<= MPI_SGE_FLAGS_SHIFT;
2173 	sge->FlagsLength = htole32(flags | mpt->fw_image_size);
2174 	sge->Address = htole32(mpt->fw_phys);
2175 	error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
2176 	if (error)
2177 		return(error);
2178 	error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
2179 	return (error);
2180 }
2181 
2182 static void
2183 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
2184 	       uint32_t *data, bus_size_t len)
2185 {
2186 	uint32_t *data_end;
2187 
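	/* Round the byte count up to a whole number of 32-bit words. */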
	data_end = data + (roundup2(len, sizeof(uint32_t)) / sizeof(uint32_t));
2189 	pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2190 	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
2191 	while (data != data_end) {
2192 		mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
2193 		data++;
2194 	}
2195 	pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2196 }
2197 
2198 static int
2199 mpt_download_fw(struct mpt_softc *mpt)
2200 {
2201 	MpiFwHeader_t *fw_hdr;
2202 	int error;
2203 	uint32_t ext_offset;
2204 	uint32_t data;
2205 
2206 	mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
2207 		mpt->fw_image_size);
2208 
2209 	error = mpt_enable_diag_mode(mpt);
2210 	if (error != 0) {
2211 		mpt_prt(mpt, "Could not enter diagnostic mode!\n");
2212 		return (EIO);
2213 	}
2214 
2215 	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
2216 		  MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);
2217 
2218 	fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
2219 	mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
2220 		       fw_hdr->ImageSize);
2221 
2222 	ext_offset = fw_hdr->NextImageHeaderOffset;
2223 	while (ext_offset != 0) {
2224 		MpiExtImageHeader_t *ext;
2225 
2226 		ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
2227 		ext_offset = ext->NextImageHeaderOffset;
2228 
2229 		mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
2230 			       ext->ImageSize);
2231 	}
2232 
2233 	pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2234 	/* Setup the address to jump to on reset. */
2235 	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
2236 	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
2237 
2238 	/*
2239 	 * The controller sets the "flash bad" status after attempting
2240 	 * to auto-boot from flash.  Clear the status so that the controller
2241 	 * will continue the boot process with our newly installed firmware.
2242 	 */
2243 	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2244 	data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
2245 	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2246 	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
2247 
2248 	pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2249 
2250 	/*
2251 	 * Re-enable the processor and clear the boot halt flag.
2252 	 */
2253 	data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
2254 	data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
2255 	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
2256 
2257 	mpt_disable_diag_mode(mpt);
2258 	return (0);
2259 }
2260 
2261 /*
2262  * Allocate/Initialize data structures for the controller.  Called
2263  * once at instance startup.
2264  */
2265 static int
2266 mpt_configure_ioc(struct mpt_softc *mpt)
2267 {
	MSG_PORT_FACTS_REPLY pfp;
	MSG_IOC_FACTS_REPLY facts;
2270 	int try;
2271 	int needreset;
2272 	uint32_t max_chain_depth;
2273 
2274 	needreset = 0;
2275 	for (try = 0; try < MPT_MAX_TRYS; try++) {
2276 
2277 		/*
2278 		 * No need to reset if the IOC is already in the READY state.
2279 		 *
2280 		 * Force reset if initialization failed previously.
2281 		 * Note that a hard_reset of the second channel of a '929
2282 		 * will stop operation of the first channel.  Hopefully, if the
2283 		 * first channel is ok, the second will not require a hard
2284 		 * reset.
2285 		 */
2286 		if (needreset || MPT_STATE(mpt_rd_db(mpt)) !=
2287 		    MPT_DB_STATE_READY) {
2288 			if (mpt_reset(mpt, FALSE) != MPT_OK) {
2289 				continue;
2290 			}
2291 		}
2292 		needreset = 0;
2293 
2294 		if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) {
2295 			mpt_prt(mpt, "mpt_get_iocfacts failed\n");
2296 			needreset = 1;
2297 			continue;
2298 		}
2299 
2300 		mpt->mpt_global_credits = le16toh(facts.GlobalCredits);
2301 		mpt->request_frame_size = le16toh(facts.RequestFrameSize);
2302 		mpt->ioc_facts_flags = facts.Flags;
2303 		mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
2304 			    le16toh(facts.MsgVersion) >> 8,
2305 			    le16toh(facts.MsgVersion) & 0xFF,
2306 			    le16toh(facts.HeaderVersion) >> 8,
2307 			    le16toh(facts.HeaderVersion) & 0xFF);
2308 
2309 		/*
2310 		 * Now that we know request frame size, we can calculate
2311 		 * the actual (reasonable) segment limit for read/write I/O.
2312 		 *
2313 		 * This limit is constrained by:
2314 		 *
		 *  + The size of each area we allocate per command (and
		 *    how many chain segments we can fit into it).
		 *  + The total number of areas we've set up.
2318 		 *  + The actual chain depth the card will allow.
2319 		 *
2320 		 * The first area's segment count is limited by the I/O request
		 * at the head of it. Realistically, we cannot allocate
		 * more than MPT_MAX_REQUESTS areas; to account for both
		 * conditions, we start out with MPT_MAX_REQUESTS-2.
2324 		 *
2325 		 */
2326 		max_chain_depth = facts.MaxChainDepth;
2327 
2328 		/* total number of request areas we (can) allocate */
2329 		mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
2330 
2331 		/* converted to the number of chain areas possible */
2332 		mpt->max_seg_cnt *= MPT_NRFM(mpt);
2333 
2334 		/* limited by the number of chain areas the card will support */
2335 		if (mpt->max_seg_cnt > max_chain_depth) {
2336 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2337 			    "chain depth limited to %u (from %u)\n",
2338 			    max_chain_depth, mpt->max_seg_cnt);
2339 			mpt->max_seg_cnt = max_chain_depth;
2340 		}
2341 
2342 		/* converted to the number of simple sges in chain segments. */
2343 		mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
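		/*
		 * Illustrative numbers (not from any particular card):
		 * 128 request areas and 2 chain frames per area give
		 * (128 - 2) * 2 = 252 chain areas; a MaxChainDepth of 128
		 * clips that to 128, and with 32 SGEs per chain frame the
		 * final limit is 128 * 31 = 3968 segments.
		 */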
2344 
2345 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2346 		    "Maximum Segment Count: %u\n", mpt->max_seg_cnt);
2347 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2348 			 "MsgLength=%u IOCNumber = %d\n",
2349 			 facts.MsgLength, facts.IOCNumber);
2350 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2351 			 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
2352 			 "Request Frame Size %u bytes Max Chain Depth %u\n",
			 mpt->mpt_global_credits, facts.BlockSize,
			 mpt->request_frame_size << 2, max_chain_depth);
2355 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2356 			 "IOCFACTS: Num Ports %d, FWImageSize %d, "
2357 			 "Flags=%#x\n", facts.NumberOfPorts,
2358 			 le32toh(facts.FWImageSize), facts.Flags);
2359 
2361 		if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) {
2362 			struct mpt_map_info mi;
2363 			int error;
2364 
2365 			/*
2366 			 * In some configurations, the IOC's firmware is
2367 			 * stored in a shared piece of system NVRAM that
			 * is only accessible via the BIOS.  In this
			 * case, the IOC keeps a copy of the firmware in
			 * RAM until the OS driver retrieves it.  Once
2371 			 * retrieved, we are responsible for re-downloading
2372 			 * the firmware after any hard-reset.
2373 			 */
2374 			mpt->fw_image_size = le32toh(facts.FWImageSize);
2375 			error = mpt_dma_tag_create(mpt, mpt->parent_dmat,
2376 			    /*alignment*/1, /*boundary*/0,
2377 			    /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2378 			    /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL,
2379 			    /*filterarg*/NULL, mpt->fw_image_size,
2380 			    /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size,
2381 			    /*flags*/0, &mpt->fw_dmat);
2382 			if (error != 0) {
2383 				mpt_prt(mpt, "cannot create fw dma tag\n");
2384 				return (ENOMEM);
2385 			}
2386 			error = bus_dmamem_alloc(mpt->fw_dmat,
2387 			    (void **)&mpt->fw_image, BUS_DMA_NOWAIT,
2388 			    &mpt->fw_dmap);
2389 			if (error != 0) {
2390 				mpt_prt(mpt, "cannot allocate fw mem.\n");
2391 				bus_dma_tag_destroy(mpt->fw_dmat);
2392 				return (ENOMEM);
2393 			}
2394 			mi.mpt = mpt;
2395 			mi.error = 0;
2396 			bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
2397 			    mpt->fw_image, mpt->fw_image_size, mpt_map_rquest,
2398 			    &mi, 0);
2399 			mpt->fw_phys = mi.phys;
2400 
2401 			error = mpt_upload_fw(mpt);
2402 			if (error != 0) {
2403 				mpt_prt(mpt, "fw upload failed.\n");
2404 				bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
2405 				bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
2406 				    mpt->fw_dmap);
2407 				bus_dma_tag_destroy(mpt->fw_dmat);
2408 				mpt->fw_image = NULL;
2409 				return (EIO);
2410 			}
2411 		}
2412 
2413 		if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) {
2414 			mpt_prt(mpt, "mpt_get_portfacts failed\n");
2415 			needreset = 1;
2416 			continue;
2417 		}
2418 
2419 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2420 			 "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n",
2421 			 pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID,
2422 			 pfp.MaxDevices);
2423 
2424 		mpt->mpt_port_type = pfp.PortType;
2425 		mpt->mpt_proto_flags = pfp.ProtocolFlags;
2426 		if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI &&
2427 		    pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS &&
2428 		    pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) {
2429 			mpt_prt(mpt, "Unsupported Port Type (%x)\n",
2430 			    pfp.PortType);
2431 			return (ENXIO);
2432 		}
2433 		mpt->mpt_max_tgtcmds = le16toh(pfp.MaxPostedCmdBuffers);
2434 
2435 		if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) {
2436 			mpt->is_fc = 1;
2437 			mpt->is_sas = 0;
2438 		} else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
2439 			mpt->is_fc = 0;
2440 			mpt->is_sas = 1;
2441 		} else {
2442 			mpt->is_fc = 0;
2443 			mpt->is_sas = 0;
2444 		}
2445 		mpt->mpt_ini_id = pfp.PortSCSIID;
2446 		mpt->mpt_max_devices = pfp.MaxDevices;
2447 
2448 		/*
2449 		 * Match our expected role with what this port supports.
2450 		 *
2451 		 * We only do this to meet expectations. That is, if the
2452 		 * user has specified they want initiator role, and we
2453 		 * don't support it, that's an error we return back upstream.
2454 		 */
2455 
2456 		mpt->cap = MPT_ROLE_NONE;
2457 		if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
2458 			mpt->cap |= MPT_ROLE_INITIATOR;
2459 		}
2460 		if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2461 			mpt->cap |= MPT_ROLE_TARGET;
2462 		}
2463 		if (mpt->cap == MPT_ROLE_NONE) {
2464 			mpt_prt(mpt, "port does not support either target or "
2465 			    "initiator role\n");
2466 			return (ENXIO);
2467 		}
2468 
2469 		if ((mpt->role & MPT_ROLE_INITIATOR) &&
2470 		    (mpt->cap & MPT_ROLE_INITIATOR) == 0) {
2471 			mpt_prt(mpt, "port does not support initiator role\n");
2472 			return (ENXIO);
2473 		}
2474 
2475 		if ((mpt->role & MPT_ROLE_TARGET) &&
2476 		    (mpt->cap & MPT_ROLE_TARGET) == 0) {
2477 			mpt_prt(mpt, "port does not support target role\n");
2478 			return (ENXIO);
2479 		}
2480 
2481 		if (mpt_enable_ioc(mpt, 0) != MPT_OK) {
2482 			mpt_prt(mpt, "unable to initialize IOC\n");
2483 			return (ENXIO);
2484 		}
2485 
2486 		/*
2487 		 * Read IOC configuration information.
2488 		 */
2489 		mpt_read_config_info_ioc(mpt);
2490 
2491 		/* Everything worked */
2492 		break;
2493 	}
2494 
2495 	if (try >= MPT_MAX_TRYS) {
2496 		mpt_prt(mpt, "failed to initialize IOC");
2497 		return (EIO);
2498 	}
2499 
2500 	return (0);
2501 }
2502 
2503 static int
2504 mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
2505 {
2506 	uint32_t pptr;
2507 	int val;
2508 
2509 	if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2510 		mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2511 		return (EIO);
2512 	}
2513 
2514 	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
2515 
2516 	if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
2517 		mpt_prt(mpt, "IOC failed to go to run state\n");
2518 		return (ENXIO);
2519 	}
2520 	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
2521 
2522 	/*
2523 	 * Give it reply buffers
2524 	 *
2525 	 * Do *not* exceed global credits.
2526 	 */
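	/*
	 * Each buffer is an MPT_REPLY_SIZE chunk carved from the single
	 * page of reply memory; at most GlobalCredits - 1 are posted,
	 * presumably so that one credit always stays in reserve.
	 */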
2527 	for (val = 0, pptr = mpt->reply_phys;
2528 	    (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2529 	     pptr += MPT_REPLY_SIZE) {
2530 		mpt_free_reply(mpt, pptr);
2531 		if (++val == mpt->mpt_global_credits - 1)
2532 			break;
2533 	}
2534 
2536 	/*
2537 	 * Enable the port if asked. This is only done if we're resetting
2538 	 * the IOC after initial startup.
2539 	 */
2540 	if (portenable) {
2541 		/*
2542 		 * Enable asynchronous event reporting
2543 		 */
2544 		mpt_send_event_request(mpt, 1);
2545 
2546 		if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2547 			mpt_prt(mpt, "failed to enable port 0\n");
2548 			return (ENXIO);
2549 		}
2550 	}
2551 	return (MPT_OK);
2552 }
2553