/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#include "dev/mpt/mpilib/mpi_sas.h"

#include <sys/callout.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
					MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t	mpt_cam_probe;
static mpt_attach_handler_t	mpt_cam_attach;
static mpt_enable_handler_t	mpt_cam_enable;
static mpt_ready_handler_t	mpt_cam_ready;
static mpt_event_handler_t	mpt_cam_event;
static mpt_reset_handler_t	mpt_cam_ioc_reset;
static mpt_detach_handler_t	mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name		= "mpt_cam",
	.probe		= mpt_cam_probe,
	.attach		= mpt_cam_attach,
	.enable		= mpt_cam_enable,
	.ready		= mpt_cam_ready,
	.event		= mpt_cam_event,
	.reset		= mpt_cam_ioc_reset,
	.detach		= mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

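/*
 * The hw.mpt.enable_sata_wc tunable: -1 (the default) leaves each SATA
 * drive's write cache alone, 0 forces it off, and any other value forces
 * it on (see mptsas_set_sata_wc() below).
 */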
int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);

static int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

static int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t	 handler;
	int		 maxq;
	int		 error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
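	/*
	 * Size the SIM queue by whichever is smaller: the global credits
	 * the IOC granted us or the driver's own request pool.
	 */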
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock.
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information.
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
		 mpt->mpt_fcport_page0.Header.PageVersion,
		 mpt->mpt_fcport_page0.Header.PageLength,
		 mpt->mpt_fcport_page0.Header.PageNumber,
		 mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);
	MPT_UNLOCK(mpt);
	ctx = device_get_sysctl_ctx(mpt->dev);
	tree = device_get_sysctl_tree(mpt->dev);

	snprintf(mpt->scinfo.fc.wwnn, sizeof (mpt->scinfo.fc.wwnn),
	    "0x%08x%08x", mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low);

	snprintf(mpt->scinfo.fc.wwpn, sizeof (mpt->scinfo.fc.wwpn),
	    "0x%08x%08x", mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low);

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
	    "World Wide Node Name");

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
	    "World Wide Port Name");

	MPT_LOCK(mpt);
	return (0);
}

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

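	/*
	 * If no role change was requested, adopt the configured role so
	 * the comparison below becomes a no-op; otherwise consume the
	 * request flag and reconcile the NVRAM flags (in 'role') against
	 * the requested configuration below.
	 */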
	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
				       0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
				       &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	len = hdr.ExtPageLength * 4;
	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				     0, &hdr, buffer, len, 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		free(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	free(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
				       MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
				       0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				     form + form_specific, &hdr, buffer,
				     sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	free(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error = 0;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
				       MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
				       &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				     form + form_specific, &hdr, buffer,
				     sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	free(buffer, M_DEVBUF);
out:
	return (error);
}

/*
 * Read SAS configuration information.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		free(portinfo, M_DEVBUF);
		return (0);
	}

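	/*
	 * Walk every phy: read its PHY page 0 (addressed by phy number),
	 * then the device page 0 for the phy itself (addressed by its
	 * controller device handle) and, if something is attached, for
	 * the attached device as well.
	 */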
	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
	int enabled)
{
	SataPassthroughRequest_t	*pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
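	/*
	 * Hand-rolled SATA Host-to-Device Register FIS: byte 0 is the FIS
	 * type (0x27, register H2D), byte 1 sets the C bit (command
	 * update), byte 2 is the ATA SET FEATURES opcode (0xEF), and
	 * byte 3 carries the subcommand: 0x02 to enable the drive write
	 * cache, 0x82 to disable it. Bytes 7 and 15 hold conventional
	 * Device and Control register values.
	 */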
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
			     10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		printf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		printf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

/*
 * Set SAS configuration information.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			if (bootverbose)
				device_printf(mpt->dev,
				    "%sabling SATA WC on phy %d\n",
				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
					   mpt_enable_sata_wc);
		}
	}

	return (0);
}

static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (req != NULL) {
		if (reply_frame != NULL) {
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
			/*
			 * Whew- we can free this request (late completion)
			 */
			mpt_free_request(mpt, req);
		}
	}

	return (TRUE);
}

/*
 * Read SCSI configuration information.
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int error, i, pp1val;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

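	/*
	 * SPI Port Page 1 "Configuration" carries the bitmask of IDs the
	 * port responds to in its upper 16 bits and our initiator ID in
	 * the low byte; e.g. for initiator ID 7 the expected value is
	 * (0x0080 << 16) | 7 == 0x00800007.
	 */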
	pp1val = ((1 << mpt->mpt_ini_id) <<
	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

static int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	MPT_LOCK(mpt);

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	MPT_UNLOCK(mpt);
	return (error);
}

static void
mpt_cam_ready(struct mpt_softc *mpt)
{

	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		MPT_LOCK(mpt);
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
		MPT_UNLOCK(mpt);
	}
	mpt->ready = 1;
}

static void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	MPT_LOCK(mpt);
	mpt->ready = 0;
	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		free(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim, TRUE);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim, TRUE);
		mpt->phydisk_sim = NULL;
	}
	MPT_UNLOCK(mpt);
}

/*
 * This routine is used after a system crash to dump core onto the swap
 * device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb	 *ccb;
	struct mpt_softc *mpt;
	request_t	 *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	MPT_LOCK_ASSERT(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
}

/*
 * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called
 * directly.
 *
 * Takes a list of physical segments, builds the SGL for the SCSI IO
 * command, and forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	bus_addr_t chain_list_addr;
	int first_lim, seg, this_seg_lim;
	uint32_t addr, cur_off, flags, nxt_off, tf;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */
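
	/*
	 * Example (hypothetical numbers): if MPT_NSGL_FIRST(mpt) were 8,
	 * a 20-segment I/O would place 7 SIMPLE64 elements plus one
	 * CHAIN64 in the command frame; each chained frame then holds
	 * further SIMPLE64s, ending in another CHAIN64 if segments
	 * remain, until the final element carries the END_OF_LIST and
	 * END_OF_BUFFER flags.
	 */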

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		tf = flags;
		memset(se, 0, sizeof (*se));
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
		if (sizeof(bus_addr_t) > 4) {
			addr = ((uint64_t)dm_segs->ds_addr) >> 32;
			/* SAS1078 36GB limitation WAR */
			if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
			    MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
				addr |= (1U << 31);
				tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
			}
			se->Address.High = htole32(addr);
		}
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
				MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element; the
	 * ChainOffset field is expressed in 32-bit words, hence the
	 * shift by two.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    htole32(((uint64_t)chain_list_addr) >> 32);
		}
		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of the
			 * number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			tf = flags;
			memset(se, 0, sizeof (*se));
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			se->Address.Low = htole32(dm_segs->ds_addr &
			    0xffffffff);
			if (sizeof (bus_addr_t) > 4) {
				addr = ((uint64_t)dm_segs->ds_addr) >> 32;
				/* SAS1078 36GB limitation WAR */
				if (mpt->is_1078 &&
				    (((uint64_t)dm_segs->ds_addr +
				    MPI_SGE_LENGTH(se->FlagsLength)) >>
				    32) == 9) {
					addr |= (1U << 31);
					tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
				}
				se->Address.High = htole32(addr);
			}
			if (seg == this_seg_lim - 1) {
				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
					MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

    next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			nrq = mpt_get_request(mpt, FALSE);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			printf("  Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    ccb->csio.scsi_status == SCSI_STATUS_OK &&
		    tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
1658 	}
1659 	mpt_send_cmd(mpt, req);
1660 }
1661 
1662 static void
1663 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1664 {
1665 	request_t *req, *trq;
1666 	char *mpt_off;
1667 	union ccb *ccb;
1668 	struct mpt_softc *mpt;
1669 	int seg, first_lim;
1670 	uint32_t flags, nxt_off;
1671 	void *sglp = NULL;
1672 	MSG_REQUEST_HEADER *hdrp;
1673 	SGE_SIMPLE32 *se;
1674 	SGE_CHAIN32 *ce;
1675 	int istgt = 0;
1676 
1677 	req = (request_t *)arg;
1678 	ccb = req->ccb;
1679 
1680 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1681 	req = ccb->ccb_h.ccb_req_ptr;
1682 
1683 	hdrp = req->req_vbuf;
1684 	mpt_off = req->req_vbuf;
1685 
1686 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1687 		error = EFBIG;
1688 	}
1689 
1690 	if (error == 0) {
1691 		switch (hdrp->Function) {
1692 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1693 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1694 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1695 			break;
1696 		case MPI_FUNCTION_TARGET_ASSIST:
1697 			istgt = 1;
1698 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1699 			break;
1700 		default:
1701 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1702 			    hdrp->Function);
1703 			error = EINVAL;
1704 			break;
1705 		}
1706 	}
1707 
1708 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1709 		error = EFBIG;
1710 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1711 		    nseg, mpt->max_seg_cnt);
1712 	}
1713 
1714 bad:
1715 	if (error != 0) {
1716 		if (error != EFBIG && error != ENOMEM) {
1717 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1718 		}
1719 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1720 			cam_status status;
1721 			mpt_freeze_ccb(ccb);
1722 			if (error == EFBIG) {
1723 				status = CAM_REQ_TOO_BIG;
1724 			} else if (error == ENOMEM) {
1725 				if (mpt->outofbeer == 0) {
1726 					mpt->outofbeer = 1;
1727 					xpt_freeze_simq(mpt->sim, 1);
1728 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1729 					    "FREEZEQ\n");
1730 				}
1731 				status = CAM_REQUEUE_REQ;
1732 			} else {
1733 				status = CAM_REQ_CMP_ERR;
1734 			}
1735 			mpt_set_ccb_status(ccb, status);
1736 		}
1737 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1738 			request_t *cmd_req =
1739 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1740 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1741 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1742 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1743 		}
1744 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1745 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1746 		xpt_done(ccb);
1747 		mpt_free_request(mpt, req);
1748 		return;
1749 	}
1750 
1751 	/*
1752 	 * No data to transfer?
1753 	 * Just make a single simple SGL with zero length.
1754 	 */
1755 
1756 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1757 		int tidx = ((char *)sglp) - mpt_off;
1758 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1759 	}
1760 
1761 	if (nseg == 0) {
1762 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1763 		MPI_pSGE_SET_FLAGS(se1,
1764 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1765 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1766 		se1->FlagsLength = htole32(se1->FlagsLength);
1767 		goto out;
1768 	}
1769 
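	/*
	 * Note: MPI_SGE_FLAGS_HOST_TO_IOC describes direction from the
	 * IOC's point of view, so its sense is inverted between
	 * initiator mode (CAM_DIR_OUT) and target mode (CAM_DIR_IN).
	 */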
1771 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1772 	if (istgt == 0) {
1773 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1774 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1775 		}
1776 	} else {
1777 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1778 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1779 		}
1780 	}
1781 
1782 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1783 		bus_dmasync_op_t op;
1784 		if (istgt) {
1785 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1786 				op = BUS_DMASYNC_PREREAD;
1787 			} else {
1788 				op = BUS_DMASYNC_PREWRITE;
1789 			}
1790 		} else {
1791 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1792 				op = BUS_DMASYNC_PREWRITE;
1793 			} else {
1794 				op = BUS_DMASYNC_PREREAD;
1795 			}
1796 		}
1797 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1798 	}
1799 
1800 	/*
1801 	 * Okay, fill in what we can at the end of the command frame.
1802 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1803 	 * the command frame.
1804 	 *
1805 	 * Otherwise, we fill in MPT_NSGL_FIRST less one SIMPLE32
1806 	 * elements and start doing CHAIN32 entries after that.
1808 	 */
1809 
1810 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1811 		first_lim = nseg;
1812 	} else {
1813 		/*
1814 		 * Leave room for CHAIN element
1815 		 */
1816 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1817 	}
1818 
1819 	se = (SGE_SIMPLE32 *) sglp;
1820 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1821 		uint32_t tf;
1822 
1823 		memset(se, 0, sizeof (*se));
1824 		se->Address = htole32(dm_segs->ds_addr);
1825 
1826 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1827 		tf = flags;
1828 		if (seg == first_lim - 1) {
1829 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1830 		}
1831 		if (seg == nseg - 1) {
1832 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1833 				MPI_SGE_FLAGS_END_OF_BUFFER;
1834 		}
1835 		MPI_pSGE_SET_FLAGS(se, tf);
1836 		se->FlagsLength = htole32(se->FlagsLength);
1837 	}
1838 
1839 	if (seg == nseg) {
1840 		goto out;
1841 	}
1842 
1843 	/*
1844 	 * Tell the IOC where to find the first chain element.
1845 	 */
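	/*
	 * Per MPI, ChainOffset is expressed in 32-bit words from the
	 * start of the message header, hence the shift by 2.
	 */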
1846 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1847 	nxt_off = MPT_RQSL(mpt);
1848 	trq = req;
1849 
1850 	/*
1851 	 * Make up the rest of the data segments out of a chain element
1852 	 * (contained in the current request frame) which points to
1853 	 * SIMPLE32 elements in the next request frame, possibly ending
1854 	 * with *another* chain element (if there's more).
1855 	 */
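	/*
	 * A rough sketch of the resulting layout:
	 *
	 *   frame 0: [ header | SIMPLE32 ... | CHAIN32 ]--+
	 *   frame 1: [ SIMPLE32 ... | CHAIN32 ]--+  <-----+
	 *   frame N: [ SIMPLE32 ... ]  <---------+
	 *
	 * where each CHAIN32 element points at the start of the
	 * SIMPLE32 list in the following frame.
	 */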
1856 	while (seg < nseg) {
1857 		int this_seg_lim;
1858 		uint32_t tf, cur_off;
1859 		bus_addr_t chain_list_addr;
1860 
1861 		/*
1862 		 * Point to the chain descriptor. Note that the chain
1863 		 * descriptor is at the end of the *previous* list (whether
1864 		 * chain or simple).
1865 		 */
1866 		ce = (SGE_CHAIN32 *) se;
1867 
1868 		/*
1869 		 * Before we change our current pointer, make sure we won't
1870 		 * overflow the request area with this frame. Note that we
1871 		 * test against 'greater than' here as it's okay in this case
1872 		 * to have next offset be just outside the request area.
1873 		 */
1874 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1875 			nxt_off = MPT_REQUEST_AREA;
1876 			goto next_chain;
1877 		}
1878 
1879 		/*
1880 		 * Set our SGE element pointer to the beginning of the chain
1881 		 * list and update our next chain list offset.
1882 		 */
1883 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1884 		cur_off = nxt_off;
1885 		nxt_off += MPT_RQSL(mpt);
1886 
1887 		/*
1888 		 * Now initialize the chain descriptor.
1889 		 */
1890 		memset(ce, 0, sizeof (*ce));
1891 
1892 		/*
1893 		 * Get the physical address of the chain list.
1894 		 */
1895 		chain_list_addr = trq->req_pbuf;
1896 		chain_list_addr += cur_off;
1897 
1898 
1900 		ce->Address = htole32(chain_list_addr);
1901 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1902 
1903 
1904 		/*
1905 		 * If we have more than a frame's worth of segments left,
1906 		 * set up the chain list to have the last element be another
1907 		 * chain descriptor.
1908 		 */
1909 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1910 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1911 			/*
1912 			 * The chain length is the size in bytes of this
1913 			 * frame's simple elements plus the trailing chain element.
1914 			 *
1915 			 * NextChainOffset is the size of the simple elements
1916 			 * alone, expressed in 32-bit words.
1917 			 */
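			/*
			 * Worked example: 30 simple elements left in this
			 * chain list (8-byte SGE_SIMPLE32, 8-byte
			 * SGE_CHAIN32): Length = 30 * 8 = 240 bytes,
			 * NextChainOffset = 240 >> 2 = 60 words, and the
			 * final Length = 240 + 8 = 248 bytes.
			 */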
1918 			ce->Length = (this_seg_lim - seg) *
1919 			    sizeof (SGE_SIMPLE32);
1920 			ce->NextChainOffset = ce->Length >> 2;
1921 			ce->Length += sizeof (SGE_CHAIN32);
1922 		} else {
1923 			this_seg_lim = nseg;
1924 			ce->Length = (this_seg_lim - seg) *
1925 			    sizeof (SGE_SIMPLE32);
1926 		}
1927 		ce->Length = htole16(ce->Length);
1928 
1929 		/*
1930 		 * Fill in the chain list SGE elements with our segment data.
1931 		 *
1932 		 * If we're the last element in this chain list, set the last
1933 		 * element flag. If we're the completely last element period,
1934 		 * set the end of list and end of buffer flags.
1935 		 */
1936 		while (seg < this_seg_lim) {
1937 			memset(se, 0, sizeof (*se));
1938 			se->Address = htole32(dm_segs->ds_addr);
1939 
1940 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1941 			tf = flags;
1942 			if (seg == this_seg_lim - 1) {
1943 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1944 			}
1945 			if (seg == nseg - 1) {
1946 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1947 					MPI_SGE_FLAGS_END_OF_BUFFER;
1948 			}
1949 			MPI_pSGE_SET_FLAGS(se, tf);
1950 			se->FlagsLength = htole32(se->FlagsLength);
1951 			se++;
1952 			seg++;
1953 			dm_segs++;
1954 		}
1955 
1956     next_chain:
1957 		/*
1958 		 * If we have more segments to do and we've used up all of
1959 		 * the space in a request area, go allocate another one
1960 		 * and chain to that.
1961 		 */
1962 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1963 			request_t *nrq;
1964 
1965 			nrq = mpt_get_request(mpt, FALSE);
1966 
1967 			if (nrq == NULL) {
1968 				error = ENOMEM;
1969 				goto bad;
1970 			}
1971 
1972 			/*
1973 			 * Append the new request area on the tail of our list.
1974 			 */
1975 			if ((trq = req->chain) == NULL) {
1976 				req->chain = nrq;
1977 			} else {
1978 				while (trq->chain != NULL) {
1979 					trq = trq->chain;
1980 				}
1981 				trq->chain = nrq;
1982 			}
1983 			trq = nrq;
1984 			mpt_off = trq->req_vbuf;
1985 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1986 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1987 			}
1988 			nxt_off = 0;
1989 		}
1990 	}
1991 out:
1992 
1993 	/*
1994 	 * Last time we need to check if this CCB needs to be aborted.
1995 	 */
1996 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1997 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1998 			request_t *cmd_req =
1999 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2000 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2001 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2002 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2003 		}
2004 		mpt_prt(mpt,
2005 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2006 		    ccb->ccb_h.status & CAM_STATUS_MASK);
2007 		if (nseg) {
2008 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2009 		}
2010 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2011 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2012 		xpt_done(ccb);
2013 		mpt_free_request(mpt, req);
2014 		return;
2015 	}
2016 
2017 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2018 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2019 		mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
2020 		    mpt_timeout, ccb);
2021 	}
2022 	if (mpt->verbose > MPT_PRT_DEBUG) {
2023 		int nc = 0;
2024 		mpt_print_request(req->req_vbuf);
2025 		for (trq = req->chain; trq; trq = trq->chain) {
2026 			printf("  Additional Chain Area %d\n", nc++);
2027 			mpt_dump_sgl(trq->req_vbuf, 0);
2028 		}
2029 	}
2030 
2031 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2032 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2033 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2034 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
2035 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2036 		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2037 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2038 		} else {
2039 			tgt->state = TGT_STATE_MOVING_DATA;
2040 		}
2041 #else
2042 		tgt->state = TGT_STATE_MOVING_DATA;
2043 #endif
2044 	}
2045 	mpt_send_cmd(mpt, req);
2046 }
2047 
2048 static void
2049 mpt_start(struct cam_sim *sim, union ccb *ccb)
2050 {
2051 	request_t *req;
2052 	struct mpt_softc *mpt;
2053 	MSG_SCSI_IO_REQUEST *mpt_req;
2054 	struct ccb_scsiio *csio = &ccb->csio;
2055 	struct ccb_hdr *ccbh = &ccb->ccb_h;
2056 	bus_dmamap_callback_t *cb;
2057 	target_id_t tgt;
2058 	int raid_passthru;
2059 	int error;
2060 
2061 	/* Get the pointer for the physical adapter */
2062 	mpt = ccb->ccb_h.ccb_mpt_ptr;
2063 	raid_passthru = (sim == mpt->phydisk_sim);
2064 
2065 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2066 		if (mpt->outofbeer == 0) {
2067 			mpt->outofbeer = 1;
2068 			xpt_freeze_simq(mpt->sim, 1);
2069 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2070 		}
2071 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2072 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2073 		xpt_done(ccb);
2074 		return;
2075 	}
2076 #ifdef	INVARIANTS
2077 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2078 #endif
2079 
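	/*
	 * Systems with 64-bit bus addresses need the 64-bit SGE
	 * variant of the dma load callback; 32-bit systems can use
	 * the smaller SGE_SIMPLE32/SGE_CHAIN32 form.
	 */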
2080 	if (sizeof (bus_addr_t) > 4) {
2081 		cb = mpt_execute_req_a64;
2082 	} else {
2083 		cb = mpt_execute_req;
2084 	}
2085 
2086 	/*
2087 	 * Link the ccb and the request structure so we can find
2088 	 * the other knowing either the request or the ccb
2089 	 */
2090 	req->ccb = ccb;
2091 	ccb->ccb_h.ccb_req_ptr = req;
2092 
2093 	/* Now we build the command for the IOC */
2094 	mpt_req = req->req_vbuf;
2095 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2096 
2097 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2098 	if (raid_passthru) {
2099 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2100 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2101 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2102 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
			mpt_free_request(mpt, req);
2103 			xpt_done(ccb);
2104 			return;
2105 		}
2106 		mpt_req->Bus = 0;	/* we never set bus here */
2107 	} else {
2108 		tgt = ccb->ccb_h.target_id;
2109 		mpt_req->Bus = 0;	/* XXX */
2111 	}
2112 	mpt_req->SenseBufferLength =
2113 		(csio->sense_len < MPT_SENSE_SIZE) ?
2114 		 csio->sense_len : MPT_SENSE_SIZE;
2115 
2116 	/*
2117 	 * We use the message context to find the request structure when we
2118 	 * get the command completion interrupt from the IOC.
2119 	 */
2120 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2121 
2122 	/* Which physical device to do the I/O on */
2123 	mpt_req->TargetID = tgt;
2124 
2125 	/* We assume a single level LUN type */
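	/*
	 * LUNs that don't fit in one byte use the SAM-2 flat
	 * addressing method: byte 0 carries 0x40 plus the upper six
	 * bits, byte 1 the low eight bits.
	 */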
2126 	if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2127 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2128 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2129 	} else {
2130 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2131 	}
2132 
2133 	/* Set the direction of the transfer */
2134 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2135 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2136 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2137 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2138 	} else {
2139 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2140 	}
2141 
2142 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2143 		switch(ccb->csio.tag_action) {
2144 		case MSG_HEAD_OF_Q_TAG:
2145 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2146 			break;
2147 		case MSG_ACA_TASK:
2148 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2149 			break;
2150 		case MSG_ORDERED_Q_TAG:
2151 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2152 			break;
2153 		case MSG_SIMPLE_Q_TAG:
2154 		default:
2155 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2156 			break;
2157 		}
2158 	} else {
2159 		if (mpt->is_fc || mpt->is_sas) {
2160 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2161 		} else {
2162 			/* XXX No such thing for a target doing packetized. */
2163 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2164 		}
2165 	}
2166 
2167 	if (mpt->is_spi) {
2168 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2169 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2170 		}
2171 	}
2172 	mpt_req->Control = htole32(mpt_req->Control);
2173 
2174 	/* Copy the scsi command block into place */
2175 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2176 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2177 	} else {
2178 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2179 	}
2180 
2181 	mpt_req->CDBLength = csio->cdb_len;
2182 	mpt_req->DataLength = htole32(csio->dxfer_len);
2183 	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2184 
2185 	/*
2186 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2187 	 */
2188 	if (mpt->verbose == MPT_PRT_DEBUG) {
2189 		U32 df;
2190 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2191 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2192 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2193 		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2194 		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2195 			mpt_prtc(mpt, "(%s %u byte%s ",
2196 			    (df == MPI_SCSIIO_CONTROL_READ)?
2197 			    "read" : "write",  csio->dxfer_len,
2198 			    (csio->dxfer_len == 1)? ")" : "s)");
2199 		}
2200 		mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt,
2201 		    (uintmax_t)ccb->ccb_h.target_lun, req, req->serno);
2202 	}
2203 
2204 	error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb,
2205 	    req, 0);
2206 	if (error == EINPROGRESS) {
2207 		/*
2208 		 * So as to maintain ordering, freeze the controller queue
2209 		 * until our mapping is returned.
2210 		 */
2211 		xpt_freeze_simq(mpt->sim, 1);
2212 		ccbh->status |= CAM_RELEASE_SIMQ;
2213 	}
2214 }
2215 
2216 static int
2217 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2218     int sleep_ok)
2219 {
2220 	int   error;
2221 	uint16_t status;
2222 	uint8_t response;
2223 
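	/*
	 * Use a target reset if a specific target (or lun) was named,
	 * otherwise reset the whole bus; FC ports also ask for a LIP.
	 */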
2224 	error = mpt_scsi_send_tmf(mpt,
2225 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2226 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2227 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2228 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2229 	    0,	/* XXX How do I get the channel ID? */
2230 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2231 	    lun != CAM_LUN_WILDCARD ? lun : 0,
2232 	    0, sleep_ok);
2233 
2234 	if (error != 0) {
2235 		/*
2236 		 * mpt_scsi_send_tmf hard resets on failure, so no
2237 		 * need to do so here.
2238 		 */
2239 		mpt_prt(mpt,
2240 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2241 		return (EIO);
2242 	}
2243 
2244 	/* Wait for bus reset to be processed by the IOC. */
2245 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2246 	    REQ_STATE_DONE, sleep_ok, 5000);
2247 
2248 	status = le16toh(mpt->tmf_req->IOCStatus);
2249 	response = mpt->tmf_req->ResponseCode;
2250 	mpt->tmf_req->state = REQ_STATE_FREE;
2251 
2252 	if (error) {
2253 		mpt_prt(mpt, "mpt_bus_reset: Reset timed out. "
2254 		    "Resetting controller.\n");
2255 		mpt_reset(mpt, TRUE);
2256 		return (ETIMEDOUT);
2257 	}
2258 
2259 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2260 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2261 		    "Resetting controller.\n", status);
2262 		mpt_reset(mpt, TRUE);
2263 		return (EIO);
2264 	}
2265 
2266 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2267 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2268 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2269 		    "Resetting controller.\n", response);
2270 		mpt_reset(mpt, TRUE);
2271 		return (EIO);
2272 	}
2273 	return (0);
2274 }
2275 
2276 static int
2277 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2278 {
2279 	int r = 0;
2280 	request_t *req;
2281 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2282 
2283 	req = mpt_get_request(mpt, FALSE);
2284 	if (req == NULL) {
2285 		return (ENOMEM);
2286 	}
2287 	fc = req->req_vbuf;
2288 	memset(fc, 0, sizeof(*fc));
2289 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2290 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2291 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2292 	mpt_send_cmd(mpt, req);
2293 	if (dowait) {
2294 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2295 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2296 		if (r == 0) {
2297 			mpt_free_request(mpt, req);
2298 		}
2299 	}
2300 	return (r);
2301 }
2302 
2303 static int
2304 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2305 	      MSG_EVENT_NOTIFY_REPLY *msg)
2306 {
2307 	uint32_t data0, data1;
2308 
2309 	data0 = le32toh(msg->Data[0]);
2310 	data1 = le32toh(msg->Data[1]);
2311 	switch(msg->Event & 0xFF) {
2312 	case MPI_EVENT_UNIT_ATTENTION:
2313 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2314 		    (data0 >> 8) & 0xff, data0 & 0xff);
2315 		break;
2316 
2317 	case MPI_EVENT_IOC_BUS_RESET:
2318 		/* We generated a bus reset */
2319 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2320 		    (data0 >> 8) & 0xff);
2321 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2322 		break;
2323 
2324 	case MPI_EVENT_EXT_BUS_RESET:
2325 		/* Someone else generated a bus reset */
2326 		mpt_prt(mpt, "External Bus Reset Detected\n");
2327 		/*
2328 		 * These replies don't return EventData like the MPI
2329 		 * spec says they do
2330 		 */
2331 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2332 		break;
2333 
2334 	case MPI_EVENT_RESCAN:
2335 	{
2336 		union ccb *ccb;
2337 		uint32_t pathid;
2338 		/*
2339 		 * In general this means a device has been added to the loop.
2340 		 */
2341 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2342 		if (mpt->ready == 0) {
2343 			break;
2344 		}
2345 		if (mpt->phydisk_sim) {
2346 			pathid = cam_sim_path(mpt->phydisk_sim);
2347 		} else {
2348 			pathid = cam_sim_path(mpt->sim);
2349 		}
2350 		/*
2351 		 * Allocate a CCB, create a wildcard path for this bus,
2352 		 * and schedule a rescan.
2353 		 */
2354 		ccb = xpt_alloc_ccb_nowait();
2355 		if (ccb == NULL) {
2356 			mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2357 			break;
2358 		}
2359 
2360 		if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
2361 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2362 			mpt_prt(mpt, "unable to create path for rescan\n");
2363 			xpt_free_ccb(ccb);
2364 			break;
2365 		}
2366 		xpt_rescan(ccb);
2367 		break;
2368 	}
2369 
2370 	case MPI_EVENT_LINK_STATUS_CHANGE:
2371 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2372 		    (data1 >> 8) & 0xff,
2373 		    ((data0 & 0xff) == 0)?  "Failed" : "Active");
2374 		break;
2375 
2376 	case MPI_EVENT_LOOP_STATE_CHANGE:
2377 		switch ((data0 >> 16) & 0xff) {
2378 		case 0x01:
2379 			mpt_prt(mpt,
2380 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2381 			    "(Loop Initialization)\n",
2382 			    (data1 >> 8) & 0xff,
2383 			    (data0 >> 8) & 0xff,
2384 			    (data0     ) & 0xff);
2385 			switch ((data0 >> 8) & 0xff) {
2386 			case 0xF7:
2387 				if ((data0 & 0xff) == 0xF7) {
2388 					mpt_prt(mpt, "Device needs AL_PA\n");
2389 				} else {
2390 					mpt_prt(mpt, "Device %02x doesn't like "
2391 					    "FC performance\n",
2392 					    data0 & 0xFF);
2393 				}
2394 				break;
2395 			case 0xF8:
2396 				if ((data0 & 0xff) == 0xF7) {
2397 					mpt_prt(mpt, "Device had loop failure "
2398 					    "at its receiver prior to acquiring"
2399 					    " AL_PA\n");
2400 				} else {
2401 					mpt_prt(mpt, "Device %02x detected loop"
2402 					    " failure at its receiver\n",
2403 					    data0 & 0xFF);
2404 				}
2405 				break;
2406 			default:
2407 				mpt_prt(mpt, "Device %02x requests that device "
2408 				    "%02x reset itself\n",
2409 				    data0 & 0xFF,
2410 				    (data0 >> 8) & 0xFF);
2411 				break;
2412 			}
2413 			break;
2414 		case 0x02:
2415 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2416 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2417 			    (data1 >> 8) & 0xff, /* Port */
2418 			    (data0 >>  8) & 0xff, /* Character 3 */
2419 			    (data0      ) & 0xff  /* Character 4 */);
2420 			break;
2421 		case 0x03:
2422 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2423 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2424 			    (data1 >> 8) & 0xff, /* Port */
2425 			    (data0 >> 8) & 0xff, /* Character 3 */
2426 			    (data0     ) & 0xff  /* Character 4 */);
2427 			break;
2428 		default:
2429 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2430 			    "FC event (%02x %02x %02x)\n",
2431 			    (data1 >> 8) & 0xff, /* Port */
2432 			    (data0 >> 16) & 0xff, /* Event */
2433 			    (data0 >>  8) & 0xff, /* Character 3 */
2434 			    (data0      ) & 0xff  /* Character 4 */);
2435 		}
2436 		break;
2437 
2438 	case MPI_EVENT_LOGOUT:
2439 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2440 		    (data1 >> 8) & 0xff, data0);
2441 		break;
2442 	case MPI_EVENT_QUEUE_FULL:
2443 	{
2444 		struct cam_sim *sim;
2445 		struct cam_path *tmppath;
2446 		struct ccb_relsim crs;
2447 		PTR_EVENT_DATA_QUEUE_FULL pqf;
2448 		lun_id_t lun_id;
2449 
2450 		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2451 		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2452 		if (bootverbose) {
2453 		    mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x "
2454 			"Depth %d\n",
2455 			pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2456 		}
2457 		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2458 		    pqf->TargetID) != 0) {
2459 			sim = mpt->phydisk_sim;
2460 		} else {
2461 			sim = mpt->sim;
2462 		}
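		/*
		 * Ask CAM to trim the openings on every LUN of this
		 * target to one below the depth at which the queue
		 * full occurred.
		 */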
2463 		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2464 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2465 			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2466 				mpt_prt(mpt, "unable to create a path to send "
2467 				    "XPT_REL_SIMQ\n");
2468 				break;
2469 			}
2470 			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2471 			crs.ccb_h.func_code = XPT_REL_SIMQ;
2472 			crs.ccb_h.flags = CAM_DEV_QFREEZE;
2473 			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2474 			crs.openings = pqf->CurrentDepth - 1;
2475 			xpt_action((union ccb *)&crs);
2476 			if (crs.ccb_h.status != CAM_REQ_CMP) {
2477 				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2478 			}
2479 			xpt_free_path(tmppath);
2480 		}
2481 		break;
2482 	}
2483 	case MPI_EVENT_IR_RESYNC_UPDATE:
2484 		mpt_prt(mpt, "IR resync update %d completed\n",
2485 		    (data0 >> 16) & 0xff);
2486 		break;
2487 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2488 	{
2489 		union ccb *ccb;
2490 		struct cam_sim *sim;
2491 		struct cam_path *tmppath;
2492 		PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2493 
2494 		psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2495 		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2496 		    psdsc->TargetID) != 0)
2497 			sim = mpt->phydisk_sim;
2498 		else
2499 			sim = mpt->sim;
2500 		switch(psdsc->ReasonCode) {
2501 		case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2502 			ccb = xpt_alloc_ccb_nowait();
2503 			if (ccb == NULL) {
2504 				mpt_prt(mpt,
2505 				    "unable to alloc CCB for rescan\n");
2506 				break;
2507 			}
2508 			if (xpt_create_path(&ccb->ccb_h.path, NULL,
2509 			    cam_sim_path(sim), psdsc->TargetID,
2510 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2511 				mpt_prt(mpt,
2512 				    "unable to create path for rescan\n");
2513 				xpt_free_ccb(ccb);
2514 				break;
2515 			}
2516 			xpt_rescan(ccb);
2517 			break;
2518 		case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2519 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2520 			    psdsc->TargetID, CAM_LUN_WILDCARD) !=
2521 			    CAM_REQ_CMP) {
2522 				mpt_prt(mpt,
2523 				    "unable to create path for async event\n");
2524 				break;
2525 			}
2526 			xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2527 			xpt_free_path(tmppath);
2528 			break;
2529 		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2530 		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2531 		case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2532 			break;
2533 		default:
2534 			mpt_lprt(mpt, MPT_PRT_WARN,
2535 			    "SAS device status change: Bus: 0x%02x TargetID: "
2536 			    "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2537 			    psdsc->TargetID, psdsc->ReasonCode);
2538 			break;
2539 		}
2540 		break;
2541 	}
2542 	case MPI_EVENT_SAS_DISCOVERY_ERROR:
2543 	{
2544 		PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2545 
2546 		pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2547 		pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2548 		mpt_lprt(mpt, MPT_PRT_WARN,
2549 		    "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2550 		    pde->Port, pde->DiscoveryStatus);
2551 		break;
2552 	}
2553 	case MPI_EVENT_EVENT_CHANGE:
2554 	case MPI_EVENT_INTEGRATED_RAID:
2555 	case MPI_EVENT_IR2:
2556 	case MPI_EVENT_LOG_ENTRY_ADDED:
2557 	case MPI_EVENT_SAS_DISCOVERY:
2558 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2559 	case MPI_EVENT_SAS_SES:
2560 		break;
2561 	default:
2562 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2563 		    msg->Event & 0xFF);
2564 		return (0);
2565 	}
2566 	return (1);
2567 }
2568 
2569 /*
2570  * Reply path for all SCSI I/O requests, called from our
2571  * interrupt handler by extracting our handler index from
2572  * the MsgContext field of the reply from the IOC.
2573  *
2574  * This routine is optimized for the common case of a
2575  * completion without error.  All exception handling is
2576  * offloaded to non-inlined helper routines to minimize
2577  * cache footprint.
2578  */
2579 static int
2580 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2581     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2582 {
2583 	MSG_SCSI_IO_REQUEST *scsi_req;
2584 	union ccb *ccb;
2585 
2586 	if (req->state == REQ_STATE_FREE) {
2587 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2588 		return (TRUE);
2589 	}
2590 
2591 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2592 	ccb = req->ccb;
2593 	if (ccb == NULL) {
2594 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2595 		    req, req->serno);
2596 		return (TRUE);
2597 	}
2598 
2599 	mpt_req_untimeout(req, mpt_timeout, ccb);
2600 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2601 
2602 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2603 		bus_dmasync_op_t op;
2604 
2605 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2606 			op = BUS_DMASYNC_POSTREAD;
2607 		else
2608 			op = BUS_DMASYNC_POSTWRITE;
2609 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2610 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2611 	}
2612 
2613 	if (reply_frame == NULL) {
2614 		/*
2615 		 * Context only reply, completion without error status.
2616 		 */
2617 		ccb->csio.resid = 0;
2618 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2619 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2620 	} else {
2621 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2622 	}
2623 
2624 	if (mpt->outofbeer) {
2625 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2626 		mpt->outofbeer = 0;
2627 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2628 	}
2629 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2630 		struct scsi_inquiry_data *iq =
2631 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2632 		if (scsi_req->Function ==
2633 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2634 			/*
2635 			 * Fake out the device type so that only the
2636 			 * pass-thru device will attach.
2637 			 */
2638 			iq->device &= ~0x1F;
2639 			iq->device |= T_NODEVICE;
2640 		}
2641 	}
2642 	if (mpt->verbose == MPT_PRT_DEBUG) {
2643 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2644 		    req, req->serno);
2645 	}
2646 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2647 	xpt_done(ccb);
2648 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2649 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2650 	} else {
2651 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2652 		    req, req->serno);
2653 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2654 	}
2655 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2656 	    ("CCB req needed wakeup"));
2657 #ifdef	INVARIANTS
2658 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2659 #endif
2660 	mpt_free_request(mpt, req);
2661 	return (TRUE);
2662 }
2663 
2664 static int
2665 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2666     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2667 {
2668 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2669 
2670 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2671 #ifdef	INVARIANTS
2672 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2673 #endif
2674 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2675 	/* Record IOC Status and Response Code of TMF for any waiters. */
2676 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2677 	req->ResponseCode = tmf_reply->ResponseCode;
2678 
2679 	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2680 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2681 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2682 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2683 		req->state |= REQ_STATE_DONE;
2684 		wakeup(req);
2685 	} else {
2686 		mpt->tmf_req->state = REQ_STATE_FREE;
2687 	}
2688 	return (TRUE);
2689 }
2690 
2691 /*
2692  * XXX: Move to definitions file
2693  */
2694 #define	ELS	0x22
2695 #define	FC4LS	0x32
2696 #define	ABTS	0x81
2697 #define	BA_ACC	0x84
2698 
2699 #define	LS_RJT	0x01
2700 #define	LS_ACC	0x02
2701 #define	PLOGI	0x03
2702 #define	LOGO	0x05
2703 #define	SRR	0x14
2704 #define	PRLI	0x20
2705 #define	PRLO	0x21
2706 #define	ADISC	0x52
2707 #define	RSCN	0x61
2708 
2709 static void
2710 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2711     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2712 {
2713 	uint32_t fl;
2714 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2715 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2716 
2717 	/*
2718 	 * We are going to reuse the ELS request to send this response back.
2719 	 */
2720 	rsp = &tmp;
2721 	memset(rsp, 0, sizeof(*rsp));
2722 
2723 #ifdef	USE_IMMEDIATE_LINK_DATA
2724 	/*
2725 	 * The IMMEDIATE option doesn't seem to work.
2726 	 */
2727 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2728 #endif
2729 	rsp->RspLength = length;
2730 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2731 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2732 
2733 	/*
2734 	 * Copy over information from the original reply frame to
2735 	 * its correct place in the response.
2736 	 */
2737 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
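	/*
	 * (The 24 bytes carried across are presumably the FC frame
	 * header fields, R_CTL/D_ID through OX_ID/RX_ID and the
	 * Parameter word.)
	 */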
2738 
2739 	/*
2740 	 * And now copy back the temporary area to the original frame.
2741 	 */
2742 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2743 	rsp = req->req_vbuf;
2744 
2745 #ifdef	USE_IMMEDIATE_LINK_DATA
2746 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2747 #else
2748 {
2749 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2750 	bus_addr_t paddr = req->req_pbuf;
2751 	paddr += MPT_RQSL(mpt);
2752 
2753 	fl =
2754 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2755 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2756 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2757 		MPI_SGE_FLAGS_END_OF_LIST	|
2758 		MPI_SGE_FLAGS_END_OF_BUFFER;
2759 	fl <<= MPI_SGE_FLAGS_SHIFT;
2760 	fl |= (length);
2761 	se->FlagsLength = htole32(fl);
2762 	se->Address = htole32((uint32_t) paddr);
2763 }
2764 #endif
2765 
2766 	/*
2767 	 * Send it on...
2768 	 */
2769 	mpt_send_cmd(mpt, req);
2770 }
2771 
2772 static int
2773 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2774     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2775 {
2776 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2777 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2778 	U8 rctl;
2779 	U8 type;
2780 	U8 cmd;
2781 	U16 status = le16toh(reply_frame->IOCStatus);
2782 	U32 *elsbuf;
2783 	int ioindex;
2784 	int do_refresh = TRUE;
2785 
2786 #ifdef	INVARIANTS
2787 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2788 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2789 	    req, req->serno, rp->Function));
2790 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2791 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2792 	} else {
2793 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2794 	}
2795 #endif
2796 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2797 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2798 	    req, req->serno, reply_frame, reply_frame->Function);
2799 
2800 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2801 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2802 		    status, reply_frame->Function);
2803 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2804 			/*
2805 			 * XXX: to get around shutdown issue
2806 			 */
2807 			mpt->disabled = 1;
2808 			return (TRUE);
2809 		}
2810 		return (TRUE);
2811 	}
2812 
2813 	/*
2814 	 * If the completed function is a link service response, we recycle
2815 	 * the request to post a fresh buffer for a new link service request.
2816 	 *
2817 	 * The request pointer is bogus in this case and we have to fetch
2818 	 * it based upon the TransactionContext.
2819 	 */
2820 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2821 		/* Freddie Uncle Charlie Katie */
2822 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2823 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2824 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2825 				break;
2826 			}
2827 
2828 		KASSERT(ioindex < mpt->els_cmds_allocated,
2829 		    ("can't find my mommie!"));
2830 
2831 		/* remove from active list as we're going to re-post it */
2832 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2833 		req->state &= ~REQ_STATE_QUEUED;
2834 		req->state |= REQ_STATE_DONE;
2835 		mpt_fc_post_els(mpt, req, ioindex);
2836 		return (TRUE);
2837 	}
2838 
2839 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2840 		/* remove from active list as we're done */
2841 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2842 		req->state &= ~REQ_STATE_QUEUED;
2843 		req->state |= REQ_STATE_DONE;
2844 		if (req->state & REQ_STATE_TIMEDOUT) {
2845 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2846 			    "Sync Primitive Send Completed After Timeout\n");
2847 			mpt_free_request(mpt, req);
2848 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2849 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2850 			    "Async Primitive Send Complete\n");
2851 			mpt_free_request(mpt, req);
2852 		} else {
2853 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2854 			    "Sync Primitive Send Complete- Waking Waiter\n");
2855 			wakeup(req);
2856 		}
2857 		return (TRUE);
2858 	}
2859 
2860 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2861 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2862 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2863 		    rp->MsgLength, rp->MsgFlags);
2864 		return (TRUE);
2865 	}
2866 
2867 	if (rp->MsgLength <= 5) {
2868 		/*
2869 		 * This is just an ack of an original ELS buffer post.
2870 		 */
2871 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2872 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2873 		return (TRUE);
2874 	}
2875 
2877 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2878 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2879 
2880 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2881 	cmd = be32toh(elsbuf[0]) >> 24;
2882 
2883 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2884 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2885 		return (TRUE);
2886 	}
2887 
2888 	ioindex = le32toh(rp->TransactionContext);
2889 	req = mpt->els_cmd_ptrs[ioindex];
2890 
2891 	if (rctl == ELS && type == 1) {
2892 		switch (cmd) {
2893 		case PRLI:
2894 			/*
2895 			 * Send back a PRLI ACC
2896 			 */
2897 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2898 			    le32toh(rp->Wwn.PortNameHigh),
2899 			    le32toh(rp->Wwn.PortNameLow));
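			/*
			 * Build the ACC payload: word 0 is LS_ACC (0x02),
			 * page length 0x10, payload length 0x14; word 4
			 * carries the FCP service parameter bits
			 * (presumably 0x10 = target function, 0x20 =
			 * initiator function).
			 */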
2900 			elsbuf[0] = htobe32(0x02100014);
2901 			elsbuf[1] |= htobe32(0x00000100);
2902 			elsbuf[4] = htobe32(0x00000002);
2903 			if (mpt->role & MPT_ROLE_TARGET)
2904 				elsbuf[4] |= htobe32(0x00000010);
2905 			if (mpt->role & MPT_ROLE_INITIATOR)
2906 				elsbuf[4] |= htobe32(0x00000020);
2907 			/* remove from active list as we're done */
2908 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2909 			req->state &= ~REQ_STATE_QUEUED;
2910 			req->state |= REQ_STATE_DONE;
2911 			mpt_fc_els_send_response(mpt, req, rp, 20);
2912 			do_refresh = FALSE;
2913 			break;
2914 		case PRLO:
2915 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2916 			elsbuf[0] = htobe32(0x02100014);
2917 			elsbuf[1] = htobe32(0x08000100);
2918 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2919 			    le32toh(rp->Wwn.PortNameHigh),
2920 			    le32toh(rp->Wwn.PortNameLow));
2921 			/* remove from active list as we're done */
2922 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2923 			req->state &= ~REQ_STATE_QUEUED;
2924 			req->state |= REQ_STATE_DONE;
2925 			mpt_fc_els_send_response(mpt, req, rp, 20);
2926 			do_refresh = FALSE;
2927 			break;
2928 		default:
2929 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2930 			break;
2931 		}
2932 	} else if (rctl == ABTS && type == 0) {
2933 		uint16_t rx_id = le16toh(rp->Rxid);
2934 		uint16_t ox_id = le16toh(rp->Oxid);
2935 		request_t *tgt_req = NULL;
2936 
2937 		mpt_prt(mpt,
2938 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2939 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2940 		    le32toh(rp->Wwn.PortNameLow));
2941 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2942 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2943 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2944 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2945 		} else {
2946 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2947 		}
2948 		if (tgt_req) {
2949 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2950 			union ccb *ccb;
2951 			uint32_t ct_id;
2952 
2953 			/*
2954 			 * Check to make sure we have the correct command.
2955 			 * The reply descriptor in the target state should
2956 			 * contain an IoIndex that matches the RX_ID.
2958 			 *
2959 			 * It'd be nice to have OX_ID to crosscheck with
2960 			 * as well.
2961 			 */
2962 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2963 
2964 			if (ct_id != rx_id) {
2965 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2966 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2967 				    rx_id, ct_id);
2968 				goto skip;
2969 			}
2970 
2971 			ccb = tgt->ccb;
2972 			if (ccb) {
2973 				mpt_prt(mpt,
2974 				    "CCB (%p): lun %jx flags %x status %x\n",
2975 				    ccb, (uintmax_t)ccb->ccb_h.target_lun,
2976 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2977 			}
2978 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2979 			    "%x nxfers %x\n", tgt->state,
2980 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2981 			    tgt->nxfers);
2982   skip:
2983 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2984 				mpt_prt(mpt, "unable to start TargetAbort\n");
2985 			}
2986 		} else {
2987 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2988 		}
2989 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2990 		elsbuf[0] = htobe32(0);
2991 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2992 		elsbuf[2] = htobe32(0x0000ffff);
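		/*
		 * BA_ACC payload: word 1 names the OX_ID/RX_ID of the
		 * aborted exchange, word 2 the low/high SEQ_CNT range
		 * (0x0000 through 0xffff).
		 */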
2993 		/*
2994 		 * Dork with the reply frame so that the response to it
2995 		 * will be correct.
2996 		 */
2997 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2998 		/* remove from active list as we're done */
2999 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3000 		req->state &= ~REQ_STATE_QUEUED;
3001 		req->state |= REQ_STATE_DONE;
3002 		mpt_fc_els_send_response(mpt, req, rp, 12);
3003 		do_refresh = FALSE;
3004 	} else {
3005 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3006 	}
3007 	if (do_refresh == TRUE) {
3008 		/* remove from active list as we're done */
3009 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3010 		req->state &= ~REQ_STATE_QUEUED;
3011 		req->state |= REQ_STATE_DONE;
3012 		mpt_fc_post_els(mpt, req, ioindex);
3013 	}
3014 	return (TRUE);
3015 }
3016 
3017 /*
3018  * Clean up all SCSI Initiator personality state in response
3019  * to a controller reset.
3020  */
3021 static void
3022 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3023 {
3024 
3025 	/*
3026 	 * The pending list is already run down by
3027 	 * the generic handler.  Perform the same
3028 	 * operation on the timed out request list.
3029 	 */
3030 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3031 				   MPI_IOCSTATUS_INVALID_STATE);
3032 
3033 	/*
3034 	 * XXX: We need to repost ELS and Target Command Buffers?
3035 	 */
3036 
3037 	/*
3038 	 * Inform the XPT that a bus reset has occurred.
3039 	 */
3040 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
3041 }
3042 
3043 /*
3044  * Parse additional completion information in the reply
3045  * frame for SCSI I/O requests.
3046  */
3047 static int
3048 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3049 			     MSG_DEFAULT_REPLY *reply_frame)
3050 {
3051 	union ccb *ccb;
3052 	MSG_SCSI_IO_REPLY *scsi_io_reply;
3053 	u_int ioc_status;
3054 	u_int sstate;
3055 
3056 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3057 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3058 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3059 		("MPT SCSI I/O Handler called with incorrect reply type"));
3060 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3061 		("MPT SCSI I/O Handler called with continuation reply"));
3062 
3063 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3064 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
3065 	ioc_status &= MPI_IOCSTATUS_MASK;
3066 	sstate = scsi_io_reply->SCSIState;
3067 
3068 	ccb = req->ccb;
3069 	ccb->csio.resid =
3070 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3071 
3072 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3073 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3074 		uint32_t sense_returned;
3075 
3076 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3077 
3078 		sense_returned = le32toh(scsi_io_reply->SenseCount);
3079 		if (sense_returned < ccb->csio.sense_len)
3080 			ccb->csio.sense_resid = ccb->csio.sense_len -
3081 						sense_returned;
3082 		else
3083 			ccb->csio.sense_resid = 0;
3084 
3085 		bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3086 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3087 		    min(ccb->csio.sense_len, sense_returned));
3088 	}
3089 
3090 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3091 		/*
3092 		 * Tag messages rejected, but non-tagged retry
3093 		 * was successful.
3094 		 *
3095 		 * XXX: mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3096 		 */
3097 	}
3098 
3099 	switch(ioc_status) {
3100 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3101 		/*
3102 		 * XXX
3103 		 * Linux driver indicates that a zero
3104 		 * transfer length with this error code
3105 		 * indicates a CRC error.
3106 		 *
3107 		 * No need to swap the bytes for checking
3108 		 * against zero.
3109 		 */
3110 		if (scsi_io_reply->TransferCount == 0) {
3111 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3112 			break;
3113 		}
3114 		/* FALLTHROUGH */
3115 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3116 	case MPI_IOCSTATUS_SUCCESS:
3117 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3118 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3119 			/*
3120 			 * Status was never returned for this transaction.
3121 			 */
3122 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3123 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3124 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3125 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3126 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3127 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3128 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3130 			/* XXX Handle SPI-Packet and FCP-2 response info. */
3131 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3132 		} else
3133 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3134 		break;
3135 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3136 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3137 		break;
3138 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3139 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3140 		break;
3141 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3142 		/*
3143 		 * Since selection timeouts and "device really not
3144 		 * there" are grouped into this error code, report
3145 		 * selection timeout.  Selection timeouts are
3146 		 * typically retried before giving up on the device
3147 		 * whereas "device not there" errors are considered
3148 		 * unretryable.
3149 		 */
3150 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3151 		break;
3152 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3153 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3154 		break;
3155 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3156 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3157 		break;
3158 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3159 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3160 		break;
3161 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3162 		ccb->ccb_h.status = CAM_UA_TERMIO;
3163 		break;
3164 	case MPI_IOCSTATUS_INVALID_STATE:
3165 		/*
3166 		 * The IOC has been reset.  Emulate a bus reset.
3167 		 */
3168 		/* FALLTHROUGH */
3169 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3170 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3171 		break;
3172 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3173 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3174 		/*
3175 		 * Don't clobber any timeout status that has
3176 		 * already been set for this transaction.  We
3177 		 * want the SCSI layer to be able to differentiate
3178 		 * between the command we aborted due to timeout
3179 		 * and any innocent bystanders.
3180 		 */
3181 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3182 			break;
3183 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3184 		break;
3185 
3186 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3187 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3188 		break;
3189 	case MPI_IOCSTATUS_BUSY:
3190 		mpt_set_ccb_status(ccb, CAM_BUSY);
3191 		break;
3192 	case MPI_IOCSTATUS_INVALID_FUNCTION:
3193 	case MPI_IOCSTATUS_INVALID_SGL:
3194 	case MPI_IOCSTATUS_INTERNAL_ERROR:
3195 	case MPI_IOCSTATUS_INVALID_FIELD:
3196 	default:
3197 		/* XXX
3198 		 * Some of the above may need to kick
3199 		 * off a recovery action!
3200 		 */
3201 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3202 		break;
3203 	}
3204 
3205 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3206 		mpt_freeze_ccb(ccb);
3207 	}
3208 
3209 	return (TRUE);
3210 }
3211 
3212 static void
3213 mpt_action(struct cam_sim *sim, union ccb *ccb)
3214 {
3215 	struct mpt_softc *mpt;
3216 	struct ccb_trans_settings *cts;
3217 	target_id_t tgt;
3218 	lun_id_t lun;
3219 	int raid_passthru;
3220 
3221 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3222 
3223 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
3224 	raid_passthru = (sim == mpt->phydisk_sim);
3225 	MPT_LOCK_ASSERT(mpt);
3226 
3227 	tgt = ccb->ccb_h.target_id;
3228 	lun = ccb->ccb_h.target_lun;
3229 	if (raid_passthru &&
3230 	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
3231 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
3232 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
3233 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3234 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3235 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3236 			xpt_done(ccb);
3237 			return;
3238 		}
3239 	}
3240 	ccb->ccb_h.ccb_mpt_ptr = mpt;
3241 
3242 	switch (ccb->ccb_h.func_code) {
3243 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
3244 		/*
3245 		 * Do a couple of preliminary checks...
3246 		 */
3247 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3248 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3249 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3250 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3251 				break;
3252 			}
3253 		}
3254 		/* Max supported CDB length is 16 bytes */
3255 		/* XXX Unless we implement the new 32byte message type */
3256 		if (ccb->csio.cdb_len >
3257 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3258 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3259 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3260 			break;
3261 		}
3262 #ifdef	MPT_TEST_MULTIPATH
3263 		if (mpt->failure_id == ccb->ccb_h.target_id) {
3264 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3265 			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3266 			break;
3267 		}
3268 #endif
3269 		ccb->csio.scsi_status = SCSI_STATUS_OK;
3270 		mpt_start(sim, ccb);
3271 		return;
3272 
3273 	case XPT_RESET_BUS:
3274 		if (raid_passthru) {
3275 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3276 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3277 			break;
3278 		}
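		/* FALLTHROUGH */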
3279 	case XPT_RESET_DEV:
3280 		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3281 			if (bootverbose) {
3282 				xpt_print(ccb->ccb_h.path, "reset bus\n");
3283 			}
3284 		} else {
3285 			xpt_print(ccb->ccb_h.path, "reset device\n");
3286 		}
3287 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3288 
3289 		/*
3290 		 * mpt_bus_reset is always successful in that it
3291 		 * will fall back to a hard reset should a bus
3292 		 * reset attempt fail.
3293 		 */
3294 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3295 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3296 		break;
3297 
3298 	case XPT_ABORT:
3299 	{
3300 		union ccb *accb = ccb->cab.abort_ccb;
3301 		switch (accb->ccb_h.func_code) {
3302 		case XPT_ACCEPT_TARGET_IO:
3303 		case XPT_IMMEDIATE_NOTIFY:
3304 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3305 			break;
3306 		case XPT_CONT_TARGET_IO:
3307 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3308 			ccb->ccb_h.status = CAM_UA_ABORT;
3309 			break;
3310 		case XPT_SCSI_IO:
3311 			ccb->ccb_h.status = CAM_UA_ABORT;
3312 			break;
3313 		default:
3314 			ccb->ccb_h.status = CAM_REQ_INVALID;
3315 			break;
3316 		}
3317 		break;
3318 	}
3319 
3320 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3321 
3322 #define	DP_DISC_ENABLE	0x1
3323 #define	DP_DISC_DISABL	0x2
3324 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
3325 
3326 #define	DP_TQING_ENABLE	0x4
3327 #define	DP_TQING_DISABL	0x8
3328 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
3329 
3330 #define	DP_WIDE		0x10
3331 #define	DP_NARROW	0x20
3332 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
3333 
3334 #define	DP_SYNC		0x40
3335 
3336 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
3337 	{
3338 		struct ccb_trans_settings_scsi *scsi;
3339 		struct ccb_trans_settings_spi *spi;
3340 		uint8_t dval;
3341 		u_int period;
3342 		u_int offset;
3343 		int i, j;
3344 
3345 		cts = &ccb->cts;
3346 
3347 		if (mpt->is_fc || mpt->is_sas) {
3348 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3349 			break;
3350 		}
3351 
3352 		scsi = &cts->proto_specific.scsi;
3353 		spi = &cts->xport_specific.spi;
3354 
3355 		/*
3356 		 * We can be called just to validate transport and protocol versions.
3357 		 */
3358 		if (scsi->valid == 0 && spi->valid == 0) {
3359 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3360 			break;
3361 		}
3362 
3363 		/*
3364 		 * Skip attempting settings on RAID volume disks.
3365 		 * Other devices on the bus get the normal treatment.
3366 		 */
3367 		if (mpt->phydisk_sim && raid_passthru == 0 &&
3368 		    mpt_is_raid_volume(mpt, tgt) != 0) {
3369 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3370 			    "no transfer settings for RAID vols\n");
3371 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3372 			break;
3373 		}
3374 
3375 		i = mpt->mpt_port_page2.PortSettings &
3376 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3377 		j = mpt->mpt_port_page2.PortFlags &
3378 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3379 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3380 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3381 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
3382 			    "honoring BIOS transfer negotiations\n");
3383 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3384 			break;
3385 		}
3386 
3387 		dval = 0;
3388 		period = 0;
3389 		offset = 0;
3390 
3391 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3392 			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3393 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3394 		}
3395 
3396 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3397 			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3398 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3399 		}
3400 
3401 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3402 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3403 			    DP_WIDE : DP_NARROW;
3404 		}
3405 
3406 		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3407 			dval |= DP_SYNC;
3408 			offset = spi->sync_offset;
3409 		} else {
3410 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3411 			    &mpt->mpt_dev_page1[tgt];
3412 			offset = ptr->RequestedParameters;
3413 			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3414 			offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3415 		}
3416 		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3417 			dval |= DP_SYNC;
3418 			period = spi->sync_period;
3419 		} else {
3420 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3421 			    &mpt->mpt_dev_page1[tgt];
3422 			period = ptr->RequestedParameters;
3423 			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3424 			period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3425 		}
3426 
3427 		if (dval & DP_DISC_ENABLE) {
3428 			mpt->mpt_disc_enable |= (1 << tgt);
3429 		} else if (dval & DP_DISC_DISABL) {
3430 			mpt->mpt_disc_enable &= ~(1 << tgt);
3431 		}
3432 		if (dval & DP_TQING_ENABLE) {
3433 			mpt->mpt_tag_enable |= (1 << tgt);
3434 		} else if (dval & DP_TQING_DISABL) {
3435 			mpt->mpt_tag_enable &= ~(1 << tgt);
3436 		}
3437 		if (dval & DP_WIDTH) {
3438 			mpt_setwidth(mpt, tgt, 1);
3439 		}
3440 		if (dval & DP_SYNC) {
3441 			mpt_setsync(mpt, tgt, period, offset);
3442 		}
3443 		if (dval == 0) {
3444 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3445 			break;
3446 		}
3447 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3448 		    "set [%d]: 0x%x period 0x%x offset %d\n",
3449 		    tgt, dval, period, offset);
3450 		if (mpt_update_spi_config(mpt, tgt)) {
3451 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3452 		} else {
3453 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3454 		}
3455 		break;
3456 	}
3457 	case XPT_GET_TRAN_SETTINGS:
3458 	{
3459 		struct ccb_trans_settings_scsi *scsi;
3460 		cts = &ccb->cts;
3461 		cts->protocol = PROTO_SCSI;
3462 		if (mpt->is_fc) {
3463 			struct ccb_trans_settings_fc *fc =
3464 			    &cts->xport_specific.fc;
3465 			cts->protocol_version = SCSI_REV_SPC;
3466 			cts->transport = XPORT_FC;
3467 			cts->transport_version = 0;
3468 			fc->valid = CTS_FC_VALID_SPEED;
3469 			fc->bitrate = 100000;
3470 		} else if (mpt->is_sas) {
3471 			struct ccb_trans_settings_sas *sas =
3472 			    &cts->xport_specific.sas;
3473 			cts->protocol_version = SCSI_REV_SPC2;
3474 			cts->transport = XPORT_SAS;
3475 			cts->transport_version = 0;
3476 			sas->valid = CTS_SAS_VALID_SPEED;
3477 			sas->bitrate = 300000;
3478 		} else {
3479 			cts->protocol_version = SCSI_REV_2;
3480 			cts->transport = XPORT_SPI;
3481 			cts->transport_version = 2;
3482 			if (mpt_get_spi_settings(mpt, cts) != 0) {
3483 				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3484 				break;
3485 			}
3486 		}
3487 		scsi = &cts->proto_specific.scsi;
3488 		scsi->valid = CTS_SCSI_VALID_TQ;
3489 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3490 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3491 		break;
3492 	}
3493 	case XPT_CALC_GEOMETRY:
3494 	{
3495 		struct ccb_calc_geometry *ccg;
3496 
3497 		ccg = &ccb->ccg;
3498 		if (ccg->block_size == 0) {
3499 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3500 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3501 			break;
3502 		}
3503 		cam_calc_geometry(ccg, /* extended */ 1);
3504 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3505 		break;
3506 	}
3507 	case XPT_PATH_INQ:		/* Path routing inquiry */
3508 	{
3509 		struct ccb_pathinq *cpi = &ccb->cpi;
3510 
3511 		cpi->version_num = 1;
3512 		cpi->target_sprt = 0;
3513 		cpi->hba_eng_cnt = 0;
3514 		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3515 		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3516 		/*
3517 		 * FC cards report MAX_DEVICES of 512, but
3518 		 * the MSG_SCSI_IO_REQUEST target id field
3519 		 * is only 8 bits. Until we fix the driver
3520 		 * to support 'channels' for bus overflow,
3521 		 * just limit it.
3522 		 */
3523 		if (cpi->max_target > 255) {
3524 			cpi->max_target = 255;
3525 		}
3526 
3527 		/*
3528 		 * VMware ESX reports > 16 devices and then dies when we probe.
3529 		 */
3530 		if (mpt->is_spi && cpi->max_target > 15) {
3531 			cpi->max_target = 15;
3532 		}
3533 		if (mpt->is_spi)
3534 			cpi->max_lun = 7;
3535 		else
3536 			cpi->max_lun = MPT_MAX_LUNS;
3537 		cpi->initiator_id = mpt->mpt_ini_id;
3538 		cpi->bus_id = cam_sim_bus(sim);
3539 
3540 		/*
3541 		 * The base speed is the speed of the underlying connection.
3542 		 */
3543 		cpi->protocol = PROTO_SCSI;
3544 		if (mpt->is_fc) {
3545 			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
3546 			cpi->base_transfer_speed = 100000;
3547 			cpi->hba_inquiry = PI_TAG_ABLE;
3548 			cpi->transport = XPORT_FC;
3549 			cpi->transport_version = 0;
3550 			cpi->protocol_version = SCSI_REV_SPC;
3551 		} else if (mpt->is_sas) {
3552 			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
3553 			cpi->base_transfer_speed = 300000;
3554 			cpi->hba_inquiry = PI_TAG_ABLE;
3555 			cpi->transport = XPORT_SAS;
3556 			cpi->transport_version = 0;
3557 			cpi->protocol_version = SCSI_REV_SPC2;
3558 		} else {
3559 			cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
3560 			cpi->base_transfer_speed = 3300;
3561 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3562 			cpi->transport = XPORT_SPI;
3563 			cpi->transport_version = 2;
3564 			cpi->protocol_version = SCSI_REV_2;
3565 		}
3566 
3567 		/*
3568 		 * We give our fake RAID passthru bus a width that is
3569 		 * MaxPhysDisks wide and restrict it to one lun.
3570 		 */
3571 		if (raid_passthru) {
3572 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3573 			cpi->initiator_id = cpi->max_target + 1;
3574 			cpi->max_lun = 0;
3575 		}
3576 
3577 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3578 			cpi->hba_misc |= PIM_NOINITIATOR;
3579 		}
3580 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3581 			cpi->target_sprt =
3582 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3583 		} else {
3584 			cpi->target_sprt = 0;
3585 		}
3586 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3587 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3588 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3589 		cpi->unit_number = cam_sim_unit(sim);
3590 		cpi->ccb_h.status = CAM_REQ_CMP;
3591 		break;
3592 	}
3593 	case XPT_EN_LUN:		/* Enable LUN as a target */
3594 	{
3595 		int result;
3596 
3597 		if (ccb->cel.enable)
3598 			result = mpt_enable_lun(mpt,
3599 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3600 		else
3601 			result = mpt_disable_lun(mpt,
3602 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3603 		if (result == 0) {
3604 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3605 		} else {
3606 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3607 		}
3608 		break;
3609 	}
3610 	case XPT_NOTIFY_ACKNOWLEDGE:	/* recycle notify ack */
3611 	case XPT_IMMEDIATE_NOTIFY:	/* Add Immediate Notify Resource */
3612 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3613 	{
3614 		tgt_resource_t *trtp;
3615 		lun_id_t lun = ccb->ccb_h.target_lun;
3616 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3617 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3618 
3619 		if (lun == CAM_LUN_WILDCARD) {
3620 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3621 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3622 				break;
3623 			}
3624 			trtp = &mpt->trt_wildcard;
3625 		} else if (lun >= MPT_MAX_LUNS) {
3626 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3627 			break;
3628 		} else {
3629 			trtp = &mpt->trt[lun];
3630 		}
3631 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3632 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3633 			    "Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun);
3634 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3635 			    sim_links.stqe);
3636 		} else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
3637 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3638 			    "Put FREE INOT lun %jx\n", (uintmax_t)lun);
3639 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3640 			    sim_links.stqe);
3641 		} else {
3642 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3643 		}
3644 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3645 		return;
3646 	}
3647 	case XPT_CONT_TARGET_IO:
3648 		mpt_target_start_io(mpt, ccb);
3649 		return;
3650 
3651 	default:
3652 		ccb->ccb_h.status = CAM_REQ_INVALID;
3653 		break;
3654 	}
3655 	xpt_done(ccb);
3656 }
3657 
3658 static int
3659 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3660 {
3661 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3662 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3663 	target_id_t tgt;
3664 	uint32_t dval, pval, oval;
3665 	int rv;
3666 
3667 	if (IS_CURRENT_SETTINGS(cts) == 0) {
3668 		tgt = cts->ccb_h.target_id;
3669 	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3670 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3671 			return (-1);
3672 		}
3673 	} else {
3674 		tgt = cts->ccb_h.target_id;
3675 	}
3676 
3677 	/*
3678 	 * We aren't looking at Port Page 2 BIOS settings here;
3679 	 * these have sometimes been known to be bogus (XXX).
3680 	 *
3681 	 * For user settings, we pick the max from port page 0
3682 	 *
3683 	 * For current settings we read the current settings out from
3684 	 * device page 0 for that target.
3685 	 */
3686 	if (IS_CURRENT_SETTINGS(cts)) {
3687 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3688 		dval = 0;
3689 
3690 		tmp = mpt->mpt_dev_page0[tgt];
3691 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3692 		    sizeof(tmp), FALSE, 5000);
3693 		if (rv) {
3694 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3695 			return (rv);
3696 		}
3697 		mpt2host_config_page_scsi_device_0(&tmp);
3698 
3699 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3700 		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3701 		    tmp.NegotiatedParameters, tmp.Information);
3702 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3703 		    DP_WIDE : DP_NARROW;
3704 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3705 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3706 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3707 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3708 		oval = tmp.NegotiatedParameters;
3709 		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3710 		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3711 		pval = tmp.NegotiatedParameters;
3712 		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3713 		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3714 		mpt->mpt_dev_page0[tgt] = tmp;
3715 	} else {
3716 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3717 		oval = mpt->mpt_port_page0.Capabilities;
3718 		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3719 		pval = mpt->mpt_port_page0.Capabilities;
3720 		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3721 	}
3722 
3723 	spi->valid = 0;
3724 	scsi->valid = 0;
3725 	spi->flags = 0;
3726 	scsi->flags = 0;
3727 	spi->sync_offset = oval;
3728 	spi->sync_period = pval;
3729 	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3730 	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3731 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3732 	if (dval & DP_WIDE) {
3733 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3734 	} else {
3735 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3736 	}
3737 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3738 		scsi->valid = CTS_SCSI_VALID_TQ;
3739 		if (dval & DP_TQING_ENABLE) {
3740 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3741 		}
3742 		spi->valid |= CTS_SPI_VALID_DISC;
3743 		if (dval & DP_DISC_ENABLE) {
3744 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3745 		}
3746 	}
3747 
3748 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3749 	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3750 	    IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
3751 	return (0);
3752 }
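
/*
 * Worked example for the bit extraction above, assuming the MPI 1.x
 * field layout (negotiated sync offset in bits 16..23, sync period
 * factor in bits 8..15 of NegotiatedParameters):
 *
 *	NegotiatedParameters = 0x20080a00	(wide, offset 8, factor 0x0a)
 *	oval = (0x20080a00 & 0x00ff0000) >> 16 = 0x08
 *	pval = (0x20080a00 & 0x0000ff00) >>  8 = 0x0a
 *
 * i.e. an Ultra2-class (25ns period factor) negotiation, reported back
 * to CAM via spi->sync_offset and spi->sync_period.
 */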
3753 
3754 static void
3755 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3756 {
3757 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3758 
3759 	ptr = &mpt->mpt_dev_page1[tgt];
3760 	if (onoff) {
3761 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3762 	} else {
3763 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3764 	}
3765 }
3766 
3767 static void
3768 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3769 {
3770 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3771 
3772 	ptr = &mpt->mpt_dev_page1[tgt];
3773 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3774 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3775 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3776 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3777 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3778 	if (period == 0) {
3779 		return;
3780 	}
3781 	ptr->RequestedParameters |=
3782 	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3783 	ptr->RequestedParameters |=
3784 	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3785 	if (period < 0xa) {
3786 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3787 	}
3788 	if (period < 0x9) {
3789 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3790 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3791 	}
3792 }
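
/*
 * Note that "period" here is the SPI period factor, not nanoseconds.
 * Sketch of the cut-offs used above (spec-derived mapping, given for
 * illustration only):
 *
 *	factor	period	bus rate		options enabled above
 *	0x0a	25ns	Ultra2 (40 MT/s)	none
 *	0x09	12.5ns	Ultra160 (80 MT/s)	DT
 *	0x08	6.25ns	Ultra320 (160 MT/s)	DT + QAS + IU
 *
 * Anything faster than factor 0xa needs double-transition clocking,
 * and Ultra320 additionally requires quick arbitration/selection and
 * information units.
 */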
3793 
3794 static int
3795 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3796 {
3797 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3798 	int rv;
3799 
3800 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3801 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3802 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3803 	tmp = mpt->mpt_dev_page1[tgt];
3804 	host2mpt_config_page_scsi_device_1(&tmp);
3805 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3806 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3807 	if (rv) {
3808 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3809 		return (-1);
3810 	}
3811 	return (0);
3812 }
3813 
3814 /****************************** Timeout Recovery ******************************/
3815 static int
3816 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3817 {
3818 	int error;
3819 
3820 	error = kproc_create(mpt_recovery_thread, mpt,
3821 	    &mpt->recovery_thread, /*flags*/0,
3822 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3823 	return (error);
3824 }
3825 
3826 static void
3827 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3828 {
3829 
3830 	if (mpt->recovery_thread == NULL) {
3831 		return;
3832 	}
3833 	mpt->shutdwn_recovery = 1;
3834 	wakeup(mpt);
3835 	/*
3836 	 * Sleep on a slightly different location
3837 	 * for this interlock just for added safety.
3838 	 */
3839 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3840 }
3841 
3842 static void
3843 mpt_recovery_thread(void *arg)
3844 {
3845 	struct mpt_softc *mpt;
3846 
3847 	mpt = (struct mpt_softc *)arg;
3848 	MPT_LOCK(mpt);
3849 	for (;;) {
3850 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3851 			if (mpt->shutdwn_recovery == 0) {
3852 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3853 			}
3854 		}
3855 		if (mpt->shutdwn_recovery != 0) {
3856 			break;
3857 		}
3858 		mpt_recover_commands(mpt);
3859 	}
3860 	mpt->recovery_thread = NULL;
3861 	wakeup(&mpt->recovery_thread);
3862 	MPT_UNLOCK(mpt);
3863 	kproc_exit(0);
3864 }
3865 
3866 static int
3867 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3868     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3869 {
3870 	MSG_SCSI_TASK_MGMT *tmf_req;
3871 	int		    error;
3872 
3873 	/*
3874 	 * Wait for any current TMF request to complete.
3875 	 * We're only allowed to issue one TMF at a time.
3876 	 */
3877 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3878 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3879 	if (error != 0) {
3880 		mpt_reset(mpt, TRUE);
3881 		return (ETIMEDOUT);
3882 	}
3883 
3884 	mpt_assign_serno(mpt, mpt->tmf_req);
3885 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3886 
3887 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3888 	memset(tmf_req, 0, sizeof(*tmf_req));
3889 	tmf_req->TargetID = target;
3890 	tmf_req->Bus = channel;
3891 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3892 	tmf_req->TaskType = type;
3893 	tmf_req->MsgFlags = flags;
3894 	tmf_req->MsgContext =
3895 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3896 	if (lun > MPT_MAX_LUNS) {
3897 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3898 		tmf_req->LUN[1] = lun & 0xff;
3899 	} else {
3900 		tmf_req->LUN[1] = lun;
3901 	}
3902 	tmf_req->TaskMsgContext = abort_ctx;
3903 
3904 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3905 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3906 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3907 	if (mpt->verbose > MPT_PRT_DEBUG) {
3908 		mpt_print_request(tmf_req);
3909 	}
3910 
3911 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3912 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3913 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3914 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3915 	if (error != MPT_OK) {
3916 		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
3917 		mpt->tmf_req->state = REQ_STATE_FREE;
3918 		mpt_reset(mpt, TRUE);
3919 	}
3920 	return (error);
3921 }
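
/*
 * Worked example of the LUN encoding above: luns that do not fit in a
 * single byte use the SAM-2 flat addressing format (0x40 plus the top
 * six lun bits in byte 0).  For a hypothetical lun of 0x123:
 *
 *	LUN[0] = 0x40 | ((0x123 >> 8) & 0x3f) = 0x41
 *	LUN[1] = 0x123 & 0xff                 = 0x23
 *
 * Small luns go into LUN[1] with LUN[0] left zero (peripheral device
 * addressing).
 */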
3922 
3923 /*
3924  * When a command times out, it is placed on the request_timeout_list
3925  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3926  * only a single TMF operation at a time, so we serially abort/BDR, etc.,
3927  * the timed-out transactions.  The next TMF is issued either by the
3928  * completion handler of the current TMF waking our recovery thread,
3929  * or the TMF timeout handler causing a hard reset sequence.
3930  */
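/*
 * A sketch of the producer side of this handshake (hypothetical
 * pseudo-code; the real enqueue is performed by the timeout handler,
 * mpt_timeout(), elsewhere in this file):
 *
 *	MPT_LOCK(mpt);
 *	TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
 *	wakeup(mpt);			// rouses mpt_recovery_thread()
 *	MPT_UNLOCK(mpt);
 */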
3931 static void
3932 mpt_recover_commands(struct mpt_softc *mpt)
3933 {
3934 	request_t	   *req;
3935 	union ccb	   *ccb;
3936 	int		    error;
3937 
3938 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3939 		/*
3940 		 * No work to do- leave.
3941 		 */
3942 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3943 		return;
3944 	}
3945 
3946 	/*
3947 	 * Flush any commands whose completion coincides with their timeout.
3948 	 */
3949 	mpt_intr(mpt);
3950 
3951 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3952 		/*
3953 		 * The timedout commands have already
3954 		 * completed.  This typically means
3955 		 * that either the timeout value was on
3956 		 * the hairy edge of what the device
3957 		 * requires or - more likely - interrupts
3958 		 * are not happening.
3959 		 */
3960 		mpt_prt(mpt, "Timedout requests already complete. "
3961 		    "Interrupts may not be functioning.\n");
3962 		mpt_enable_ints(mpt);
3963 		return;
3964 	}
3965 
3966 	/*
3967 	 * We have no visibility into the current state of the
3968 	 * controller, so attempt to abort the commands in the
3969 	 * order they timed-out. For initiator commands, we
3970 	 * depend on the reply handler pulling requests off
3971 	 * the timeout list.
3972 	 */
3973 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3974 		uint16_t status;
3975 		uint8_t response;
3976 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3977 
3978 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
3979 		    req, req->serno, hdrp->Function);
3980 		ccb = req->ccb;
3981 		if (ccb == NULL) {
3982 			mpt_prt(mpt, "null ccb in timed out request. "
3983 			    "Resetting Controller.\n");
3984 			mpt_reset(mpt, TRUE);
3985 			continue;
3986 		}
3987 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
3988 
3989 		/*
3990 		 * Check to see if this is not an initiator command and
3991 		 * deal with it differently if it is.
3992 		 */
3993 		switch (hdrp->Function) {
3994 		case MPI_FUNCTION_SCSI_IO_REQUEST:
3995 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
3996 			break;
3997 		default:
3998 			/*
3999 			 * XXX: FIX ME: need to abort target assists...
4000 			 */
4001 			mpt_prt(mpt, "just putting it back on the pend q\n");
4002 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4003 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4004 			    links);
4005 			continue;
4006 		}
4007 
4008 		error = mpt_scsi_send_tmf(mpt,
4009 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4010 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4011 		    htole32(req->index | scsi_io_handler_id), TRUE);
4012 
4013 		if (error != 0) {
4014 			/*
4015 			 * mpt_scsi_send_tmf hard resets on failure, so no
4016 			 * need to do so here.  Our queue should be emptied
4017 			 * by the hard reset.
4018 			 */
4019 			continue;
4020 		}
4021 
4022 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4023 		    REQ_STATE_DONE, TRUE, 500);
4024 
4025 		status = le16toh(mpt->tmf_req->IOCStatus);
4026 		response = mpt->tmf_req->ResponseCode;
4027 		mpt->tmf_req->state = REQ_STATE_FREE;
4028 
4029 		if (error != 0) {
4030 			/*
4031 			 * If we've errored out, reset the controller.
4032 			 */
4033 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4034 			    "Resetting controller\n");
4035 			mpt_reset(mpt, TRUE);
4036 			continue;
4037 		}
4038 
4039 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4040 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4041 			    "Resetting controller.\n", status);
4042 			mpt_reset(mpt, TRUE);
4043 			continue;
4044 		}
4045 
4046 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4047 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4048 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4049 			    "Resetting controller.\n", response);
4050 			mpt_reset(mpt, TRUE);
4051 			continue;
4052 		}
4053 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4054 	}
4055 }
4056 
4057 /************************ Target Mode Support ****************************/
4058 static void
4059 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4060 {
4061 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4062 	PTR_SGE_TRANSACTION32 tep;
4063 	PTR_SGE_SIMPLE32 se;
4064 	bus_addr_t paddr;
4065 	uint32_t fl;
4066 
4067 	paddr = req->req_pbuf;
4068 	paddr += MPT_RQSL(mpt);
4069 
4070 	fc = req->req_vbuf;
4071 	memset(fc, 0, MPT_REQUEST_AREA);
4072 	fc->BufferCount = 1;
4073 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4074 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
4075 
4076 	/*
4077 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
4078 	 * consist of a TE SGL element (with details length of zero)
4079 	 * followed by a SIMPLE SGL element which holds the address
4080 	 * of the buffer.
4081 	 */
4082 
4083 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4084 
4085 	tep->ContextSize = 4;
4086 	tep->Flags = 0;
4087 	tep->TransactionContext[0] = htole32(ioindex);
4088 
4089 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4090 	fl =
4091 		MPI_SGE_FLAGS_HOST_TO_IOC	|
4092 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4093 		MPI_SGE_FLAGS_LAST_ELEMENT	|
4094 		MPI_SGE_FLAGS_END_OF_LIST	|
4095 		MPI_SGE_FLAGS_END_OF_BUFFER;
4096 	fl <<= MPI_SGE_FLAGS_SHIFT;
4097 	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4098 	se->FlagsLength = htole32(fl);
4099 	se->Address = htole32((uint32_t) paddr);
4100 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4101 	    "add ELS index %d ioindex %d for %p:%u\n",
4102 	    req->index, ioindex, req, req->serno);
4103 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4104 	    ("mpt_fc_post_els: request not locked"));
4105 	mpt_send_cmd(mpt, req);
4106 }
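
/*
 * Worked example for the FlagsLength word built above, assuming the
 * MPI 1.x flag values (SIMPLE_ELEMENT 0x10, LAST_ELEMENT 0x80,
 * END_OF_BUFFER 0x40, END_OF_LIST 0x01, HOST_TO_IOC 0x04) and
 * MPI_SGE_FLAGS_SHIFT == 24:
 *
 *	fl  = (0x04 | 0x10 | 0x80 | 0x40 | 0x01) << 24 = 0xd5000000
 *	fl |= MPT_NRFM(mpt) - MPT_RQSL(mpt);	// buffer length, low 24 bits
 *
 * so the SGE flags land in the top byte of the (little-endian) word
 * and the ELS buffer length in the bottom three bytes.
 */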
4107 
4108 static void
4109 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4110 {
4111 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4112 	PTR_CMD_BUFFER_DESCRIPTOR cb;
4113 	bus_addr_t paddr;
4114 
4115 	paddr = req->req_pbuf;
4116 	paddr += MPT_RQSL(mpt);
4117 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4118 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4119 
4120 	fc = req->req_vbuf;
4121 	fc->BufferCount = 1;
4122 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4123 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4124 
4125 	cb = &fc->Buffer[0];
4126 	cb->IoIndex = htole16(ioindex);
4127 	cb->u.PhysicalAddress32 = htole32((U32) paddr);
4128 
4129 	mpt_check_doorbell(mpt);
4130 	mpt_send_cmd(mpt, req);
4131 }
4132 
4133 static int
4134 mpt_add_els_buffers(struct mpt_softc *mpt)
4135 {
4136 	int i;
4137 
4138 	if (mpt->is_fc == 0) {
4139 		return (TRUE);
4140 	}
4141 
4142 	if (mpt->els_cmds_allocated) {
4143 		return (TRUE);
4144 	}
4145 
4146 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4147 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4148 
4149 	if (mpt->els_cmd_ptrs == NULL) {
4150 		return (FALSE);
4151 	}
4152 
4153 	/*
4154 	 * Feed the chip some ELS buffer resources
4155 	 */
4156 	for (i = 0; i < MPT_MAX_ELS; i++) {
4157 		request_t *req = mpt_get_request(mpt, FALSE);
4158 		if (req == NULL) {
4159 			break;
4160 		}
4161 		req->state |= REQ_STATE_LOCKED;
4162 		mpt->els_cmd_ptrs[i] = req;
4163 		mpt_fc_post_els(mpt, req, i);
4164 	}
4165 
4166 	if (i == 0) {
4167 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
4168 		free(mpt->els_cmd_ptrs, M_DEVBUF);
4169 		mpt->els_cmd_ptrs = NULL;
4170 		return (FALSE);
4171 	}
4172 	if (i != MPT_MAX_ELS) {
4173 		mpt_lprt(mpt, MPT_PRT_INFO,
4174 		    "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4175 	}
4176 	mpt->els_cmds_allocated = i;
4177 	return(TRUE);
4178 }
4179 
4180 static int
4181 mpt_add_target_commands(struct mpt_softc *mpt)
4182 {
4183 	int i, max;
4184 
4185 	if (mpt->tgt_cmd_ptrs) {
4186 		return (TRUE);
4187 	}
4188 
4189 	max = MPT_MAX_REQUESTS(mpt) >> 1;
4190 	if (max > mpt->mpt_max_tgtcmds) {
4191 		max = mpt->mpt_max_tgtcmds;
4192 	}
4193 	mpt->tgt_cmd_ptrs =
4194 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4195 	if (mpt->tgt_cmd_ptrs == NULL) {
4196 		mpt_prt(mpt,
4197 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
4198 		return (FALSE);
4199 	}
4200 
4201 	for (i = 0; i < max; i++) {
4202 		request_t *req;
4203 
4204 		req = mpt_get_request(mpt, FALSE);
4205 		if (req == NULL) {
4206 			break;
4207 		}
4208 		req->state |= REQ_STATE_LOCKED;
4209 		mpt->tgt_cmd_ptrs[i] = req;
4210 		mpt_post_target_command(mpt, req, i);
4211 	}
4212 
4214 	if (i == 0) {
4215 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4216 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4217 		mpt->tgt_cmd_ptrs = NULL;
4218 		return (FALSE);
4219 	}
4220 
4221 	mpt->tgt_cmds_allocated = i;
4222 
4223 	if (i < max) {
4224 		mpt_lprt(mpt, MPT_PRT_INFO,
4225 		    "added %d of %d target bufs\n", i, max);
4226 	}
4227 	return (i);
4228 }
4229 
4230 static int
4231 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4232 {
4233 
4234 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4235 		mpt->twildcard = 1;
4236 	} else if (lun >= MPT_MAX_LUNS) {
4237 		return (EINVAL);
4238 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4239 		return (EINVAL);
4240 	}
4241 	if (mpt->tenabled == 0) {
4242 		if (mpt->is_fc) {
4243 			(void) mpt_fc_reset_link(mpt, 0);
4244 		}
4245 		mpt->tenabled = 1;
4246 	}
4247 	if (lun == CAM_LUN_WILDCARD) {
4248 		mpt->trt_wildcard.enabled = 1;
4249 	} else {
4250 		mpt->trt[lun].enabled = 1;
4251 	}
4252 	return (0);
4253 }
4254 
4255 static int
4256 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4257 {
4258 	int i;
4259 
4260 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4261 		mpt->twildcard = 0;
4262 	} else if (lun >= MPT_MAX_LUNS) {
4263 		return (EINVAL);
4264 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4265 		return (EINVAL);
4266 	}
4267 	if (lun == CAM_LUN_WILDCARD) {
4268 		mpt->trt_wildcard.enabled = 0;
4269 	} else {
4270 		mpt->trt[lun].enabled = 0;
4271 	}
4272 	for (i = 0; i < MPT_MAX_LUNS; i++) {
4273 		if (mpt->trt[i].enabled) {
4274 			break;
4275 		}
4276 	}
4277 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4278 		if (mpt->is_fc) {
4279 			(void) mpt_fc_reset_link(mpt, 0);
4280 		}
4281 		mpt->tenabled = 0;
4282 	}
4283 	return (0);
4284 }
4285 
4286 /*
4287  * Called with MPT lock held
4288  */
4289 static void
4290 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4291 {
4292 	struct ccb_scsiio *csio = &ccb->csio;
4293 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4294 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4295 
4296 	switch (tgt->state) {
4297 	case TGT_STATE_IN_CAM:
4298 		break;
4299 	case TGT_STATE_MOVING_DATA:
4300 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4301 		xpt_freeze_simq(mpt->sim, 1);
4302 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4303 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4304 		xpt_done(ccb);
4305 		return;
4306 	default:
4307 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4308 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4309 		mpt_tgt_dump_req_state(mpt, cmd_req);
4310 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4311 		xpt_done(ccb);
4312 		return;
4313 	}
4314 
4315 	if (csio->dxfer_len) {
4316 		bus_dmamap_callback_t *cb;
4317 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4318 		request_t *req;
4319 		int error;
4320 
4321 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4322 		    ("dxfer_len %u but direction is NONE", csio->dxfer_len));
4323 
4324 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4325 			if (mpt->outofbeer == 0) {
4326 				mpt->outofbeer = 1;
4327 				xpt_freeze_simq(mpt->sim, 1);
4328 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4329 			}
4330 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4331 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4332 			xpt_done(ccb);
4333 			return;
4334 		}
4335 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4336 		if (sizeof (bus_addr_t) > 4) {
4337 			cb = mpt_execute_req_a64;
4338 		} else {
4339 			cb = mpt_execute_req;
4340 		}
4341 
4342 		req->ccb = ccb;
4343 		ccb->ccb_h.ccb_req_ptr = req;
4344 
4345 		/*
4346 		 * Record the currently active ccb and the
4347 		 * request for it in our target state area.
4348 		 */
4349 		tgt->ccb = ccb;
4350 		tgt->req = req;
4351 
4352 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4353 		ta = req->req_vbuf;
4354 
4355 		if (mpt->is_sas) {
4356 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4357 			     cmd_req->req_vbuf;
4358 			ta->QueueTag = ssp->InitiatorTag;
4359 		} else if (mpt->is_spi) {
4360 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4361 			     cmd_req->req_vbuf;
4362 			ta->QueueTag = sp->Tag;
4363 		}
4364 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4365 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4366 		ta->ReplyWord = htole32(tgt->reply_desc);
4367 		if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
4368 			ta->LUN[0] =
4369 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4370 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4371 		} else {
4372 			ta->LUN[1] = csio->ccb_h.target_lun;
4373 		}
4374 
4375 		ta->RelativeOffset = tgt->bytes_xfered;
4376 		ta->DataLength = ccb->csio.dxfer_len;
4377 		if (ta->DataLength > tgt->resid) {
4378 			ta->DataLength = tgt->resid;
4379 		}
4380 
4381 		/*
4382 		 * XXX Should be done after data transfer completes?
4383 		 */
4384 		tgt->resid -= csio->dxfer_len;
4385 		tgt->bytes_xfered += csio->dxfer_len;
4386 
4387 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4388 			ta->TargetAssistFlags |=
4389 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4390 		}
4391 
4392 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4393 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4394 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4395 			ta->TargetAssistFlags |=
4396 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4397 		}
4398 #endif
4399 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4400 
4401 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4402 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4403 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4404 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4405 
4406 		error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
4407 		    cb, req, 0);
4408 		if (error == EINPROGRESS) {
4409 			xpt_freeze_simq(mpt->sim, 1);
4410 			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4411 		}
4412 	} else {
4413 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4414 
4415 		/*
4416 		 * XXX: I don't know why this seems to happen, but
4417 		 * XXX: completing the CCB seems to make things happy.
4418 		 * XXX: This seems to happen if the initiator requests
4419 		 * XXX: enough data that we have to do multiple CTIOs.
4420 		 */
4421 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4422 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4423 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4424 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4425 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4426 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4427 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4428 			xpt_done(ccb);
4429 			return;
4430 		}
4431 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4432 			sp = sense;
4433 			memcpy(sp, &csio->sense_data,
4434 			   min(csio->sense_len, MPT_SENSE_SIZE));
4435 		}
4436 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4437 	}
4438 }
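
/*
 * Residual bookkeeping example for mpt_target_start_io (illustrative
 * numbers): if an FC initiator sends a READ with FcpDl = 65536 and CAM
 * hands us two 32768-byte CTIOs, the first TARGET_ASSIST goes out with
 * RelativeOffset 0 and DataLength 32768 (resid drops to 32768,
 * bytes_xfered rises to 32768), and the second with RelativeOffset
 * 32768 and DataLength 32768, leaving resid 0 so that auto good status
 * can apply to the final CTIO when WE_TRUST_AUTO_GOOD_STATUS is
 * defined.
 */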
4439 
4440 static void
4441 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4442     uint32_t lun, int send, uint8_t *data, size_t length)
4443 {
4444 	mpt_tgt_state_t *tgt;
4445 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4446 	SGE_SIMPLE32 *se;
4447 	uint32_t flags;
4448 	uint8_t *dptr;
4449 	bus_addr_t pptr;
4450 	request_t *req;
4451 
4452 	/*
4453 	 * We enter with resid set to the data load for the command.
4454 	 */
4455 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4456 	if (length == 0 || tgt->resid == 0) {
4457 		tgt->resid = 0;
4458 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4459 		return;
4460 	}
4461 
4462 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4463 		mpt_prt(mpt, "out of resources- dropping local response\n");
4464 		return;
4465 	}
4466 	tgt->is_local = 1;
4467 
4469 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4470 	ta = req->req_vbuf;
4471 
4472 	if (mpt->is_sas) {
4473 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4474 		ta->QueueTag = ssp->InitiatorTag;
4475 	} else if (mpt->is_spi) {
4476 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4477 		ta->QueueTag = sp->Tag;
4478 	}
4479 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4480 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4481 	ta->ReplyWord = htole32(tgt->reply_desc);
4482 	if (lun > MPT_MAX_LUNS) {
4483 		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4484 		ta->LUN[1] = lun & 0xff;
4485 	} else {
4486 		ta->LUN[1] = lun;
4487 	}
4488 	ta->RelativeOffset = 0;
4489 	ta->DataLength = length;
4490 
4491 	dptr = req->req_vbuf;
4492 	dptr += MPT_RQSL(mpt);
4493 	pptr = req->req_pbuf;
4494 	pptr += MPT_RQSL(mpt);
4495 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4496 
4497 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4498 	memset(se, 0, sizeof (*se));
4499 
4500 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4501 	if (send) {
4502 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4503 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4504 	}
4505 	se->Address = pptr;
4506 	MPI_pSGE_SET_LENGTH(se, length);
4507 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4508 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4509 	MPI_pSGE_SET_FLAGS(se, flags);
4510 
4511 	tgt->ccb = NULL;
4512 	tgt->req = req;
4513 	tgt->resid -= length;
4514 	tgt->bytes_xfered = length;
4515 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4516 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4517 #else
4518 	tgt->state = TGT_STATE_MOVING_DATA;
4519 #endif
4520 	mpt_send_cmd(mpt, req);
4521 }
4522 
4523 /*
4524  * Abort queued up CCBs
4525  */
4526 static cam_status
4527 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4528 {
4529 	struct mpt_hdr_stailq *lp;
4530 	struct ccb_hdr *srch;
4531 	int found = 0;
4532 	union ccb *accb = ccb->cab.abort_ccb;
4533 	tgt_resource_t *trtp;
4534 
4535 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4536 
4537 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4538 		trtp = &mpt->trt_wildcard;
4539 	} else {
4540 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4541 	}
4542 
4543 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4544 		lp = &trtp->atios;
4545 	} else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
4546 		lp = &trtp->inots;
4547 	} else {
4548 		return (CAM_REQ_INVALID);
4549 	}
4550 
4551 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4552 		if (srch == &accb->ccb_h) {
4553 			found = 1;
4554 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4555 			break;
4556 		}
4557 	}
4558 	if (found) {
4559 		accb->ccb_h.status = CAM_REQ_ABORTED;
4560 		xpt_done(accb);
4561 		return (CAM_REQ_CMP);
4562 	}
4563 	mpt_prt(mpt, "mpt_abort_target_ccb: CCB %p not found\n", accb);
4564 	return (CAM_PATH_INVALID);
4565 }
4566 
4567 /*
4568  * Ask the MPT to abort the current target command
4569  */
4570 static int
4571 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4572 {
4573 	int error;
4574 	request_t *req;
4575 	PTR_MSG_TARGET_MODE_ABORT abtp;
4576 
4577 	req = mpt_get_request(mpt, FALSE);
4578 	if (req == NULL) {
4579 		return (-1);
4580 	}
4581 	abtp = req->req_vbuf;
4582 	memset(abtp, 0, sizeof (*abtp));
4583 
4584 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4585 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4586 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4587 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4588 	error = 0;
4589 	if (mpt->is_fc || mpt->is_sas) {
4590 		mpt_send_cmd(mpt, req);
4591 	} else {
4592 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4593 	}
4594 	return (error);
4595 }
4596 
4597 /*
4598  * WE_TRUST_AUTO_GOOD_STATUS: I've found that setting
4599  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4600  * FC929 to set bogus FC_RSP fields (nonzero residuals
4601  * but w/o RESID fields set). This causes QLogic initiators
4602  * to think maybe that a frame was lost.
4603  *
4604  * WE_CAN_USE_AUTO_REPOST: we can't use AUTO_REPOST because
4605  * we use allocated requests to do TARGET_ASSIST and we
4606  * need to know when to release them.
4607  */
4608 
4609 static void
4610 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4611     uint8_t status, uint8_t const *sense_data)
4612 {
4613 	uint8_t *cmd_vbuf;
4614 	mpt_tgt_state_t *tgt;
4615 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4616 	request_t *req;
4617 	bus_addr_t paddr;
4618 	int resplen = 0;
4619 	uint32_t fl;
4620 
4621 	cmd_vbuf = cmd_req->req_vbuf;
4622 	cmd_vbuf += MPT_RQSL(mpt);
4623 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4624 
4625 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4626 		if (mpt->outofbeer == 0) {
4627 			mpt->outofbeer = 1;
4628 			xpt_freeze_simq(mpt->sim, 1);
4629 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4630 		}
4631 		if (ccb) {
4632 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4633 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4634 			xpt_done(ccb);
4635 		} else {
4636 			mpt_prt(mpt,
4637 			    "could not allocate status request- dropping\n");
4638 		}
4639 		return;
4640 	}
4641 	req->ccb = ccb;
4642 	if (ccb) {
4643 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4644 		ccb->ccb_h.ccb_req_ptr = req;
4645 	}
4646 
4647 	/*
4648 	 * Record the currently active ccb, if any, and the
4649 	 * request for it in our target state area.
4650 	 */
4651 	tgt->ccb = ccb;
4652 	tgt->req = req;
4653 	tgt->state = TGT_STATE_SENDING_STATUS;
4654 
4655 	tp = req->req_vbuf;
4656 	paddr = req->req_pbuf;
4657 	paddr += MPT_RQSL(mpt);
4658 
4659 	memset(tp, 0, sizeof (*tp));
4660 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4661 	if (mpt->is_fc) {
4662 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4663 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4664 		uint8_t *sts_vbuf;
4665 		uint32_t *rsp;
4666 
4667 		sts_vbuf = req->req_vbuf;
4668 		sts_vbuf += MPT_RQSL(mpt);
4669 		rsp = (uint32_t *) sts_vbuf;
4670 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4671 
4672 		/*
4673 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4674 		 * It has to be big-endian in memory and is organized
4675 		 * in 32 bit words, which are much easier to deal with
4676 		 * as words which are swizzled as needed.
4677 		 *
4678 		 * All we're filling here is the FC_RSP payload.
4679 		 * We may just have the chip synthesize it if
4680 		 * we have no residual and an OK status.
4681 		 *
4682 		 */
4683 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4684 
4685 		rsp[2] = status;
4686 		if (tgt->resid) {
4687 			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
4688 			rsp[3] = htobe32(tgt->resid);
4689 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4690 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4691 #endif
4692 		}
4693 		if (status == SCSI_STATUS_CHECK_COND) {
4694 			int i;
4695 
4696 			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
4697 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4698 			if (sense_data) {
4699 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4700 			} else {
4701 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4702 				    "TION but no sense data?\n");
4703 				memset(&rsp[8], 0, MPT_SENSE_SIZE);
4704 			}
4705 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4706 				rsp[i] = htobe32(rsp[i]);
4707 			}
4708 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4709 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4710 #endif
4711 		}
4712 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4713 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4714 #endif
4715 		rsp[2] = htobe32(rsp[2]);
4716 	} else if (mpt->is_sas) {
4717 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4718 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4719 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4720 	} else {
4721 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4722 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4723 		tp->StatusCode = status;
4724 		tp->QueueTag = htole16(sp->Tag);
4725 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4726 	}
4727 
4728 	tp->ReplyWord = htole32(tgt->reply_desc);
4729 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4730 
4731 #ifdef	WE_CAN_USE_AUTO_REPOST
4732 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4733 #endif
4734 	if (status == SCSI_STATUS_OK && resplen == 0) {
4735 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4736 	} else {
4737 		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4738 		fl =
4739 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4740 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4741 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4742 			MPI_SGE_FLAGS_END_OF_LIST	|
4743 			MPI_SGE_FLAGS_END_OF_BUFFER;
4744 		fl <<= MPI_SGE_FLAGS_SHIFT;
4745 		fl |= resplen;
4746 		tp->StatusDataSGE.FlagsLength = htole32(fl);
4747 	}
4748 
4749 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4750 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4751 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4752 	    req->serno, tgt->resid);
4753 	if (ccb) {
4754 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4755 		mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb);
4756 	}
4757 	mpt_send_cmd(mpt, req);
4758 }
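
/*
 * On the 0x800/0x200 magic numbers above: rsp[2] holds bytes 8..11 of
 * the FCP_RSP IU, i.e. (flags << 8) | scsi_status once byte swapped.
 * Our reading of the FCP spec (treat as an assumption, hence the
 * "NEED MNEMONIC" markers):
 *
 *	0x800 == FCP_RESID_UNDER   (flag 0x08) << 8
 *	0x200 == FCP_SNS_LEN_VALID (flag 0x02) << 8
 *
 * which is why a residual also fills rsp[3] (FCP_RESID) and sense data
 * also fills rsp[4] (FCP_SNS_LEN).
 */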
4759 
4760 static void
4761 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4762     tgt_resource_t *trtp, int init_id)
4763 {
4764 	struct ccb_immediate_notify *inot;
4765 	mpt_tgt_state_t *tgt;
4766 
4767 	tgt = MPT_TGT_STATE(mpt, req);
4768 	inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots);
4769 	if (inot == NULL) {
4770 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs - sending back BSY\n");
4771 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4772 		return;
4773 	}
4774 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4775 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4776 	    "Get FREE INOT %p lun %jx\n", inot,
4777 	    (uintmax_t)inot->ccb_h.target_lun);
4778 
4779 	inot->initiator_id = init_id;	/* XXX */
4780 	/*
4781 	 * This is a somewhat grotesque attempt to map from task management
4782 	 * to old style SCSI messages. God help us all.
4783 	 */
4784 	switch (fc) {
4785 	case MPT_ABORT_TASK_SET:
4786 		inot->arg = MSG_ABORT_TAG;
4787 		break;
4788 	case MPT_CLEAR_TASK_SET:
4789 		inot->arg = MSG_CLEAR_TASK_SET;
4790 		break;
4791 	case MPT_TARGET_RESET:
4792 		inot->arg = MSG_TARGET_RESET;
4793 		break;
4794 	case MPT_CLEAR_ACA:
4795 		inot->arg = MSG_CLEAR_ACA;
4796 		break;
4797 	case MPT_TERMINATE_TASK:
4798 		inot->arg = MSG_ABORT_TAG;
4799 		break;
4800 	default:
4801 		inot->arg = MSG_NOOP;
4802 		break;
4803 	}
4804 	/*
4805 	 * XXX KDM we need the sequence/tag number for the target of the
4806 	 * task management operation, especially if it is an abort.
4807 	 */
4808 	tgt->ccb = (union ccb *) inot;
4809 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4810 	xpt_done((union ccb *)inot);
4811 }
4812 
4813 static void
4814 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4815 {
4816 	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
4817 	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
4818 	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
4819 	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
4820 	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
4821 	     '0',  '0',  '0',  '1'
4822 	};
4823 	struct ccb_accept_tio *atiop;
4824 	lun_id_t lun;
4825 	int tag_action = 0;
4826 	mpt_tgt_state_t *tgt;
4827 	tgt_resource_t *trtp = NULL;
4828 	U8 *lunptr;
4829 	U8 *vbuf;
4830 	U16 itag;
4831 	U16 ioindex;
4832 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4833 	uint8_t *cdbp;
4834 
4835 	/*
4836 	 * Stash info for the current command where we can get at it later.
4837 	 */
4838 	vbuf = req->req_vbuf;
4839 	vbuf += MPT_RQSL(mpt);
4840 
4841 	/*
4842 	 * Get our state pointer set up.
4843 	 */
4844 	tgt = MPT_TGT_STATE(mpt, req);
4845 	if (tgt->state != TGT_STATE_LOADED) {
4846 		mpt_tgt_dump_req_state(mpt, req);
4847 		panic("bad target state in mpt_scsi_tgt_atio");
4848 	}
4849 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4850 	tgt->state = TGT_STATE_IN_CAM;
4851 	tgt->reply_desc = reply_desc;
4852 	ioindex = GET_IO_INDEX(reply_desc);
4853 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4854 		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
4855 		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
4856 		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
4857 		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
4858 	}
4859 	if (mpt->is_fc) {
4860 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4861 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4862 		if (fc->FcpCntl[2]) {
4863 			/*
4864 			 * Task Management Request
4865 			 */
4866 			switch (fc->FcpCntl[2]) {
4867 			case 0x2:
4868 				fct = MPT_ABORT_TASK_SET;
4869 				break;
4870 			case 0x4:
4871 				fct = MPT_CLEAR_TASK_SET;
4872 				break;
4873 			case 0x20:
4874 				fct = MPT_TARGET_RESET;
4875 				break;
4876 			case 0x40:
4877 				fct = MPT_CLEAR_ACA;
4878 				break;
4879 			case 0x80:
4880 				fct = MPT_TERMINATE_TASK;
4881 				break;
4882 			default:
4883 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4884 				    fc->FcpCntl[2]);
4885 				mpt_scsi_tgt_status(mpt, 0, req,
4886 				    SCSI_STATUS_OK, 0);
4887 				return;
4888 			}
4889 		} else {
4890 			switch (fc->FcpCntl[1]) {
4891 			case 0:
4892 				tag_action = MSG_SIMPLE_Q_TAG;
4893 				break;
4894 			case 1:
4895 				tag_action = MSG_HEAD_OF_Q_TAG;
4896 				break;
4897 			case 2:
4898 				tag_action = MSG_ORDERED_Q_TAG;
4899 				break;
4900 			default:
4901 				/*
4902 				 * Bah. Ignore Untagged Queueing and ACA
4903 				 */
4904 				tag_action = MSG_SIMPLE_Q_TAG;
4905 				break;
4906 			}
4907 		}
4908 		tgt->resid = be32toh(fc->FcpDl);
4909 		cdbp = fc->FcpCdb;
4910 		lunptr = fc->FcpLun;
4911 		itag = be16toh(fc->OptionalOxid);
4912 	} else if (mpt->is_sas) {
4913 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4914 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4915 		cdbp = ssp->CDB;
4916 		lunptr = ssp->LogicalUnitNumber;
4917 		itag = ssp->InitiatorTag;
4918 	} else {
4919 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4920 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4921 		cdbp = sp->CDB;
4922 		lunptr = sp->LogicalUnitNumber;
4923 		itag = sp->Tag;
4924 	}
4925 
4926 	/*
4927 	 * Generate a simple lun
4928 	 */
4929 	switch (lunptr[0] & 0xc0) {
4930 	case 0x40:
4931 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
4932 		break;
4933 	case 0:
4934 		lun = lunptr[1];
4935 		break;
4936 	default:
4937 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type of lun\n");
4938 		lun = 0xffff;
4939 		break;
4940 	}
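	/*
	 * Decode example (the mirror of the encode in mpt_scsi_send_tmf):
	 * a flat-addressed lun arriving as lunptr[0] = 0x41, lunptr[1] =
	 * 0x23 yields ((0x41 & 0x3f) << 8) | 0x23 = 0x123, while
	 * peripheral addressing just takes lunptr[1].  Other addressing
	 * methods are rejected above with lun set to 0xffff.
	 */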
4941 
4942 	/*
4943 	 * Deal with non-enabled or bad luns here.
4944 	 */
4945 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4946 	    mpt->trt[lun].enabled == 0) {
4947 		if (mpt->twildcard) {
4948 			trtp = &mpt->trt_wildcard;
4949 		} else if (fct == MPT_NIL_TMT_VALUE) {
4950 			/*
4951 			 * In this case, we haven't got an upstream listener
4952 			 * for either a specific lun or wildcard luns. We
4953 			 * have to make some sensible response. For regular
4954 			 * inquiry, just return some NOT HERE inquiry data.
4955 			 * For VPD inquiry, report illegal field in cdb.
4956 			 * For REQUEST SENSE, just return NO SENSE data.
4957 			 * REPORT LUNS gets illegal command.
4958 			 * All other commands get 'no such device'.
4959 			 */
4960 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
4961 			size_t len;
4962 
4963 			memset(buf, 0, MPT_SENSE_SIZE);
4964 			cond = SCSI_STATUS_CHECK_COND;
4965 			buf[0] = 0xf0;
4966 			buf[2] = 0x5;
4967 			buf[7] = 0x8;
4968 			sp = buf;
4969 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4970 
4971 			switch (cdbp[0]) {
4972 			case INQUIRY:
4973 			{
4974 				if (cdbp[1] != 0) {
4975 					buf[12] = 0x26;
4976 					buf[13] = 0x01;
4977 					break;
4978 				}
4979 				len = min(tgt->resid, cdbp[4]);
4980 				len = min(len, sizeof (null_iqd));
4981 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4982 				    "local inquiry %ld bytes\n", (long) len);
4983 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4984 				    null_iqd, len);
4985 				return;
4986 			}
4987 			case REQUEST_SENSE:
4988 			{
4989 				buf[2] = 0x0;
4990 				len = min(tgt->resid, cdbp[4]);
4991 				len = min(len, sizeof (buf));
4992 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4993 				    "local reqsense %ld bytes\n", (long) len);
4994 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4995 				    buf, len);
4996 				return;
4997 			}
4998 			case REPORT_LUNS:
4999 				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
5000 				buf[12] = 0x26;
5001 				break;
5002 			default:
5003 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5004 				    "CMD 0x%x to unmanaged lun %jx\n",
5005 				    cdbp[0], (uintmax_t)lun);
5006 				buf[12] = 0x25;
5007 				break;
5008 			}
5009 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
5010 			return;
5011 		}
5012 		/* otherwise, leave trtp NULL */
5013 	} else {
5014 		trtp = &mpt->trt[lun];
5015 	}
5016 
5017 	/*
5018 	 * Deal with any task management
5019 	 */
5020 	if (fct != MPT_NIL_TMT_VALUE) {
5021 		if (trtp == NULL) {
5022 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
5023 			    fct);
5024 			mpt_scsi_tgt_status(mpt, 0, req,
5025 			    SCSI_STATUS_OK, 0);
5026 		} else {
5027 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5028 			    GET_INITIATOR_INDEX(reply_desc));
5029 		}
5030 		return;
5031 	}
5032 
5034 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5035 	if (atiop == NULL) {
5036 		mpt_lprt(mpt, MPT_PRT_WARN,
5037 		    "no ATIOs for lun %jx - sending back %s\n", (uintmax_t)lun,
5038 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
5039 		mpt_scsi_tgt_status(mpt, NULL, req,
5040 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5041 		    NULL);
5042 		return;
5043 	}
5044 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5045 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
5046 	    "Get FREE ATIO %p lun %jx\n", atiop,
5047 	    (uintmax_t)atiop->ccb_h.target_lun);
5048 	atiop->ccb_h.ccb_mpt_ptr = mpt;
5049 	atiop->ccb_h.status = CAM_CDB_RECVD;
5050 	atiop->ccb_h.target_lun = lun;
5051 	atiop->sense_len = 0;
5052 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
5053 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
5054 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
5055 
5056 	/*
5057 	 * The tag we construct here allows us to find the
5058 	 * original request that the command came in with.
5059 	 *
5060 	 * This way we don't have to depend on anything but the
5061 	 * tag to find things when CCBs show back up from CAM.
5062 	 */
5063 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5064 	tgt->tag_id = atiop->tag_id;
5065 	if (tag_action) {
5066 		atiop->tag_action = tag_action;
5067 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
5068 	}
5069 	if (mpt->verbose >= MPT_PRT_DEBUG) {
5070 		int i;
5071 		mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop,
5072 		    (uintmax_t)atiop->ccb_h.target_lun);
5073 		for (i = 0; i < atiop->cdb_len; i++) {
5074 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
5075 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
5076 		}
5077 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
5078 	    	    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
5079 	}
5080 
5081 	xpt_done((union ccb *)atiop);
5082 }
5083 
5084 static void
5085 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
5086 {
5087 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5088 
5089 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
5090 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
5091 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
5092 	    tgt->tag_id, tgt->state);
5093 }
5094 
5095 static void
5096 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
5097 {
5098 
5099 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
5100 	    req->index, req->index, req->state);
5101 	mpt_tgt_dump_tgt_state(mpt, req);
5102 }
5103 
5104 static int
5105 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
5106     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
5107 {
5108 	int dbg;
5109 	union ccb *ccb;
5110 	U16 status;
5111 
5112 	if (reply_frame == NULL) {
5113 		/*
5114 		 * Figure out what the state of the command is.
5115 		 */
5116 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5117 
5118 #ifdef	INVARIANTS
5119 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
5120 		if (tgt->req) {
5121 			mpt_req_not_spcl(mpt, tgt->req,
5122 			    "turbo scsi_tgt_reply associated req", __LINE__);
5123 		}
5124 #endif
5125 		switch(tgt->state) {
5126 		case TGT_STATE_LOADED:
5127 			/*
5128 			 * This is a new command starting.
5129 			 */
5130 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
5131 			break;
5132 		case TGT_STATE_MOVING_DATA:
5133 		{
5134 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
5135 
5136 			ccb = tgt->ccb;
5137 			if (tgt->req == NULL) {
5138 				panic("mpt: turbo target reply with null "
5139 				    "associated request moving data");
5140 				/* NOTREACHED */
5141 			}
5142 			if (ccb == NULL) {
5143 				if (tgt->is_local == 0) {
5144 					panic("mpt: turbo target reply with "
5145 					    "null associated ccb moving data");
5146 					/* NOTREACHED */
5147 				}
5148 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5149 				    "TARGET_ASSIST local done\n");
5150 				TAILQ_REMOVE(&mpt->request_pending_list,
5151 				    tgt->req, links);
5152 				mpt_free_request(mpt, tgt->req);
5153 				tgt->req = NULL;
5154 				mpt_scsi_tgt_status(mpt, NULL, req,
5155 				    0, NULL);
5156 				return (TRUE);
5157 			}
5158 			tgt->ccb = NULL;
5159 			tgt->nxfers++;
5160 			mpt_req_untimeout(req, mpt_timeout, ccb);
5161 			mpt_lprt(mpt, MPT_PRT_DEBUG,
5162 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5163 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5164 			/*
5165 			 * Free the Target Assist Request
5166 			 */
5167 			KASSERT(tgt->req->ccb == ccb,
5168 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
5169 			    tgt->req->serno, tgt->req->ccb));
5170 			TAILQ_REMOVE(&mpt->request_pending_list,
5171 			    tgt->req, links);
5172 			mpt_free_request(mpt, tgt->req);
5173 			tgt->req = NULL;
5174 
5175 			/*
5176 			 * Do we need to send status now? That is, are
5177 			 * we done with all our data transfers?
5178 			 */
5179 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
5180 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5181 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5182 				KASSERT(ccb->ccb_h.status,
5183 				    ("zero ccb sts at %d", __LINE__));
5184 				tgt->state = TGT_STATE_IN_CAM;
5185 				if (mpt->outofbeer) {
5186 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5187 					mpt->outofbeer = 0;
5188 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5189 				}
5190 				xpt_done(ccb);
5191 				break;
5192 			}
5193 			/*
5194 			 * Otherwise, send status (and sense)
5195 			 */
5196 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5197 				sp = sense;
5198 				memcpy(sp, &ccb->csio.sense_data,
5199 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
5200 			}
5201 			mpt_scsi_tgt_status(mpt, ccb, req,
5202 			    ccb->csio.scsi_status, sp);
5203 			break;
5204 		}
5205 		case TGT_STATE_SENDING_STATUS:
5206 		case TGT_STATE_MOVING_DATA_AND_STATUS:
5207 		{
5208 			int ioindex;
5209 			ccb = tgt->ccb;
5210 
5211 			if (tgt->req == NULL) {
5212 				panic("mpt: turbo target reply with null "
5213 				    "associated request sending status");
5214 				/* NOTREACHED */
5215 			}
5216 
5217 			if (ccb) {
5218 				tgt->ccb = NULL;
5219 				if (tgt->state ==
5220 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
5221 					tgt->nxfers++;
5222 				}
5223 				mpt_req_untimeout(req, mpt_timeout, ccb);
5224 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5225 					ccb->ccb_h.status |= CAM_SENT_SENSE;
5226 				}
5227 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5228 				    "TARGET_STATUS tag %x sts %x flgs %x req "
5229 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5230 				    ccb->ccb_h.flags, tgt->req);
5231 				/*
5232 				 * Free the Target Send Status Request
5233 				 */
5234 				KASSERT(tgt->req->ccb == ccb,
5235 				    ("tgt->req %p:%u tgt->req->ccb %p",
5236 				    tgt->req, tgt->req->serno, tgt->req->ccb));
5237 				/*
5238 				 * Notify CAM that we're done
5239 				 */
5240 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5241 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5242 				KASSERT(ccb->ccb_h.status,
5243 				    ("ZERO ccb sts at %d", __LINE__));
5244 				tgt->ccb = NULL;
5245 			} else {
5246 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5247 				    "TARGET_STATUS non-CAM for req %p:%u\n",
5248 				    tgt->req, tgt->req->serno);
5249 			}
5250 			TAILQ_REMOVE(&mpt->request_pending_list,
5251 			    tgt->req, links);
5252 			mpt_free_request(mpt, tgt->req);
5253 			tgt->req = NULL;
5254 
5255 			/*
5256 			 * And re-post the Command Buffer.
5257 			 * This will reset the state.
5258 			 */
5259 			ioindex = GET_IO_INDEX(reply_desc);
5260 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5261 			tgt->is_local = 0;
5262 			mpt_post_target_command(mpt, req, ioindex);
5263 
5264 			/*
5265 			 * And post a done for anyone who cares
5266 			 */
5267 			if (ccb) {
5268 				if (mpt->outofbeer) {
5269 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5270 					mpt->outofbeer = 0;
5271 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5272 				}
5273 				xpt_done(ccb);
5274 			}
5275 			break;
5276 		}
5277 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
5278 			tgt->state = TGT_STATE_LOADED;
5279 			break;
5280 		default:
5281 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5282 			    "Reply Function\n", tgt->state);
5283 		}
5284 		return (TRUE);
5285 	}
5286 
5287 	status = le16toh(reply_frame->IOCStatus);
5288 	if (status != MPI_IOCSTATUS_SUCCESS) {
5289 		dbg = MPT_PRT_ERROR;
5290 	} else {
5291 		dbg = MPT_PRT_DEBUG1;
5292 	}
5293 
5294 	mpt_lprt(mpt, dbg,
5295 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5296 	     req, req->serno, reply_frame, reply_frame->Function, status);
5297 
5298 	switch (reply_frame->Function) {
5299 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5300 	{
5301 		mpt_tgt_state_t *tgt;
5302 #ifdef	INVARIANTS
5303 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5304 #endif
5305 		if (status != MPI_IOCSTATUS_SUCCESS) {
5306 			/*
5307 			 * XXX What to do?
5308 			 */
5309 			break;
5310 		}
5311 		tgt = MPT_TGT_STATE(mpt, req);
5312 		KASSERT(tgt->state == TGT_STATE_LOADING,
5313 		    ("bad state 0x%x on reply to buffer post", tgt->state));
5314 		mpt_assign_serno(mpt, req);
5315 		tgt->state = TGT_STATE_LOADED;
5316 		break;
5317 	}
5318 	case MPI_FUNCTION_TARGET_ASSIST:
5319 #ifdef	INVARIANTS
5320 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5321 #endif
5322 		mpt_prt(mpt, "target assist completion\n");
5323 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5324 		mpt_free_request(mpt, req);
5325 		break;
5326 	case MPI_FUNCTION_TARGET_STATUS_SEND:
5327 #ifdef	INVARIANTS
5328 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5329 #endif
5330 		mpt_prt(mpt, "status send completion\n");
5331 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5332 		mpt_free_request(mpt, req);
5333 		break;
5334 	case MPI_FUNCTION_TARGET_MODE_ABORT:
5335 	{
5336 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5337 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5338 		PTR_MSG_TARGET_MODE_ABORT abtp =
5339 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5340 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5341 #ifdef	INVARIANTS
5342 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5343 #endif
5344 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5345 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5346 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5347 		mpt_free_request(mpt, req);
5348 		break;
5349 	}
5350 	default:
5351 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5352 		    "0x%x\n", reply_frame->Function);
5353 		break;
5354 	}
5355 	return (TRUE);
5356 }
5357