xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision ce6a89e27cd190313be39bb479880aeda4778436)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC  adapters.
3  * FreeBSD Version.
4  *
5  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-3-Clause
6  *
7  * Copyright (c)  2000, 2001 by Greg Ansley
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice immediately at the beginning of the file, without modification,
14  *    this list of conditions, and the following disclaimer.
15  * 2. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 /*-
31  * Copyright (c) 2002, 2006 by Matthew Jacob
32  * All rights reserved.
33  *
34  * Redistribution and use in source and binary forms, with or without
35  * modification, are permitted provided that the following conditions are
36  * met:
37  * 1. Redistributions of source code must retain the above copyright
38  *    notice, this list of conditions and the following disclaimer.
39  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
40  *    substantially similar to the "NO WARRANTY" disclaimer below
41  *    ("Disclaimer") and any redistribution must be conditioned upon including
42  *    a substantially similar Disclaimer requirement for further binary
43  *    redistribution.
44  * 3. Neither the names of the above listed copyright holders nor the names
45  *    of any contributors may be used to endorse or promote products derived
46  *    from this software without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
49  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
52  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
53  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
54  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
55  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
56  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
57  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
58  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  *
60  * Support from Chris Ellsworth in order to make SAS adapters work
61  * is gratefully acknowledged.
62  *
63  * Support from LSI-Logic has also gone a great deal toward making this a
64  * workable subsystem and is gratefully acknowledged.
65  */
66 /*-
67  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
68  * Copyright (c) 2005, WHEEL Sp. z o.o.
69  * Copyright (c) 2004, 2005 Justin T. Gibbs
70  * All rights reserved.
71  *
72  * Redistribution and use in source and binary forms, with or without
73  * modification, are permitted provided that the following conditions are
74  * met:
75  * 1. Redistributions of source code must retain the above copyright
76  *    notice, this list of conditions and the following disclaimer.
77  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
78  *    substantially similar to the "NO WARRANTY" disclaimer below
79  *    ("Disclaimer") and any redistribution must be conditioned upon including
80  *    a substantially similar Disclaimer requirement for further binary
81  *    redistribution.
82  * 3. Neither the names of the above listed copyright holders nor the names
83  *    of any contributors may be used to endorse or promote products derived
84  *    from this software without specific prior written permission.
85  *
86  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
87  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
88  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
89  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
90  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
91  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
92  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
93  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
94  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
95  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
96  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97  */
98 #include <sys/cdefs.h>
99 __FBSDID("$FreeBSD$");
100 
101 #include <dev/mpt/mpt.h>
102 #include <dev/mpt/mpt_cam.h>
103 #include <dev/mpt/mpt_raid.h>
104 
105 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
106 #include "dev/mpt/mpilib/mpi_init.h"
107 #include "dev/mpt/mpilib/mpi_targ.h"
108 #include "dev/mpt/mpilib/mpi_fc.h"
109 #include "dev/mpt/mpilib/mpi_sas.h"
110 
111 #include <sys/callout.h>
112 #include <sys/kthread.h>
113 #include <sys/sysctl.h>
114 
115 static void mpt_poll(struct cam_sim *);
116 static callout_func_t mpt_timeout;
117 static void mpt_action(struct cam_sim *, union ccb *);
118 static int
119 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
120 static void mpt_setwidth(struct mpt_softc *, int, int);
121 static void mpt_setsync(struct mpt_softc *, int, int, int);
122 static int mpt_update_spi_config(struct mpt_softc *, int);
123 
124 static mpt_reply_handler_t mpt_scsi_reply_handler;
125 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
126 static mpt_reply_handler_t mpt_fc_els_reply_handler;
127 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
128 					MSG_DEFAULT_REPLY *);
129 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
130 static int mpt_fc_reset_link(struct mpt_softc *, int);
131 
132 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
133 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
134 static void mpt_recovery_thread(void *arg);
135 static void mpt_recover_commands(struct mpt_softc *mpt);
136 
137 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
138     target_id_t, lun_id_t, u_int, int);
139 
140 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
141 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
142 static int mpt_add_els_buffers(struct mpt_softc *mpt);
143 static int mpt_add_target_commands(struct mpt_softc *mpt);
144 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
145 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
146 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
147 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
148 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
149 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
150     uint8_t, uint8_t const *, u_int);
151 static void
152 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
153     tgt_resource_t *, int);
154 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
155 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
156 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
157 static mpt_reply_handler_t mpt_sata_pass_reply_handler;
158 
159 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
160 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
161 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
162 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
163 
164 static mpt_probe_handler_t	mpt_cam_probe;
165 static mpt_attach_handler_t	mpt_cam_attach;
166 static mpt_enable_handler_t	mpt_cam_enable;
167 static mpt_ready_handler_t	mpt_cam_ready;
168 static mpt_event_handler_t	mpt_cam_event;
169 static mpt_reset_handler_t	mpt_cam_ioc_reset;
170 static mpt_detach_handler_t	mpt_cam_detach;
171 
172 static struct mpt_personality mpt_cam_personality =
173 {
174 	.name		= "mpt_cam",
175 	.probe		= mpt_cam_probe,
176 	.attach		= mpt_cam_attach,
177 	.enable		= mpt_cam_enable,
178 	.ready		= mpt_cam_ready,
179 	.event		= mpt_cam_event,
180 	.reset		= mpt_cam_ioc_reset,
181 	.detach		= mpt_cam_detach,
182 };
183 
184 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
185 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
186 
187 int mpt_enable_sata_wc = -1;
188 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
189 
190 static int
191 mpt_cam_probe(struct mpt_softc *mpt)
192 {
193 	int role;
194 
195 	/*
196 	 * Only attach to nodes that support the initiator or target role
197 	 * (or want to) or have RAID physical devices that need CAM pass-thru
198 	 * support.
199 	 */
200 	if (mpt->do_cfg_role) {
201 		role = mpt->cfg_role;
202 	} else {
203 		role = mpt->role;
204 	}
205 	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
206 	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
207 		return (0);
208 	}
209 	return (ENODEV);
210 }
211 
212 static int
213 mpt_cam_attach(struct mpt_softc *mpt)
214 {
215 	struct cam_devq *devq;
216 	mpt_handler_t	 handler;
217 	int		 maxq;
218 	int		 error;
219 
220 	MPT_LOCK(mpt);
221 	TAILQ_INIT(&mpt->request_timeout_list);
222 	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
223 	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
224 
225 	handler.reply_handler = mpt_scsi_reply_handler;
226 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
227 				     &scsi_io_handler_id);
228 	if (error != 0) {
229 		MPT_UNLOCK(mpt);
230 		goto cleanup;
231 	}
232 
233 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
234 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
235 				     &scsi_tmf_handler_id);
236 	if (error != 0) {
237 		MPT_UNLOCK(mpt);
238 		goto cleanup;
239 	}
240 
241 	/*
242 	 * If we're fibre channel and could support target mode, we register
243 	 * an ELS reply handler and give it resources.
244 	 */
245 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
246 		handler.reply_handler = mpt_fc_els_reply_handler;
247 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
248 		    &fc_els_handler_id);
249 		if (error != 0) {
250 			MPT_UNLOCK(mpt);
251 			goto cleanup;
252 		}
253 		if (mpt_add_els_buffers(mpt) == FALSE) {
254 			error = ENOMEM;
255 			MPT_UNLOCK(mpt);
256 			goto cleanup;
257 		}
258 		maxq -= mpt->els_cmds_allocated;
259 	}
260 
261 	/*
262 	 * If we support target mode, we register a reply handler for it,
263 	 * but don't add command resources until we actually enable target
264 	 * mode.
265 	 */
266 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
267 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
268 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
269 		    &mpt->scsi_tgt_handler_id);
270 		if (error != 0) {
271 			MPT_UNLOCK(mpt);
272 			goto cleanup;
273 		}
274 	}
275 
276 	if (mpt->is_sas) {
277 		handler.reply_handler = mpt_sata_pass_reply_handler;
278 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
279 		    &sata_pass_handler_id);
280 		if (error != 0) {
281 			MPT_UNLOCK(mpt);
282 			goto cleanup;
283 		}
284 	}
285 
286 	/*
287 	 * We keep one request reserved for timeout TMF requests.
288 	 */
289 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
290 	if (mpt->tmf_req == NULL) {
291 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
292 		error = ENOMEM;
293 		MPT_UNLOCK(mpt);
294 		goto cleanup;
295 	}
296 
297 	/*
298 	 * Mark the request as free even though not on the free list.
299 	 * There is only one TMF request allowed to be outstanding at
300 	 * a time and the TMF routines perform their own allocation
301 	 * tracking using the standard state flags.
302 	 */
303 	mpt->tmf_req->state = REQ_STATE_FREE;
304 	maxq--;
305 
306 	/*
307 	 * The rest of this is CAM foo, for which we need to drop our lock
308 	 */
309 	MPT_UNLOCK(mpt);
310 
311 	if (mpt_spawn_recovery_thread(mpt) != 0) {
312 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
313 		error = ENOMEM;
314 		goto cleanup;
315 	}
316 
317 	/*
318 	 * Create the device queue for our SIM(s).
319 	 */
320 	devq = cam_simq_alloc(maxq);
321 	if (devq == NULL) {
322 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
323 		error = ENOMEM;
324 		goto cleanup;
325 	}
326 
327 	/*
328 	 * Construct our SIM entry.
329 	 */
330 	mpt->sim =
331 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
332 	if (mpt->sim == NULL) {
333 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
334 		cam_simq_free(devq);
335 		error = ENOMEM;
336 		goto cleanup;
337 	}
338 
339 	/*
340 	 * Register exactly this bus.
341 	 */
342 	MPT_LOCK(mpt);
343 	if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
344 		mpt_prt(mpt, "Bus registration Failed!\n");
345 		error = ENOMEM;
346 		MPT_UNLOCK(mpt);
347 		goto cleanup;
348 	}
349 
350 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
351 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
352 		mpt_prt(mpt, "Unable to allocate Path!\n");
353 		error = ENOMEM;
354 		MPT_UNLOCK(mpt);
355 		goto cleanup;
356 	}
357 	MPT_UNLOCK(mpt);
358 
359 	/*
360 	 * Only register a second bus for RAID physical
361 	 * devices if the controller supports RAID.
362 	 */
363 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
364 		return (0);
365 	}
366 
367 	/*
368 	 * Create a "bus" to export all hidden disks to CAM.
369 	 */
370 	mpt->phydisk_sim =
371 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
372 	if (mpt->phydisk_sim == NULL) {
373 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
374 		error = ENOMEM;
375 		goto cleanup;
376 	}
377 
378 	/*
379 	 * Register this bus.
380 	 */
381 	MPT_LOCK(mpt);
382 	if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
383 	    CAM_SUCCESS) {
384 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
385 		error = ENOMEM;
386 		MPT_UNLOCK(mpt);
387 		goto cleanup;
388 	}
389 
390 	if (xpt_create_path(&mpt->phydisk_path, NULL,
391 	    cam_sim_path(mpt->phydisk_sim),
392 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
393 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
394 		error = ENOMEM;
395 		MPT_UNLOCK(mpt);
396 		goto cleanup;
397 	}
398 	MPT_UNLOCK(mpt);
399 	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
400 	return (0);
401 
402 cleanup:
403 	mpt_cam_detach(mpt);
404 	return (error);
405 }
406 
407 /*
408  * Read FC configuration information
409  */
410 static int
411 mpt_read_config_info_fc(struct mpt_softc *mpt)
412 {
413 	struct sysctl_ctx_list *ctx;
414 	struct sysctl_oid *tree;
415 	char *topology = NULL;
416 	int rv;
417 
418 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
419 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
420 	if (rv) {
421 		return (-1);
422 	}
423 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
424 		 mpt->mpt_fcport_page0.Header.PageVersion,
425 		 mpt->mpt_fcport_page0.Header.PageLength,
426 		 mpt->mpt_fcport_page0.Header.PageNumber,
427 		 mpt->mpt_fcport_page0.Header.PageType);
428 
429 
430 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
431 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
432 	if (rv) {
433 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
434 		return (-1);
435 	}
436 	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
437 
438 	switch (mpt->mpt_fcport_page0.CurrentSpeed) {
439 	case MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT:
440 		mpt->mpt_fcport_speed = 1;
441 		break;
442 	case MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT:
443 		mpt->mpt_fcport_speed = 2;
444 		break;
445 	case MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT:
446 		mpt->mpt_fcport_speed = 10;
447 		break;
448 	case MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT:
449 		mpt->mpt_fcport_speed = 4;
450 		break;
451 	default:
452 		mpt->mpt_fcport_speed = 0;
453 		break;
454 	}
455 
456 	switch (mpt->mpt_fcport_page0.Flags &
457 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
458 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
459 		mpt->mpt_fcport_speed = 0;
460 		topology = "<NO LOOP>";
461 		break;
462 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
463 		topology = "N-Port";
464 		break;
465 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
466 		topology = "NL-Port";
467 		break;
468 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
469 		topology = "F-Port";
470 		break;
471 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
472 		topology = "FL-Port";
473 		break;
474 	default:
475 		mpt->mpt_fcport_speed = 0;
476 		topology = "?";
477 		break;
478 	}
479 
480 	mpt->scinfo.fc.wwnn = ((uint64_t)mpt->mpt_fcport_page0.WWNN.High << 32)
481 	    | mpt->mpt_fcport_page0.WWNN.Low;
482 	mpt->scinfo.fc.wwpn = ((uint64_t)mpt->mpt_fcport_page0.WWPN.High << 32)
483 	    | mpt->mpt_fcport_page0.WWPN.Low;
484 	mpt->scinfo.fc.portid = mpt->mpt_fcport_page0.PortIdentifier;
485 
486 	mpt_lprt(mpt, MPT_PRT_INFO,
487 	    "FC Port Page 0: Topology <%s> WWNN 0x%16jx WWPN 0x%16jx "
488 	    "Speed %u-Gbit\n", topology,
489 	    (uintmax_t)mpt->scinfo.fc.wwnn, (uintmax_t)mpt->scinfo.fc.wwpn,
490 	    mpt->mpt_fcport_speed);
491 	MPT_UNLOCK(mpt);
492 	ctx = device_get_sysctl_ctx(mpt->dev);
493 	tree = device_get_sysctl_tree(mpt->dev);
494 
495 	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
496 	    "wwnn", CTLFLAG_RD, &mpt->scinfo.fc.wwnn,
497 	    "World Wide Node Name");
498 
499 	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
500 	     "wwpn", CTLFLAG_RD, &mpt->scinfo.fc.wwpn,
501 	     "World Wide Port Name");
502 
503 	MPT_LOCK(mpt);
504 	return (0);
505 }
506 
507 /*
508  * Set FC configuration information.
509  */
510 static int
511 mpt_set_initial_config_fc(struct mpt_softc *mpt)
512 {
513 	CONFIG_PAGE_FC_PORT_1 fc;
514 	U32 fl;
515 	int r, doit = 0;
516 	int role;
517 
518 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
519 	    &fc.Header, FALSE, 5000);
520 	if (r) {
521 		mpt_prt(mpt, "failed to read FC page 1 header\n");
522 		return (mpt_fc_reset_link(mpt, 1));
523 	}
524 
525 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
526 	    &fc.Header, sizeof (fc), FALSE, 5000);
527 	if (r) {
528 		mpt_prt(mpt, "failed to read FC page 1\n");
529 		return (mpt_fc_reset_link(mpt, 1));
530 	}
531 	mpt2host_config_page_fc_port_1(&fc);
532 
533 	/*
534 	 * Check our flags to make sure we support the role we want.
535 	 */
536 	doit = 0;
537 	role = 0;
538 	fl = fc.Flags;
539 
540 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
541 		role |= MPT_ROLE_INITIATOR;
542 	}
543 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
544 		role |= MPT_ROLE_TARGET;
545 	}
546 
547 	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
548 
549 	if (mpt->do_cfg_role == 0) {
550 		role = mpt->cfg_role;
551 	} else {
552 		mpt->do_cfg_role = 0;
553 	}
554 
555 	if (role != mpt->cfg_role) {
556 		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
557 			if ((role & MPT_ROLE_INITIATOR) == 0) {
558 				mpt_prt(mpt, "adding initiator role\n");
559 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
560 				doit++;
561 			} else {
562 				mpt_prt(mpt, "keeping initiator role\n");
563 			}
564 		} else if (role & MPT_ROLE_INITIATOR) {
565 			mpt_prt(mpt, "removing initiator role\n");
566 			doit++;
567 		}
568 		if (mpt->cfg_role & MPT_ROLE_TARGET) {
569 			if ((role & MPT_ROLE_TARGET) == 0) {
570 				mpt_prt(mpt, "adding target role\n");
571 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
572 				doit++;
573 			} else {
574 				mpt_prt(mpt, "keeping target role\n");
575 			}
576 		} else if (role & MPT_ROLE_TARGET) {
577 			mpt_prt(mpt, "removing target role\n");
578 			doit++;
579 		}
580 		mpt->role = mpt->cfg_role;
581 	}
582 
583 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
584 		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
585 			mpt_prt(mpt, "adding OXID option\n");
586 			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
587 			doit++;
588 		}
589 	}
590 
591 	if (doit) {
592 		fc.Flags = fl;
593 		host2mpt_config_page_fc_port_1(&fc);
594 		r = mpt_write_cfg_page(mpt,
595 		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
596 		    sizeof(fc), FALSE, 5000);
597 		if (r != 0) {
598 			mpt_prt(mpt, "failed to update NVRAM with changes\n");
599 			return (0);
600 		}
601 		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
602 		    "effect until next reboot or IOC reset\n");
603 	}
604 	return (0);
605 }
606 
607 static int
608 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
609 {
610 	ConfigExtendedPageHeader_t hdr;
611 	struct mptsas_phyinfo *phyinfo;
612 	SasIOUnitPage0_t *buffer;
613 	int error, len, i;
614 
615 	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
616 				       0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
617 				       &hdr, 0, 10000);
618 	if (error)
619 		goto out;
620 	if (hdr.ExtPageLength == 0) {
621 		error = ENXIO;
622 		goto out;
623 	}
624 
625 	len = hdr.ExtPageLength * 4;
626 	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
627 	if (buffer == NULL) {
628 		error = ENOMEM;
629 		goto out;
630 	}
631 
632 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
633 				     0, &hdr, buffer, len, 0, 10000);
634 	if (error) {
635 		free(buffer, M_DEVBUF);
636 		goto out;
637 	}
638 
639 	portinfo->num_phys = buffer->NumPhys;
640 	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
641 	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
642 	if (portinfo->phy_info == NULL) {
643 		free(buffer, M_DEVBUF);
644 		error = ENOMEM;
645 		goto out;
646 	}
647 
648 	for (i = 0; i < portinfo->num_phys; i++) {
649 		phyinfo = &portinfo->phy_info[i];
650 		phyinfo->phy_num = i;
651 		phyinfo->port_id = buffer->PhyData[i].Port;
652 		phyinfo->negotiated_link_rate =
653 		    buffer->PhyData[i].NegotiatedLinkRate;
654 		phyinfo->handle =
655 		    le16toh(buffer->PhyData[i].ControllerDevHandle);
656 	}
657 
658 	free(buffer, M_DEVBUF);
659 out:
660 	return (error);
661 }
662 
663 static int
664 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
665 	uint32_t form, uint32_t form_specific)
666 {
667 	ConfigExtendedPageHeader_t hdr;
668 	SasPhyPage0_t *buffer;
669 	int error;
670 
671 	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
672 				       MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
673 				       0, 10000);
674 	if (error)
675 		goto out;
676 	if (hdr.ExtPageLength == 0) {
677 		error = ENXIO;
678 		goto out;
679 	}
680 
681 	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
682 	if (buffer == NULL) {
683 		error = ENOMEM;
684 		goto out;
685 	}
686 
687 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
688 				     form + form_specific, &hdr, buffer,
689 				     sizeof(SasPhyPage0_t), 0, 10000);
690 	if (error) {
691 		free(buffer, M_DEVBUF);
692 		goto out;
693 	}
694 
695 	phy_info->hw_link_rate = buffer->HwLinkRate;
696 	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
697 	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
698 	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
699 
700 	free(buffer, M_DEVBUF);
701 out:
702 	return (error);
703 }
704 
705 static int
706 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
707 	uint32_t form, uint32_t form_specific)
708 {
709 	ConfigExtendedPageHeader_t hdr;
710 	SasDevicePage0_t *buffer;
711 	uint64_t sas_address;
712 	int error = 0;
713 
714 	bzero(device_info, sizeof(*device_info));
715 	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
716 				       MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
717 				       &hdr, 0, 10000);
718 	if (error)
719 		goto out;
720 	if (hdr.ExtPageLength == 0) {
721 		error = ENXIO;
722 		goto out;
723 	}
724 
725 	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
726 	if (buffer == NULL) {
727 		error = ENOMEM;
728 		goto out;
729 	}
730 
731 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
732 				     form + form_specific, &hdr, buffer,
733 				     sizeof(SasDevicePage0_t), 0, 10000);
734 	if (error) {
735 		free(buffer, M_DEVBUF);
736 		goto out;
737 	}
738 
739 	device_info->dev_handle = le16toh(buffer->DevHandle);
740 	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
741 	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
742 	device_info->slot = le16toh(buffer->Slot);
743 	device_info->phy_num = buffer->PhyNum;
744 	device_info->physical_port = buffer->PhysicalPort;
745 	device_info->target_id = buffer->TargetID;
746 	device_info->bus = buffer->Bus;
747 	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
748 	device_info->sas_address = le64toh(sas_address);
749 	device_info->device_info = le32toh(buffer->DeviceInfo);
750 
751 	free(buffer, M_DEVBUF);
752 out:
753 	return (error);
754 }
755 
756 /*
757  * Read SAS configuration information. Nothing to do yet.
758  */
759 static int
760 mpt_read_config_info_sas(struct mpt_softc *mpt)
761 {
762 	struct mptsas_portinfo *portinfo;
763 	struct mptsas_phyinfo *phyinfo;
764 	int error, i;
765 
766 	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
767 	if (portinfo == NULL)
768 		return (ENOMEM);
769 
770 	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
771 	if (error) {
772 		free(portinfo, M_DEVBUF);
773 		return (0);
774 	}
775 
776 	for (i = 0; i < portinfo->num_phys; i++) {
777 		phyinfo = &portinfo->phy_info[i];
778 		error = mptsas_sas_phy_pg0(mpt, phyinfo,
779 		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
780 		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
781 		if (error)
782 			break;
783 		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
784 		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
785 		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
786 		    phyinfo->handle);
787 		if (error)
788 			break;
789 		phyinfo->identify.phy_num = phyinfo->phy_num = i;
790 		if (phyinfo->attached.dev_handle)
791 			error = mptsas_sas_device_pg0(mpt,
792 			    &phyinfo->attached,
793 			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
794 			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
795 			    phyinfo->attached.dev_handle);
796 		if (error)
797 			break;
798 	}
799 	mpt->sas_portinfo = portinfo;
800 	return (0);
801 }
802 
803 static void
804 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
805 	int enabled)
806 {
807 	SataPassthroughRequest_t	*pass;
808 	request_t *req;
809 	int error, status;
810 
811 	req = mpt_get_request(mpt, 0);
812 	if (req == NULL)
813 		return;
814 
815 	pass = req->req_vbuf;
816 	bzero(pass, sizeof(SataPassthroughRequest_t));
817 	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
818 	pass->TargetID = devinfo->target_id;
819 	pass->Bus = devinfo->bus;
820 	pass->PassthroughFlags = 0;
821 	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
822 	pass->DataLength = 0;
823 	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
824 	pass->CommandFIS[0] = 0x27;
825 	pass->CommandFIS[1] = 0x80;
826 	pass->CommandFIS[2] = 0xef;
827 	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
828 	pass->CommandFIS[7] = 0x40;
829 	pass->CommandFIS[15] = 0x08;
830 
831 	mpt_check_doorbell(mpt);
832 	mpt_send_cmd(mpt, req);
833 	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
834 			     10 * 1000);
835 	if (error) {
836 		mpt_free_request(mpt, req);
837 		printf("error %d sending passthrough\n", error);
838 		return;
839 	}
840 
841 	status = le16toh(req->IOCStatus);
842 	if (status != MPI_IOCSTATUS_SUCCESS) {
843 		mpt_free_request(mpt, req);
844 		printf("IOCSTATUS %d\n", status);
845 		return;
846 	}
847 
848 	mpt_free_request(mpt, req);
849 }
850 
851 /*
852  * Set SAS configuration information. Nothing to do yet.
853  */
854 static int
855 mpt_set_initial_config_sas(struct mpt_softc *mpt)
856 {
857 	struct mptsas_phyinfo *phyinfo;
858 	int i;
859 
860 	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
861 		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
862 			phyinfo = &mpt->sas_portinfo->phy_info[i];
863 			if (phyinfo->attached.dev_handle == 0)
864 				continue;
865 			if ((phyinfo->attached.device_info &
866 			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
867 				continue;
868 			if (bootverbose)
869 				device_printf(mpt->dev,
870 				    "%sabling SATA WC on phy %d\n",
871 				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
872 			mptsas_set_sata_wc(mpt, &phyinfo->attached,
873 					   mpt_enable_sata_wc);
874 		}
875 	}
876 
877 	return (0);
878 }
879 
880 static int
881 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
882  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
883 {
884 
885 	if (req != NULL) {
886 		if (reply_frame != NULL) {
887 			req->IOCStatus = le16toh(reply_frame->IOCStatus);
888 		}
889 		req->state &= ~REQ_STATE_QUEUED;
890 		req->state |= REQ_STATE_DONE;
891 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
892 		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
893 			wakeup(req);
894 		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
895 			/*
896 			 * Whew- we can free this request (late completion)
897 			 */
898 			mpt_free_request(mpt, req);
899 		}
900 	}
901 
902 	return (TRUE);
903 }
904 
905 /*
906  * Read SCSI configuration information
907  */
908 static int
909 mpt_read_config_info_spi(struct mpt_softc *mpt)
910 {
911 	int rv, i;
912 
913 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
914 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
915 	if (rv) {
916 		return (-1);
917 	}
918 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
919 	    mpt->mpt_port_page0.Header.PageVersion,
920 	    mpt->mpt_port_page0.Header.PageLength,
921 	    mpt->mpt_port_page0.Header.PageNumber,
922 	    mpt->mpt_port_page0.Header.PageType);
923 
924 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
925 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
926 	if (rv) {
927 		return (-1);
928 	}
929 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
930 	    mpt->mpt_port_page1.Header.PageVersion,
931 	    mpt->mpt_port_page1.Header.PageLength,
932 	    mpt->mpt_port_page1.Header.PageNumber,
933 	    mpt->mpt_port_page1.Header.PageType);
934 
935 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
936 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
937 	if (rv) {
938 		return (-1);
939 	}
940 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
941 	    mpt->mpt_port_page2.Header.PageVersion,
942 	    mpt->mpt_port_page2.Header.PageLength,
943 	    mpt->mpt_port_page2.Header.PageNumber,
944 	    mpt->mpt_port_page2.Header.PageType);
945 
946 	for (i = 0; i < 16; i++) {
947 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
948 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
949 		if (rv) {
950 			return (-1);
951 		}
952 		mpt_lprt(mpt, MPT_PRT_DEBUG,
953 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
954 		    mpt->mpt_dev_page0[i].Header.PageVersion,
955 		    mpt->mpt_dev_page0[i].Header.PageLength,
956 		    mpt->mpt_dev_page0[i].Header.PageNumber,
957 		    mpt->mpt_dev_page0[i].Header.PageType);
958 
959 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
960 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
961 		if (rv) {
962 			return (-1);
963 		}
964 		mpt_lprt(mpt, MPT_PRT_DEBUG,
965 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
966 		    mpt->mpt_dev_page1[i].Header.PageVersion,
967 		    mpt->mpt_dev_page1[i].Header.PageLength,
968 		    mpt->mpt_dev_page1[i].Header.PageNumber,
969 		    mpt->mpt_dev_page1[i].Header.PageType);
970 	}
971 
972 	/*
973 	 * At this point, we don't *have* to fail. As long as we have
974 	 * valid config header information, we can (barely) lurch
975 	 * along.
976 	 */
977 
978 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
979 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
980 	if (rv) {
981 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
982 	} else {
983 		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
984 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
985 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
986 		    mpt->mpt_port_page0.Capabilities,
987 		    mpt->mpt_port_page0.PhysicalInterface);
988 	}
989 
990 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
991 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
992 	if (rv) {
993 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
994 	} else {
995 		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
996 		mpt_lprt(mpt, MPT_PRT_DEBUG,
997 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
998 		    mpt->mpt_port_page1.Configuration,
999 		    mpt->mpt_port_page1.OnBusTimerValue);
1000 	}
1001 
1002 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
1003 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
1004 	if (rv) {
1005 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
1006 	} else {
1007 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1008 		    "Port Page 2: Flags %x Settings %x\n",
1009 		    mpt->mpt_port_page2.PortFlags,
1010 		    mpt->mpt_port_page2.PortSettings);
1011 		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
1012 		for (i = 0; i < 16; i++) {
1013 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1014 		  	    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1015 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1016 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1017 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1018 		}
1019 	}
1020 
1021 	for (i = 0; i < 16; i++) {
1022 		rv = mpt_read_cur_cfg_page(mpt, i,
1023 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1024 		    FALSE, 5000);
1025 		if (rv) {
1026 			mpt_prt(mpt,
1027 			    "cannot read SPI Target %d Device Page 0\n", i);
1028 			continue;
1029 		}
1030 		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
1031 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1032 		    "target %d page 0: Negotiated Params %x Information %x\n",
1033 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1034 		    mpt->mpt_dev_page0[i].Information);
1035 
1036 		rv = mpt_read_cur_cfg_page(mpt, i,
1037 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1038 		    FALSE, 5000);
1039 		if (rv) {
1040 			mpt_prt(mpt,
1041 			    "cannot read SPI Target %d Device Page 1\n", i);
1042 			continue;
1043 		}
1044 		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
1045 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1046 		    "target %d page 1: Requested Params %x Configuration %x\n",
1047 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
1048 		    mpt->mpt_dev_page1[i].Configuration);
1049 	}
1050 	return (0);
1051 }
1052 
1053 /*
1054  * Validate SPI configuration information.
1055  *
1056  * In particular, validate SPI Port Page 1.
1057  */
1058 static int
1059 mpt_set_initial_config_spi(struct mpt_softc *mpt)
1060 {
1061 	int error, i, pp1val;
1062 
1063 	mpt->mpt_disc_enable = 0xff;
1064 	mpt->mpt_tag_enable = 0;
1065 
1066 	pp1val = ((1 << mpt->mpt_ini_id) <<
1067 	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
1068 	if (mpt->mpt_port_page1.Configuration != pp1val) {
1069 		CONFIG_PAGE_SCSI_PORT_1 tmp;
1070 
1071 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1072 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1073 		tmp = mpt->mpt_port_page1;
1074 		tmp.Configuration = pp1val;
1075 		host2mpt_config_page_scsi_port_1(&tmp);
1076 		error = mpt_write_cur_cfg_page(mpt, 0,
1077 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1078 		if (error) {
1079 			return (-1);
1080 		}
1081 		error = mpt_read_cur_cfg_page(mpt, 0,
1082 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1083 		if (error) {
1084 			return (-1);
1085 		}
1086 		mpt2host_config_page_scsi_port_1(&tmp);
1087 		if (tmp.Configuration != pp1val) {
1088 			mpt_prt(mpt,
1089 			    "failed to reset SPI Port Page 1 Config value\n");
1090 			return (-1);
1091 		}
1092 		mpt->mpt_port_page1 = tmp;
1093 	}
1094 
1095 	/*
1096 	 * The purpose of this exercise is to get
1097 	 * all targets back to async/narrow.
1098 	 *
1099 	 * We skip this step if the BIOS has already negotiated
1100 	 * speeds with the targets.
1101 	 */
1102 	i = mpt->mpt_port_page2.PortSettings &
1103 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1104 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
1105 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1106 		    "honoring BIOS transfer negotiations\n");
1107 	} else {
1108 		for (i = 0; i < 16; i++) {
1109 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
1110 			mpt->mpt_dev_page1[i].Configuration = 0;
1111 			(void) mpt_update_spi_config(mpt, i);
1112 		}
1113 	}
1114 	return (0);
1115 }
1116 
1117 static int
1118 mpt_cam_enable(struct mpt_softc *mpt)
1119 {
1120 	int error;
1121 
1122 	MPT_LOCK(mpt);
1123 
1124 	error = EIO;
1125 	if (mpt->is_fc) {
1126 		if (mpt_read_config_info_fc(mpt)) {
1127 			goto out;
1128 		}
1129 		if (mpt_set_initial_config_fc(mpt)) {
1130 			goto out;
1131 		}
1132 	} else if (mpt->is_sas) {
1133 		if (mpt_read_config_info_sas(mpt)) {
1134 			goto out;
1135 		}
1136 		if (mpt_set_initial_config_sas(mpt)) {
1137 			goto out;
1138 		}
1139 	} else if (mpt->is_spi) {
1140 		if (mpt_read_config_info_spi(mpt)) {
1141 			goto out;
1142 		}
1143 		if (mpt_set_initial_config_spi(mpt)) {
1144 			goto out;
1145 		}
1146 	}
1147 	error = 0;
1148 
1149 out:
1150 	MPT_UNLOCK(mpt);
1151 	return (error);
1152 }
1153 
1154 static void
1155 mpt_cam_ready(struct mpt_softc *mpt)
1156 {
1157 
1158 	/*
1159 	 * If we're in target mode, hang out resources now
1160 	 * so we don't cause the world to hang talking to us.
1161 	 */
1162 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1163 		/*
1164 		 * Try to add some target command resources
1165 		 */
1166 		MPT_LOCK(mpt);
1167 		if (mpt_add_target_commands(mpt) == FALSE) {
1168 			mpt_prt(mpt, "failed to add target commands\n");
1169 		}
1170 		MPT_UNLOCK(mpt);
1171 	}
1172 	mpt->ready = 1;
1173 }
1174 
1175 static void
1176 mpt_cam_detach(struct mpt_softc *mpt)
1177 {
1178 	mpt_handler_t handler;
1179 
1180 	MPT_LOCK(mpt);
1181 	mpt->ready = 0;
1182 	mpt_terminate_recovery_thread(mpt);
1183 
1184 	handler.reply_handler = mpt_scsi_reply_handler;
1185 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1186 			       scsi_io_handler_id);
1187 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
1188 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1189 			       scsi_tmf_handler_id);
1190 	handler.reply_handler = mpt_fc_els_reply_handler;
1191 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1192 			       fc_els_handler_id);
1193 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
1194 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1195 			       mpt->scsi_tgt_handler_id);
1196 	handler.reply_handler = mpt_sata_pass_reply_handler;
1197 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1198 			       sata_pass_handler_id);
1199 
1200 	if (mpt->tmf_req != NULL) {
1201 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1202 		mpt_free_request(mpt, mpt->tmf_req);
1203 		mpt->tmf_req = NULL;
1204 	}
1205 	if (mpt->sas_portinfo != NULL) {
1206 		free(mpt->sas_portinfo, M_DEVBUF);
1207 		mpt->sas_portinfo = NULL;
1208 	}
1209 
1210 	if (mpt->sim != NULL) {
1211 		xpt_free_path(mpt->path);
1212 		xpt_bus_deregister(cam_sim_path(mpt->sim));
1213 		cam_sim_free(mpt->sim, TRUE);
1214 		mpt->sim = NULL;
1215 	}
1216 
1217 	if (mpt->phydisk_sim != NULL) {
1218 		xpt_free_path(mpt->phydisk_path);
1219 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1220 		cam_sim_free(mpt->phydisk_sim, TRUE);
1221 		mpt->phydisk_sim = NULL;
1222 	}
1223 	MPT_UNLOCK(mpt);
1224 }
1225 
1226 /* This routine is used after a system crash to dump core onto the swap device.
1227  */
1228 static void
1229 mpt_poll(struct cam_sim *sim)
1230 {
1231 	struct mpt_softc *mpt;
1232 
1233 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
1234 	mpt_intr(mpt);
1235 }
1236 
1237 /*
1238  * Watchdog timeout routine for SCSI requests.
1239  */
1240 static void
1241 mpt_timeout(void *arg)
1242 {
1243 	union ccb	 *ccb;
1244 	struct mpt_softc *mpt;
1245 	request_t	 *req;
1246 
1247 	ccb = (union ccb *)arg;
1248 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1249 
1250 	MPT_LOCK_ASSERT(mpt);
1251 	req = ccb->ccb_h.ccb_req_ptr;
1252 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1253 	    req->serno, ccb, req->ccb);
1254 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
1255 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1256 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1257 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1258 		req->state |= REQ_STATE_TIMEDOUT;
1259 		mpt_wakeup_recovery_thread(mpt);
1260 	}
1261 }
1262 
1263 /*
1264  * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called
1265  * directly.
1266  *
1267  * Takes a list of physical segments and builds the SGL for SCSI IO command
1268  * and forwards the commard to the IOC after one last check that CAM has not
1269  * aborted the transaction.
1270  */
1271 static void
1272 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1273 {
1274 	request_t *req, *trq;
1275 	char *mpt_off;
1276 	union ccb *ccb;
1277 	struct mpt_softc *mpt;
1278 	bus_addr_t chain_list_addr;
1279 	int first_lim, seg, this_seg_lim;
1280 	uint32_t addr, cur_off, flags, nxt_off, tf;
1281 	void *sglp = NULL;
1282 	MSG_REQUEST_HEADER *hdrp;
1283 	SGE_SIMPLE64 *se;
1284 	SGE_CHAIN64 *ce;
1285 	int istgt = 0;
1286 
1287 	req = (request_t *)arg;
1288 	ccb = req->ccb;
1289 
1290 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1291 	req = ccb->ccb_h.ccb_req_ptr;
1292 
1293 	hdrp = req->req_vbuf;
1294 	mpt_off = req->req_vbuf;
1295 
1296 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1297 		error = EFBIG;
1298 	}
1299 
1300 	if (error == 0) {
1301 		switch (hdrp->Function) {
1302 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1303 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1304 			istgt = 0;
1305 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1306 			break;
1307 		case MPI_FUNCTION_TARGET_ASSIST:
1308 			istgt = 1;
1309 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1310 			break;
1311 		default:
1312 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1313 			    hdrp->Function);
1314 			error = EINVAL;
1315 			break;
1316 		}
1317 	}
1318 
1319 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1320 		error = EFBIG;
1321 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1322 		    nseg, mpt->max_seg_cnt);
1323 	}
1324 
1325 bad:
1326 	if (error != 0) {
1327 		if (error != EFBIG && error != ENOMEM) {
1328 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1329 		}
1330 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1331 			cam_status status;
1332 			mpt_freeze_ccb(ccb);
1333 			if (error == EFBIG) {
1334 				status = CAM_REQ_TOO_BIG;
1335 			} else if (error == ENOMEM) {
1336 				if (mpt->outofbeer == 0) {
1337 					mpt->outofbeer = 1;
1338 					xpt_freeze_simq(mpt->sim, 1);
1339 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1340 					    "FREEZEQ\n");
1341 				}
1342 				status = CAM_REQUEUE_REQ;
1343 			} else {
1344 				status = CAM_REQ_CMP_ERR;
1345 			}
1346 			mpt_set_ccb_status(ccb, status);
1347 		}
1348 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1349 			request_t *cmd_req =
1350 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1351 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1352 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1353 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1354 		}
1355 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1356 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1357 		xpt_done(ccb);
1358 		mpt_free_request(mpt, req);
1359 		return;
1360 	}
1361 
1362 	/*
1363 	 * No data to transfer?
1364 	 * Just make a single simple SGL with zero length.
1365 	 */
1366 
1367 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1368 		int tidx = ((char *)sglp) - mpt_off;
1369 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1370 	}
1371 
1372 	if (nseg == 0) {
1373 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1374 		MPI_pSGE_SET_FLAGS(se1,
1375 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1376 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1377 		se1->FlagsLength = htole32(se1->FlagsLength);
1378 		goto out;
1379 	}
1380 
1381 
1382 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1383 	if (istgt == 0) {
1384 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1385 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1386 		}
1387 	} else {
1388 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1389 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1390 		}
1391 	}
1392 
1393 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1394 		bus_dmasync_op_t op;
1395 		if (istgt == 0) {
1396 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1397 				op = BUS_DMASYNC_PREREAD;
1398 			} else {
1399 				op = BUS_DMASYNC_PREWRITE;
1400 			}
1401 		} else {
1402 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1403 				op = BUS_DMASYNC_PREWRITE;
1404 			} else {
1405 				op = BUS_DMASYNC_PREREAD;
1406 			}
1407 		}
1408 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1409 	}
1410 
1411 	/*
1412 	 * Okay, fill in what we can at the end of the command frame.
1413 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1414 	 * the command frame.
1415 	 *
1416 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1417 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
1418 	 * that.
1419 	 */
1420 
1421 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1422 		first_lim = nseg;
1423 	} else {
1424 		/*
1425 		 * Leave room for CHAIN element
1426 		 */
1427 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1428 	}
1429 
1430 	se = (SGE_SIMPLE64 *) sglp;
1431 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1432 		tf = flags;
1433 		memset(se, 0, sizeof (*se));
1434 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1435 		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1436 		if (sizeof(bus_addr_t) > 4) {
1437 			addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1438 			/* SAS1078 36GB limitation WAR */
1439 			if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
1440 			    MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
1441 				addr |= (1U << 31);
1442 				tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1443 			}
1444 			se->Address.High = htole32(addr);
1445 		}
1446 		if (seg == first_lim - 1) {
1447 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1448 		}
1449 		if (seg == nseg - 1) {
1450 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1451 				MPI_SGE_FLAGS_END_OF_BUFFER;
1452 		}
1453 		MPI_pSGE_SET_FLAGS(se, tf);
1454 		se->FlagsLength = htole32(se->FlagsLength);
1455 	}
1456 
1457 	if (seg == nseg) {
1458 		goto out;
1459 	}
1460 
1461 	/*
1462 	 * Tell the IOC where to find the first chain element.
1463 	 */
1464 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1465 	nxt_off = MPT_RQSL(mpt);
1466 	trq = req;
1467 
1468 	/*
1469 	 * Make up the rest of the data segments out of a chain element
1470 	 * (contained in the current request frame) which points to
1471 	 * SIMPLE64 elements in the next request frame, possibly ending
1472 	 * with *another* chain element (if there's more).
1473 	 */
1474 	while (seg < nseg) {
1475 		/*
1476 		 * Point to the chain descriptor. Note that the chain
1477 		 * descriptor is at the end of the *previous* list (whether
1478 		 * chain or simple).
1479 		 */
1480 		ce = (SGE_CHAIN64 *) se;
1481 
1482 		/*
1483 		 * Before we change our current pointer, make  sure we won't
1484 		 * overflow the request area with this frame. Note that we
1485 		 * test against 'greater than' here as it's okay in this case
1486 		 * to have next offset be just outside the request area.
1487 		 */
1488 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1489 			nxt_off = MPT_REQUEST_AREA;
1490 			goto next_chain;
1491 		}
1492 
1493 		/*
1494 		 * Set our SGE element pointer to the beginning of the chain
1495 		 * list and update our next chain list offset.
1496 		 */
1497 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1498 		cur_off = nxt_off;
1499 		nxt_off += MPT_RQSL(mpt);
1500 
1501 		/*
1502 		 * Now initialize the chain descriptor.
1503 		 */
1504 		memset(ce, 0, sizeof (*ce));
1505 
1506 		/*
1507 		 * Get the physical address of the chain list.
1508 		 */
1509 		chain_list_addr = trq->req_pbuf;
1510 		chain_list_addr += cur_off;
1511 		if (sizeof (bus_addr_t) > 4) {
1512 			ce->Address.High =
1513 			    htole32(((uint64_t)chain_list_addr) >> 32);
1514 		}
1515 		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
1516 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1517 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1518 
1519 		/*
1520 		 * If we have more than a frame's worth of segments left,
1521 		 * set up the chain list to have the last element be another
1522 		 * chain descriptor.
1523 		 */
1524 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1525 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1526 			/*
1527 			 * The length of the chain is the length in bytes of the
1528 			 * number of segments plus the next chain element.
1529 			 *
1530 			 * The next chain descriptor offset is the length,
1531 			 * in words, of the number of segments.
1532 			 */
1533 			ce->Length = (this_seg_lim - seg) *
1534 			    sizeof (SGE_SIMPLE64);
1535 			ce->NextChainOffset = ce->Length >> 2;
1536 			ce->Length += sizeof (SGE_CHAIN64);
1537 		} else {
1538 			this_seg_lim = nseg;
1539 			ce->Length = (this_seg_lim - seg) *
1540 			    sizeof (SGE_SIMPLE64);
1541 		}
1542 		ce->Length = htole16(ce->Length);
1543 
1544 		/*
1545 		 * Fill in the chain list SGE elements with our segment data.
1546 		 *
1547 		 * If we're the last element in this chain list, set the last
1548 		 * element flag. If we're the completely last element period,
1549 		 * set the end of list and end of buffer flags.
1550 		 */
1551 		while (seg < this_seg_lim) {
1552 			tf = flags;
1553 			memset(se, 0, sizeof (*se));
1554 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1555 			se->Address.Low = htole32(dm_segs->ds_addr &
1556 			    0xffffffff);
1557 			if (sizeof (bus_addr_t) > 4) {
1558 				addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1559 				/* SAS1078 36GB limitation WAR */
1560 				if (mpt->is_1078 &&
1561 				    (((uint64_t)dm_segs->ds_addr +
1562 				    MPI_SGE_LENGTH(se->FlagsLength)) >>
1563 				    32) == 9) {
1564 					addr |= (1U << 31);
1565 					tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1566 				}
1567 				se->Address.High = htole32(addr);
1568 			}
1569 			if (seg == this_seg_lim - 1) {
1570 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1571 			}
1572 			if (seg == nseg - 1) {
1573 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1574 					MPI_SGE_FLAGS_END_OF_BUFFER;
1575 			}
1576 			MPI_pSGE_SET_FLAGS(se, tf);
1577 			se->FlagsLength = htole32(se->FlagsLength);
1578 			se++;
1579 			seg++;
1580 			dm_segs++;
1581 		}
1582 
1583     next_chain:
1584 		/*
1585 		 * If we have more segments to do and we've used up all of
1586 		 * the space in a request area, go allocate another one
1587 		 * and chain to that.
1588 		 */
1589 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1590 			request_t *nrq;
1591 
1592 			nrq = mpt_get_request(mpt, FALSE);
1593 
1594 			if (nrq == NULL) {
1595 				error = ENOMEM;
1596 				goto bad;
1597 			}
1598 
1599 			/*
1600 			 * Append the new request area on the tail of our list.
1601 			 */
1602 			if ((trq = req->chain) == NULL) {
1603 				req->chain = nrq;
1604 			} else {
1605 				while (trq->chain != NULL) {
1606 					trq = trq->chain;
1607 				}
1608 				trq->chain = nrq;
1609 			}
1610 			trq = nrq;
1611 			mpt_off = trq->req_vbuf;
1612 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1613 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1614 			}
1615 			nxt_off = 0;
1616 		}
1617 	}
1618 out:
1619 
1620 	/*
1621 	 * Last time we need to check if this CCB needs to be aborted.
1622 	 */
1623 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1624 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1625 			request_t *cmd_req =
1626 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1627 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1628 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1629 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1630 		}
1631 		mpt_prt(mpt,
1632 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1633 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1634 		if (nseg) {
1635 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1636 		}
1637 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1638 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1639 		xpt_done(ccb);
1640 		mpt_free_request(mpt, req);
1641 		return;
1642 	}
1643 
1644 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1645 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1646 		mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
1647 		    mpt_timeout, ccb);
1648 	}
1649 	if (mpt->verbose > MPT_PRT_DEBUG) {
1650 		int nc = 0;
1651 		mpt_print_request(req->req_vbuf);
1652 		for (trq = req->chain; trq; trq = trq->chain) {
1653 			printf("  Additional Chain Area %d\n", nc++);
1654 			mpt_dump_sgl(trq->req_vbuf, 0);
1655 		}
1656 	}
1657 
1658 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1659 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1660 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1661 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1662 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1663 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1664 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1665 		} else {
1666 			tgt->state = TGT_STATE_MOVING_DATA;
1667 		}
1668 #else
1669 		tgt->state = TGT_STATE_MOVING_DATA;
1670 #endif
1671 	}
1672 	mpt_send_cmd(mpt, req);
1673 }
1674 
1675 static void
1676 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1677 {
1678 	request_t *req, *trq;
1679 	char *mpt_off;
1680 	union ccb *ccb;
1681 	struct mpt_softc *mpt;
1682 	int seg, first_lim;
1683 	uint32_t flags, nxt_off;
1684 	void *sglp = NULL;
1685 	MSG_REQUEST_HEADER *hdrp;
1686 	SGE_SIMPLE32 *se;
1687 	SGE_CHAIN32 *ce;
1688 	int istgt = 0;
1689 
1690 	req = (request_t *)arg;
1691 	ccb = req->ccb;
1692 
1693 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1694 	req = ccb->ccb_h.ccb_req_ptr;
1695 
1696 	hdrp = req->req_vbuf;
1697 	mpt_off = req->req_vbuf;
1698 
1699 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1700 		error = EFBIG;
1701 	}
1702 
1703 	if (error == 0) {
1704 		switch (hdrp->Function) {
1705 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1706 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1707 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1708 			break;
1709 		case MPI_FUNCTION_TARGET_ASSIST:
1710 			istgt = 1;
1711 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1712 			break;
1713 		default:
1714 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1715 			    hdrp->Function);
1716 			error = EINVAL;
1717 			break;
1718 		}
1719 	}
1720 
1721 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1722 		error = EFBIG;
1723 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1724 		    nseg, mpt->max_seg_cnt);
1725 	}
1726 
1727 bad:
1728 	if (error != 0) {
1729 		if (error != EFBIG && error != ENOMEM) {
1730 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1731 		}
1732 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1733 			cam_status status;
1734 			mpt_freeze_ccb(ccb);
1735 			if (error == EFBIG) {
1736 				status = CAM_REQ_TOO_BIG;
1737 			} else if (error == ENOMEM) {
1738 				if (mpt->outofbeer == 0) {
1739 					mpt->outofbeer = 1;
1740 					xpt_freeze_simq(mpt->sim, 1);
1741 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1742 					    "FREEZEQ\n");
1743 				}
1744 				status = CAM_REQUEUE_REQ;
1745 			} else {
1746 				status = CAM_REQ_CMP_ERR;
1747 			}
1748 			mpt_set_ccb_status(ccb, status);
1749 		}
1750 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1751 			request_t *cmd_req =
1752 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1753 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1754 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1755 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1756 		}
1757 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1758 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1759 		xpt_done(ccb);
1760 		mpt_free_request(mpt, req);
1761 		return;
1762 	}
1763 
1764 	/*
1765 	 * No data to transfer?
1766 	 * Just make a single simple SGL with zero length.
1767 	 */
1768 
1769 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1770 		int tidx = ((char *)sglp) - mpt_off;
1771 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1772 	}
1773 
1774 	if (nseg == 0) {
1775 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1776 		MPI_pSGE_SET_FLAGS(se1,
1777 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1778 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1779 		se1->FlagsLength = htole32(se1->FlagsLength);
1780 		goto out;
1781 	}
1782 
1783 
1784 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1785 	if (istgt == 0) {
1786 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1787 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1788 		}
1789 	} else {
1790 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1791 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1792 		}
1793 	}
1794 
1795 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1796 		bus_dmasync_op_t op;
1797 		if (istgt) {
1798 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1799 				op = BUS_DMASYNC_PREREAD;
1800 			} else {
1801 				op = BUS_DMASYNC_PREWRITE;
1802 			}
1803 		} else {
1804 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1805 				op = BUS_DMASYNC_PREWRITE;
1806 			} else {
1807 				op = BUS_DMASYNC_PREREAD;
1808 			}
1809 		}
1810 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1811 	}
1812 
1813 	/*
1814 	 * Okay, fill in what we can at the end of the command frame.
1815 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1816 	 * the command frame.
1817 	 *
1818 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1819 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1820 	 * that.
1821 	 */
1822 
1823 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1824 		first_lim = nseg;
1825 	} else {
1826 		/*
1827 		 * Leave room for CHAIN element
1828 		 */
1829 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1830 	}
1831 
1832 	se = (SGE_SIMPLE32 *) sglp;
1833 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1834 		uint32_t tf;
1835 
1836 		memset(se, 0,sizeof (*se));
1837 		se->Address = htole32(dm_segs->ds_addr);
1838 
1839 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1840 		tf = flags;
1841 		if (seg == first_lim - 1) {
1842 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1843 		}
1844 		if (seg == nseg - 1) {
1845 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1846 				MPI_SGE_FLAGS_END_OF_BUFFER;
1847 		}
1848 		MPI_pSGE_SET_FLAGS(se, tf);
1849 		se->FlagsLength = htole32(se->FlagsLength);
1850 	}
1851 
1852 	if (seg == nseg) {
1853 		goto out;
1854 	}
1855 
1856 	/*
1857 	 * Tell the IOC where to find the first chain element.
1858 	 */
1859 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1860 	nxt_off = MPT_RQSL(mpt);
1861 	trq = req;
1862 
1863 	/*
1864 	 * Make up the rest of the data segments out of a chain element
1865 	 * (contained in the current request frame) which points to
1866 	 * SIMPLE32 elements in the next request frame, possibly ending
1867 	 * with *another* chain element (if there's more).
1868 	 */
1869 	while (seg < nseg) {
1870 		int this_seg_lim;
1871 		uint32_t tf, cur_off;
1872 		bus_addr_t chain_list_addr;
1873 
1874 		/*
1875 		 * Point to the chain descriptor. Note that the chain
1876 		 * descriptor is at the end of the *previous* list (whether
1877 		 * chain or simple).
1878 		 */
1879 		ce = (SGE_CHAIN32 *) se;
1880 
1881 		/*
1882 		 * Before we change our current pointer, make  sure we won't
1883 		 * overflow the request area with this frame. Note that we
1884 		 * test against 'greater than' here as it's okay in this case
1885 		 * to have next offset be just outside the request area.
1886 		 */
1887 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1888 			nxt_off = MPT_REQUEST_AREA;
1889 			goto next_chain;
1890 		}
1891 
1892 		/*
1893 		 * Set our SGE element pointer to the beginning of the chain
1894 		 * list and update our next chain list offset.
1895 		 */
1896 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1897 		cur_off = nxt_off;
1898 		nxt_off += MPT_RQSL(mpt);
1899 
1900 		/*
1901 		 * Now initialize the chain descriptor.
1902 		 */
1903 		memset(ce, 0, sizeof (*ce));
1904 
1905 		/*
1906 		 * Get the physical address of the chain list.
1907 		 */
1908 		chain_list_addr = trq->req_pbuf;
1909 		chain_list_addr += cur_off;
1910 
1911 
1912 
1913 		ce->Address = htole32(chain_list_addr);
1914 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1915 
1916 
1917 		/*
1918 		 * If we have more than a frame's worth of segments left,
1919 		 * set up the chain list to have the last element be another
1920 		 * chain descriptor.
1921 		 */
1922 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1923 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1924 			/*
1925 			 * The length of the chain is the length in bytes of the
1926 			 * number of segments plus the next chain element.
1927 			 *
1928 			 * The next chain descriptor offset is the length,
1929 			 * in words, of the number of segments.
1930 			 */
1931 			ce->Length = (this_seg_lim - seg) *
1932 			    sizeof (SGE_SIMPLE32);
1933 			ce->NextChainOffset = ce->Length >> 2;
1934 			ce->Length += sizeof (SGE_CHAIN32);
1935 		} else {
1936 			this_seg_lim = nseg;
1937 			ce->Length = (this_seg_lim - seg) *
1938 			    sizeof (SGE_SIMPLE32);
1939 		}
1940 		ce->Length = htole16(ce->Length);
1941 
1942 		/*
1943 		 * Fill in the chain list SGE elements with our segment data.
1944 		 *
1945 		 * If we're the last element in this chain list, set the last
1946 		 * element flag. If we're the completely last element period,
1947 		 * set the end of list and end of buffer flags.
1948 		 */
1949 		while (seg < this_seg_lim) {
1950 			memset(se, 0, sizeof (*se));
1951 			se->Address = htole32(dm_segs->ds_addr);
1952 
1953 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1954 			tf = flags;
1955 			if (seg == this_seg_lim - 1) {
1956 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1957 			}
1958 			if (seg == nseg - 1) {
1959 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1960 					MPI_SGE_FLAGS_END_OF_BUFFER;
1961 			}
1962 			MPI_pSGE_SET_FLAGS(se, tf);
1963 			se->FlagsLength = htole32(se->FlagsLength);
1964 			se++;
1965 			seg++;
1966 			dm_segs++;
1967 		}
1968 
1969     next_chain:
1970 		/*
1971 		 * If we have more segments to do and we've used up all of
1972 		 * the space in a request area, go allocate another one
1973 		 * and chain to that.
1974 		 */
1975 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1976 			request_t *nrq;
1977 
1978 			nrq = mpt_get_request(mpt, FALSE);
1979 
1980 			if (nrq == NULL) {
1981 				error = ENOMEM;
1982 				goto bad;
1983 			}
1984 
1985 			/*
1986 			 * Append the new request area on the tail of our list.
1987 			 */
1988 			if ((trq = req->chain) == NULL) {
1989 				req->chain = nrq;
1990 			} else {
1991 				while (trq->chain != NULL) {
1992 					trq = trq->chain;
1993 				}
1994 				trq->chain = nrq;
1995 			}
1996 			trq = nrq;
1997 			mpt_off = trq->req_vbuf;
1998 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1999 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
2000 			}
2001 			nxt_off = 0;
2002 		}
2003 	}
2004 out:
2005 
2006 	/*
2007 	 * Last time we need to check if this CCB needs to be aborted.
2008 	 */
2009 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2010 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2011 			request_t *cmd_req =
2012 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2013 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2014 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2015 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2016 		}
2017 		mpt_prt(mpt,
2018 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2019 		    ccb->ccb_h.status & CAM_STATUS_MASK);
2020 		if (nseg) {
2021 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2022 		}
2023 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2024 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2025 		xpt_done(ccb);
2026 		mpt_free_request(mpt, req);
2027 		return;
2028 	}
2029 
2030 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2031 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2032 		mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
2033 		    mpt_timeout, ccb);
2034 	}
2035 	if (mpt->verbose > MPT_PRT_DEBUG) {
2036 		int nc = 0;
2037 		mpt_print_request(req->req_vbuf);
2038 		for (trq = req->chain; trq; trq = trq->chain) {
2039 			printf("  Additional Chain Area %d\n", nc++);
2040 			mpt_dump_sgl(trq->req_vbuf, 0);
2041 		}
2042 	}
2043 
2044 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2045 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2046 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2047 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
2048 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2049 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2050 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2051 		} else {
2052 			tgt->state = TGT_STATE_MOVING_DATA;
2053 		}
2054 #else
2055 		tgt->state = TGT_STATE_MOVING_DATA;
2056 #endif
2057 	}
2058 	mpt_send_cmd(mpt, req);
2059 }
2060 
2061 static void
2062 mpt_start(struct cam_sim *sim, union ccb *ccb)
2063 {
2064 	request_t *req;
2065 	struct mpt_softc *mpt;
2066 	MSG_SCSI_IO_REQUEST *mpt_req;
2067 	struct ccb_scsiio *csio = &ccb->csio;
2068 	struct ccb_hdr *ccbh = &ccb->ccb_h;
2069 	bus_dmamap_callback_t *cb;
2070 	target_id_t tgt;
2071 	int raid_passthru;
2072 	int error;
2073 
2074 	/* Get the pointer for the physical addapter */
2075 	mpt = ccb->ccb_h.ccb_mpt_ptr;
2076 	raid_passthru = (sim == mpt->phydisk_sim);
2077 
2078 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2079 		if (mpt->outofbeer == 0) {
2080 			mpt->outofbeer = 1;
2081 			xpt_freeze_simq(mpt->sim, 1);
2082 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2083 		}
2084 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2085 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2086 		xpt_done(ccb);
2087 		return;
2088 	}
2089 #ifdef	INVARIANTS
2090 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2091 #endif
2092 
2093 	if (sizeof (bus_addr_t) > 4) {
2094 		cb = mpt_execute_req_a64;
2095 	} else {
2096 		cb = mpt_execute_req;
2097 	}
2098 
2099 	/*
2100 	 * Link the ccb and the request structure so we can find
2101 	 * the other knowing either the request or the ccb
2102 	 */
2103 	req->ccb = ccb;
2104 	ccb->ccb_h.ccb_req_ptr = req;
2105 
2106 	/* Now we build the command for the IOC */
2107 	mpt_req = req->req_vbuf;
2108 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2109 
2110 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2111 	if (raid_passthru) {
2112 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2113 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2114 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2115 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2116 			xpt_done(ccb);
2117 			return;
2118 		}
2119 		mpt_req->Bus = 0;	/* we never set bus here */
2120 	} else {
2121 		tgt = ccb->ccb_h.target_id;
2122 		mpt_req->Bus = 0;	/* XXX */
2123 
2124 	}
2125 	mpt_req->SenseBufferLength =
2126 		(csio->sense_len < MPT_SENSE_SIZE) ?
2127 		 csio->sense_len : MPT_SENSE_SIZE;
2128 
2129 	/*
2130 	 * We use the message context to find the request structure when we
2131 	 * Get the command completion interrupt from the IOC.
2132 	 */
2133 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2134 
2135 	/* Which physical device to do the I/O on */
2136 	mpt_req->TargetID = tgt;
2137 
2138 	be64enc(mpt_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
2139 
2140 	/* Set the direction of the transfer */
2141 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2142 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2143 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2144 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2145 	} else {
2146 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2147 	}
2148 
2149 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2150 		switch(ccb->csio.tag_action) {
2151 		case MSG_HEAD_OF_Q_TAG:
2152 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2153 			break;
2154 		case MSG_ACA_TASK:
2155 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2156 			break;
2157 		case MSG_ORDERED_Q_TAG:
2158 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2159 			break;
2160 		case MSG_SIMPLE_Q_TAG:
2161 		default:
2162 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2163 			break;
2164 		}
2165 	} else {
2166 		if (mpt->is_fc || mpt->is_sas) {
2167 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2168 		} else {
2169 			/* XXX No such thing for a target doing packetized. */
2170 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2171 		}
2172 	}
2173 
2174 	if (mpt->is_spi) {
2175 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2176 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2177 		}
2178 	}
2179 	mpt_req->Control = htole32(mpt_req->Control);
2180 
2181 	/* Copy the scsi command block into place */
2182 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2183 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2184 	} else {
2185 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2186 	}
2187 
2188 	mpt_req->CDBLength = csio->cdb_len;
2189 	mpt_req->DataLength = htole32(csio->dxfer_len);
2190 	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2191 
2192 	/*
2193 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2194 	 */
2195 	if (mpt->verbose == MPT_PRT_DEBUG) {
2196 		U32 df;
2197 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2198 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2199 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2200 		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2201 		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2202 			mpt_prtc(mpt, "(%s %u byte%s ",
2203 			    (df == MPI_SCSIIO_CONTROL_READ)?
2204 			    "read" : "write",  csio->dxfer_len,
2205 			    (csio->dxfer_len == 1)? ")" : "s)");
2206 		}
2207 		mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt,
2208 		    (uintmax_t)ccb->ccb_h.target_lun, req, req->serno);
2209 	}
2210 
2211 	error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb,
2212 	    req, 0);
2213 	if (error == EINPROGRESS) {
2214 		/*
2215 		 * So as to maintain ordering, freeze the controller queue
2216 		 * until our mapping is returned.
2217 		 */
2218 		xpt_freeze_simq(mpt->sim, 1);
2219 		ccbh->status |= CAM_RELEASE_SIMQ;
2220 	}
2221 }
2222 
2223 static int
2224 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2225     int sleep_ok)
2226 {
2227 	int   error;
2228 	uint16_t status;
2229 	uint8_t response;
2230 
2231 	error = mpt_scsi_send_tmf(mpt,
2232 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2233 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2234 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2235 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2236 	    0,	/* XXX How do I get the channel ID? */
2237 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2238 	    lun != CAM_LUN_WILDCARD ? lun : 0,
2239 	    0, sleep_ok);
2240 
2241 	if (error != 0) {
2242 		/*
2243 		 * mpt_scsi_send_tmf hard resets on failure, so no
2244 		 * need to do so here.
2245 		 */
2246 		mpt_prt(mpt,
2247 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2248 		return (EIO);
2249 	}
2250 
2251 	/* Wait for bus reset to be processed by the IOC. */
2252 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2253 	    REQ_STATE_DONE, sleep_ok, 5000);
2254 
2255 	status = le16toh(mpt->tmf_req->IOCStatus);
2256 	response = mpt->tmf_req->ResponseCode;
2257 	mpt->tmf_req->state = REQ_STATE_FREE;
2258 
2259 	if (error) {
2260 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2261 		    "Resetting controller.\n");
2262 		mpt_reset(mpt, TRUE);
2263 		return (ETIMEDOUT);
2264 	}
2265 
2266 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2267 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2268 		    "Resetting controller.\n", status);
2269 		mpt_reset(mpt, TRUE);
2270 		return (EIO);
2271 	}
2272 
2273 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2274 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2275 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2276 		    "Resetting controller.\n", response);
2277 		mpt_reset(mpt, TRUE);
2278 		return (EIO);
2279 	}
2280 	return (0);
2281 }
2282 
2283 static int
2284 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2285 {
2286 	int r = 0;
2287 	request_t *req;
2288 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2289 
2290  	req = mpt_get_request(mpt, FALSE);
2291 	if (req == NULL) {
2292 		return (ENOMEM);
2293 	}
2294 	fc = req->req_vbuf;
2295 	memset(fc, 0, sizeof(*fc));
2296 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2297 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2298 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2299 	mpt_send_cmd(mpt, req);
2300 	if (dowait) {
2301 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2302 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2303 		if (r == 0) {
2304 			mpt_free_request(mpt, req);
2305 		}
2306 	}
2307 	return (r);
2308 }
2309 
2310 static int
2311 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2312 	      MSG_EVENT_NOTIFY_REPLY *msg)
2313 {
2314 	uint32_t data0, data1;
2315 
2316 	data0 = le32toh(msg->Data[0]);
2317 	data1 = le32toh(msg->Data[1]);
2318 	switch(msg->Event & 0xFF) {
2319 	case MPI_EVENT_UNIT_ATTENTION:
2320 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2321 		    (data0 >> 8) & 0xff, data0 & 0xff);
2322 		break;
2323 
2324 	case MPI_EVENT_IOC_BUS_RESET:
2325 		/* We generated a bus reset */
2326 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2327 		    (data0 >> 8) & 0xff);
2328 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2329 		break;
2330 
2331 	case MPI_EVENT_EXT_BUS_RESET:
2332 		/* Someone else generated a bus reset */
2333 		mpt_prt(mpt, "External Bus Reset Detected\n");
2334 		/*
2335 		 * These replies don't return EventData like the MPI
2336 		 * spec says they do
2337 		 */
2338 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2339 		break;
2340 
2341 	case MPI_EVENT_RESCAN:
2342 	{
2343 		union ccb *ccb;
2344 		uint32_t pathid;
2345 		/*
2346 		 * In general this means a device has been added to the loop.
2347 		 */
2348 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2349 		if (mpt->ready == 0) {
2350 			break;
2351 		}
2352 		if (mpt->phydisk_sim) {
2353 			pathid = cam_sim_path(mpt->phydisk_sim);
2354 		} else {
2355 			pathid = cam_sim_path(mpt->sim);
2356 		}
2357 		/*
2358 		 * Allocate a CCB, create a wildcard path for this bus,
2359 		 * and schedule a rescan.
2360 		 */
2361 		ccb = xpt_alloc_ccb_nowait();
2362 		if (ccb == NULL) {
2363 			mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2364 			break;
2365 		}
2366 
2367 		if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
2368 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2369 			mpt_prt(mpt, "unable to create path for rescan\n");
2370 			xpt_free_ccb(ccb);
2371 			break;
2372 		}
2373 		xpt_rescan(ccb);
2374 		break;
2375 	}
2376 
2377 	case MPI_EVENT_LINK_STATUS_CHANGE:
2378 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2379 		    (data1 >> 8) & 0xff,
2380 		    ((data0 & 0xff) == 0)?  "Failed" : "Active");
2381 		break;
2382 
2383 	case MPI_EVENT_LOOP_STATE_CHANGE:
2384 		switch ((data0 >> 16) & 0xff) {
2385 		case 0x01:
2386 			mpt_prt(mpt,
2387 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2388 			    "(Loop Initialization)\n",
2389 			    (data1 >> 8) & 0xff,
2390 			    (data0 >> 8) & 0xff,
2391 			    (data0     ) & 0xff);
2392 			switch ((data0 >> 8) & 0xff) {
2393 			case 0xF7:
2394 				if ((data0 & 0xff) == 0xF7) {
2395 					mpt_prt(mpt, "Device needs AL_PA\n");
2396 				} else {
2397 					mpt_prt(mpt, "Device %02x doesn't like "
2398 					    "FC performance\n",
2399 					    data0 & 0xFF);
2400 				}
2401 				break;
2402 			case 0xF8:
2403 				if ((data0 & 0xff) == 0xF7) {
2404 					mpt_prt(mpt, "Device had loop failure "
2405 					    "at its receiver prior to acquiring"
2406 					    " AL_PA\n");
2407 				} else {
2408 					mpt_prt(mpt, "Device %02x detected loop"
2409 					    " failure at its receiver\n",
2410 					    data0 & 0xFF);
2411 				}
2412 				break;
2413 			default:
2414 				mpt_prt(mpt, "Device %02x requests that device "
2415 				    "%02x reset itself\n",
2416 				    data0 & 0xFF,
2417 				    (data0 >> 8) & 0xFF);
2418 				break;
2419 			}
2420 			break;
2421 		case 0x02:
2422 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2423 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2424 			    (data1 >> 8) & 0xff, /* Port */
2425 			    (data0 >>  8) & 0xff, /* Character 3 */
2426 			    (data0      ) & 0xff  /* Character 4 */);
2427 			break;
2428 		case 0x03:
2429 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2430 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2431 			    (data1 >> 8) & 0xff, /* Port */
2432 			    (data0 >> 8) & 0xff, /* Character 3 */
2433 			    (data0     ) & 0xff  /* Character 4 */);
2434 			break;
2435 		default:
2436 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2437 			    "FC event (%02x %02x %02x)\n",
2438 			    (data1 >> 8) & 0xff, /* Port */
2439 			    (data0 >> 16) & 0xff, /* Event */
2440 			    (data0 >>  8) & 0xff, /* Character 3 */
2441 			    (data0      ) & 0xff  /* Character 4 */);
2442 		}
2443 		break;
2444 
2445 	case MPI_EVENT_LOGOUT:
2446 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2447 		    (data1 >> 8) & 0xff, data0);
2448 		break;
2449 	case MPI_EVENT_QUEUE_FULL:
2450 	{
2451 		struct cam_sim *sim;
2452 		struct cam_path *tmppath;
2453 		struct ccb_relsim crs;
2454 		PTR_EVENT_DATA_QUEUE_FULL pqf;
2455 		lun_id_t lun_id;
2456 
2457 		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2458 		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2459 		if (bootverbose) {
2460 		    mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x "
2461 			"Depth %d\n",
2462 			pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2463 		}
2464 		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2465 		    pqf->TargetID) != 0) {
2466 			sim = mpt->phydisk_sim;
2467 		} else {
2468 			sim = mpt->sim;
2469 		}
2470 		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2471 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2472 			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2473 				mpt_prt(mpt, "unable to create a path to send "
2474 				    "XPT_REL_SIMQ");
2475 				break;
2476 			}
2477 			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2478 			crs.ccb_h.func_code = XPT_REL_SIMQ;
2479 			crs.ccb_h.flags = CAM_DEV_QFREEZE;
2480 			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2481 			crs.openings = pqf->CurrentDepth - 1;
2482 			xpt_action((union ccb *)&crs);
2483 			if (crs.ccb_h.status != CAM_REQ_CMP) {
2484 				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2485 			}
2486 			xpt_free_path(tmppath);
2487 		}
2488 		break;
2489 	}
2490 	case MPI_EVENT_IR_RESYNC_UPDATE:
2491 		mpt_prt(mpt, "IR resync update %d completed\n",
2492 		    (data0 >> 16) & 0xff);
2493 		break;
2494 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2495 	{
2496 		union ccb *ccb;
2497 		struct cam_sim *sim;
2498 		struct cam_path *tmppath;
2499 		PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2500 
2501 		psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2502 		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2503 		    psdsc->TargetID) != 0)
2504 			sim = mpt->phydisk_sim;
2505 		else
2506 			sim = mpt->sim;
2507 		switch(psdsc->ReasonCode) {
2508 		case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2509 			ccb = xpt_alloc_ccb_nowait();
2510 			if (ccb == NULL) {
2511 				mpt_prt(mpt,
2512 				    "unable to alloc CCB for rescan\n");
2513 				break;
2514 			}
2515 			if (xpt_create_path(&ccb->ccb_h.path, NULL,
2516 			    cam_sim_path(sim), psdsc->TargetID,
2517 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2518 				mpt_prt(mpt,
2519 				    "unable to create path for rescan\n");
2520 				xpt_free_ccb(ccb);
2521 				break;
2522 			}
2523 			xpt_rescan(ccb);
2524 			break;
2525 		case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2526 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2527 			    psdsc->TargetID, CAM_LUN_WILDCARD) !=
2528 			    CAM_REQ_CMP) {
2529 				mpt_prt(mpt,
2530 				    "unable to create path for async event");
2531 				break;
2532 			}
2533 			xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2534 			xpt_free_path(tmppath);
2535 			break;
2536 		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2537 		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2538 		case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2539 			break;
2540 		default:
2541 			mpt_lprt(mpt, MPT_PRT_WARN,
2542 			    "SAS device status change: Bus: 0x%02x TargetID: "
2543 			    "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2544 			    psdsc->TargetID, psdsc->ReasonCode);
2545 			break;
2546 		}
2547 		break;
2548 	}
2549 	case MPI_EVENT_SAS_DISCOVERY_ERROR:
2550 	{
2551 		PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2552 
2553 		pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2554 		pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2555 		mpt_lprt(mpt, MPT_PRT_WARN,
2556 		    "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2557 		    pde->Port, pde->DiscoveryStatus);
2558 		break;
2559 	}
2560 	case MPI_EVENT_EVENT_CHANGE:
2561 	case MPI_EVENT_INTEGRATED_RAID:
2562 	case MPI_EVENT_IR2:
2563 	case MPI_EVENT_LOG_ENTRY_ADDED:
2564 	case MPI_EVENT_SAS_DISCOVERY:
2565 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2566 	case MPI_EVENT_SAS_SES:
2567 		break;
2568 	default:
2569 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2570 		    msg->Event & 0xFF);
2571 		return (0);
2572 	}
2573 	return (1);
2574 }
2575 
2576 /*
2577  * Reply path for all SCSI I/O requests, called from our
2578  * interrupt handler by extracting our handler index from
2579  * the MsgContext field of the reply from the IOC.
2580  *
2581  * This routine is optimized for the common case of a
2582  * completion without error.  All exception handling is
2583  * offloaded to non-inlined helper routines to minimize
2584  * cache footprint.
2585  */
2586 static int
2587 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2588     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2589 {
2590 	MSG_SCSI_IO_REQUEST *scsi_req;
2591 	union ccb *ccb;
2592 
2593 	if (req->state == REQ_STATE_FREE) {
2594 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2595 		return (TRUE);
2596 	}
2597 
2598 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2599 	ccb = req->ccb;
2600 	if (ccb == NULL) {
2601 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2602 		    req, req->serno);
2603 		return (TRUE);
2604 	}
2605 
2606 	mpt_req_untimeout(req, mpt_timeout, ccb);
2607 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2608 
2609 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2610 		bus_dmasync_op_t op;
2611 
2612 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2613 			op = BUS_DMASYNC_POSTREAD;
2614 		else
2615 			op = BUS_DMASYNC_POSTWRITE;
2616 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2617 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2618 	}
2619 
2620 	if (reply_frame == NULL) {
2621 		/*
2622 		 * Context only reply, completion without error status.
2623 		 */
2624 		ccb->csio.resid = 0;
2625 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2626 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2627 	} else {
2628 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2629 	}
2630 
2631 	if (mpt->outofbeer) {
2632 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2633 		mpt->outofbeer = 0;
2634 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2635 	}
2636 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2637 		struct scsi_inquiry_data *iq =
2638 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2639 		if (scsi_req->Function ==
2640 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2641 			/*
2642 			 * Fake out the device type so that only the
2643 			 * pass-thru device will attach.
2644 			 */
2645 			iq->device &= ~0x1F;
2646 			iq->device |= T_NODEVICE;
2647 		}
2648 	}
2649 	if (mpt->verbose == MPT_PRT_DEBUG) {
2650 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2651 		    req, req->serno);
2652 	}
2653 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2654 	xpt_done(ccb);
2655 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2656 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2657 	} else {
2658 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2659 		    req, req->serno);
2660 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2661 	}
2662 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2663 	    ("CCB req needed wakeup"));
2664 #ifdef	INVARIANTS
2665 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2666 #endif
2667 	mpt_free_request(mpt, req);
2668 	return (TRUE);
2669 }
2670 
2671 static int
2672 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2673     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2674 {
2675 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2676 
2677 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2678 #ifdef	INVARIANTS
2679 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2680 #endif
2681 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2682 	/* Record IOC Status and Response Code of TMF for any waiters. */
2683 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2684 	req->ResponseCode = tmf_reply->ResponseCode;
2685 
2686 	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2687 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2688 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2689 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2690 		req->state |= REQ_STATE_DONE;
2691 		wakeup(req);
2692 	} else {
2693 		mpt->tmf_req->state = REQ_STATE_FREE;
2694 	}
2695 	return (TRUE);
2696 }
2697 
2698 /*
2699  * XXX: Move to definitions file
2700  */
2701 #define	ELS	0x22
2702 #define	FC4LS	0x32
2703 #define	ABTS	0x81
2704 #define	BA_ACC	0x84
2705 
2706 #define	LS_RJT	0x01
2707 #define	LS_ACC	0x02
2708 #define	PLOGI	0x03
2709 #define	LOGO	0x05
2710 #define SRR	0x14
2711 #define PRLI	0x20
2712 #define PRLO	0x21
2713 #define ADISC	0x52
2714 #define RSCN	0x61
2715 
2716 static void
2717 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2718     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2719 {
2720 	uint32_t fl;
2721 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2722 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2723 
2724 	/*
2725 	 * We are going to reuse the ELS request to send this response back.
2726 	 */
2727 	rsp = &tmp;
2728 	memset(rsp, 0, sizeof(*rsp));
2729 
2730 #ifdef	USE_IMMEDIATE_LINK_DATA
2731 	/*
2732 	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2733 	 */
2734 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2735 #endif
2736 	rsp->RspLength = length;
2737 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2738 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2739 
2740 	/*
2741 	 * Copy over information from the original reply frame to
2742 	 * it's correct place in the response.
2743 	 */
2744 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2745 
2746 	/*
2747 	 * And now copy back the temporary area to the original frame.
2748 	 */
2749 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2750 	rsp = req->req_vbuf;
2751 
2752 #ifdef	USE_IMMEDIATE_LINK_DATA
2753 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2754 #else
2755 {
2756 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2757 	bus_addr_t paddr = req->req_pbuf;
2758 	paddr += MPT_RQSL(mpt);
2759 
2760 	fl =
2761 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2762 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2763 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2764 		MPI_SGE_FLAGS_END_OF_LIST	|
2765 		MPI_SGE_FLAGS_END_OF_BUFFER;
2766 	fl <<= MPI_SGE_FLAGS_SHIFT;
2767 	fl |= (length);
2768 	se->FlagsLength = htole32(fl);
2769 	se->Address = htole32((uint32_t) paddr);
2770 }
2771 #endif
2772 
2773 	/*
2774 	 * Send it on...
2775 	 */
2776 	mpt_send_cmd(mpt, req);
2777 }
2778 
2779 static int
2780 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2781     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2782 {
2783 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2784 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2785 	U8 rctl;
2786 	U8 type;
2787 	U8 cmd;
2788 	U16 status = le16toh(reply_frame->IOCStatus);
2789 	U32 *elsbuf;
2790 	int ioindex;
2791 	int do_refresh = TRUE;
2792 
2793 #ifdef	INVARIANTS
2794 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2795 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2796 	    req, req->serno, rp->Function));
2797 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2798 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2799 	} else {
2800 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2801 	}
2802 #endif
2803 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2804 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2805 	    req, req->serno, reply_frame, reply_frame->Function);
2806 
2807 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2808 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2809 		    status, reply_frame->Function);
2810 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2811 			/*
2812 			 * XXX: to get around shutdown issue
2813 			 */
2814 			mpt->disabled = 1;
2815 			return (TRUE);
2816 		}
2817 		return (TRUE);
2818 	}
2819 
2820 	/*
2821 	 * If the function of a link service response, we recycle the
2822 	 * response to be a refresh for a new link service request.
2823 	 *
2824 	 * The request pointer is bogus in this case and we have to fetch
2825 	 * it based upon the TransactionContext.
2826 	 */
2827 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2828 		/* Freddie Uncle Charlie Katie */
2829 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2830 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2831 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2832 				break;
2833 			}
2834 
2835 		KASSERT(ioindex < mpt->els_cmds_allocated,
2836 		    ("can't find my mommie!"));
2837 
2838 		/* remove from active list as we're going to re-post it */
2839 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2840 		req->state &= ~REQ_STATE_QUEUED;
2841 		req->state |= REQ_STATE_DONE;
2842 		mpt_fc_post_els(mpt, req, ioindex);
2843 		return (TRUE);
2844 	}
2845 
2846 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2847 		/* remove from active list as we're done */
2848 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2849 		req->state &= ~REQ_STATE_QUEUED;
2850 		req->state |= REQ_STATE_DONE;
2851 		if (req->state & REQ_STATE_TIMEDOUT) {
2852 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2853 			    "Sync Primitive Send Completed After Timeout\n");
2854 			mpt_free_request(mpt, req);
2855 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2856 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2857 			    "Async Primitive Send Complete\n");
2858 			mpt_free_request(mpt, req);
2859 		} else {
2860 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2861 			    "Sync Primitive Send Complete- Waking Waiter\n");
2862 			wakeup(req);
2863 		}
2864 		return (TRUE);
2865 	}
2866 
2867 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2868 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2869 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2870 		    rp->MsgLength, rp->MsgFlags);
2871 		return (TRUE);
2872 	}
2873 
2874 	if (rp->MsgLength <= 5) {
2875 		/*
2876 		 * This is just a ack of an original ELS buffer post
2877 		 */
2878 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2879 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2880 		return (TRUE);
2881 	}
2882 
2883 
2884 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2885 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2886 
2887 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2888 	cmd = be32toh(elsbuf[0]) >> 24;
2889 
2890 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2891 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2892 		return (TRUE);
2893 	}
2894 
2895 	ioindex = le32toh(rp->TransactionContext);
2896 	req = mpt->els_cmd_ptrs[ioindex];
2897 
2898 	if (rctl == ELS && type == 1) {
2899 		switch (cmd) {
2900 		case PRLI:
2901 			/*
2902 			 * Send back a PRLI ACC
2903 			 */
2904 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2905 			    le32toh(rp->Wwn.PortNameHigh),
2906 			    le32toh(rp->Wwn.PortNameLow));
2907 			elsbuf[0] = htobe32(0x02100014);
2908 			elsbuf[1] |= htobe32(0x00000100);
2909 			elsbuf[4] = htobe32(0x00000002);
2910 			if (mpt->role & MPT_ROLE_TARGET)
2911 				elsbuf[4] |= htobe32(0x00000010);
2912 			if (mpt->role & MPT_ROLE_INITIATOR)
2913 				elsbuf[4] |= htobe32(0x00000020);
2914 			/* remove from active list as we're done */
2915 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2916 			req->state &= ~REQ_STATE_QUEUED;
2917 			req->state |= REQ_STATE_DONE;
2918 			mpt_fc_els_send_response(mpt, req, rp, 20);
2919 			do_refresh = FALSE;
2920 			break;
2921 		case PRLO:
2922 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2923 			elsbuf[0] = htobe32(0x02100014);
2924 			elsbuf[1] = htobe32(0x08000100);
2925 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2926 			    le32toh(rp->Wwn.PortNameHigh),
2927 			    le32toh(rp->Wwn.PortNameLow));
2928 			/* remove from active list as we're done */
2929 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2930 			req->state &= ~REQ_STATE_QUEUED;
2931 			req->state |= REQ_STATE_DONE;
2932 			mpt_fc_els_send_response(mpt, req, rp, 20);
2933 			do_refresh = FALSE;
2934 			break;
2935 		default:
2936 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2937 			break;
2938 		}
2939 	} else if (rctl == ABTS && type == 0) {
2940 		uint16_t rx_id = le16toh(rp->Rxid);
2941 		uint16_t ox_id = le16toh(rp->Oxid);
2942 		mpt_tgt_state_t *tgt;
2943 		request_t *tgt_req = NULL;
2944 		union ccb *ccb;
2945 		uint32_t ct_id;
2946 
2947 		mpt_prt(mpt,
2948 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2949 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2950 		    le32toh(rp->Wwn.PortNameLow));
2951 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2952 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2953 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2954 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2955 		} else {
2956 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2957 		}
2958 		if (tgt_req == NULL) {
2959 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2960 			goto skip;
2961 		}
2962 		tgt = MPT_TGT_STATE(mpt, tgt_req);
2963 
2964 		/* Check to make sure we have the correct command. */
2965 		ct_id = GET_IO_INDEX(tgt->reply_desc);
2966 		if (ct_id != rx_id) {
2967 			mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2968 			    "RX_ID received=0x%x, in cmd=0x%x\n", rx_id, ct_id);
2969 			goto skip;
2970 		}
2971 		if (tgt->itag != ox_id) {
2972 			mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2973 			    "OX_ID received=0x%x, in cmd=0x%x\n", ox_id, tgt->itag);
2974 			goto skip;
2975 		}
2976 
2977 		if ((ccb = tgt->ccb) != NULL) {
2978 			mpt_prt(mpt, "CCB (%p): lun %jx flags %x status %x\n",
2979 			    ccb, (uintmax_t)ccb->ccb_h.target_lun,
2980 			    ccb->ccb_h.flags, ccb->ccb_h.status);
2981 		}
2982 		mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2983 		    "%x nxfers %x\n", tgt->state, tgt->resid,
2984 		    tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers);
2985 		if (mpt_abort_target_cmd(mpt, tgt_req))
2986 			mpt_prt(mpt, "unable to start TargetAbort\n");
2987 
2988 skip:
2989 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2990 		elsbuf[0] = htobe32(0);
2991 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2992 		elsbuf[2] = htobe32(0x000ffff);
2993 		/*
2994 		 * Dork with the reply frame so that the response to it
2995 		 * will be correct.
2996 		 */
2997 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2998 		/* remove from active list as we're done */
2999 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3000 		req->state &= ~REQ_STATE_QUEUED;
3001 		req->state |= REQ_STATE_DONE;
3002 		mpt_fc_els_send_response(mpt, req, rp, 12);
3003 		do_refresh = FALSE;
3004 	} else {
3005 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3006 	}
3007 	if (do_refresh == TRUE) {
3008 		/* remove from active list as we're done */
3009 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3010 		req->state &= ~REQ_STATE_QUEUED;
3011 		req->state |= REQ_STATE_DONE;
3012 		mpt_fc_post_els(mpt, req, ioindex);
3013 	}
3014 	return (TRUE);
3015 }
3016 
3017 /*
3018  * Clean up all SCSI Initiator personality state in response
3019  * to a controller reset.
3020  */
3021 static void
3022 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3023 {
3024 
3025 	/*
3026 	 * The pending list is already run down by
3027 	 * the generic handler.  Perform the same
3028 	 * operation on the timed out request list.
3029 	 */
3030 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3031 				   MPI_IOCSTATUS_INVALID_STATE);
3032 
3033 	/*
3034 	 * XXX: We need to repost ELS and Target Command Buffers?
3035 	 */
3036 
3037 	/*
3038 	 * Inform the XPT that a bus reset has occurred.
3039 	 */
3040 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
3041 }
3042 
3043 /*
3044  * Parse additional completion information in the reply
3045  * frame for SCSI I/O requests.
3046  */
3047 static int
3048 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3049 			     MSG_DEFAULT_REPLY *reply_frame)
3050 {
3051 	union ccb *ccb;
3052 	MSG_SCSI_IO_REPLY *scsi_io_reply;
3053 	u_int ioc_status;
3054 	u_int sstate;
3055 
3056 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3057 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3058 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3059 		("MPT SCSI I/O Handler called with incorrect reply type"));
3060 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3061 		("MPT SCSI I/O Handler called with continuation reply"));
3062 
3063 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3064 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
3065 	ioc_status &= MPI_IOCSTATUS_MASK;
3066 	sstate = scsi_io_reply->SCSIState;
3067 
3068 	ccb = req->ccb;
3069 	ccb->csio.resid =
3070 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3071 
3072 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3073 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3074 		uint32_t sense_returned;
3075 
3076 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3077 
3078 		sense_returned = le32toh(scsi_io_reply->SenseCount);
3079 		if (sense_returned < ccb->csio.sense_len)
3080 			ccb->csio.sense_resid = ccb->csio.sense_len -
3081 						sense_returned;
3082 		else
3083 			ccb->csio.sense_resid = 0;
3084 
3085 		bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3086 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3087 		    min(ccb->csio.sense_len, sense_returned));
3088 	}
3089 
3090 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3091 		/*
3092 		 * Tag messages rejected, but non-tagged retry
3093 		 * was successful.
3094 XXXX
3095 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3096 		 */
3097 	}
3098 
3099 	switch(ioc_status) {
3100 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3101 		/*
3102 		 * XXX
3103 		 * Linux driver indicates that a zero
3104 		 * transfer length with this error code
3105 		 * indicates a CRC error.
3106 		 *
3107 		 * No need to swap the bytes for checking
3108 		 * against zero.
3109 		 */
3110 		if (scsi_io_reply->TransferCount == 0) {
3111 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3112 			break;
3113 		}
3114 		/* FALLTHROUGH */
3115 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3116 	case MPI_IOCSTATUS_SUCCESS:
3117 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3118 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3119 			/*
3120 			 * Status was never returned for this transaction.
3121 			 */
3122 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3123 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3124 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3125 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3126 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3127 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3128 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3129 
3130 			/* XXX Handle SPI-Packet and FCP-2 response info. */
3131 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3132 		} else
3133 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3134 		break;
3135 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3136 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3137 		break;
3138 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3139 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3140 		break;
3141 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3142 		/*
3143 		 * Since selection timeouts and "device really not
3144 		 * there" are grouped into this error code, report
3145 		 * selection timeout.  Selection timeouts are
3146 		 * typically retried before giving up on the device
3147 		 * whereas "device not there" errors are considered
3148 		 * unretryable.
3149 		 */
3150 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3151 		break;
3152 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3153 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3154 		break;
3155 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3156 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3157 		break;
3158 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3159 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3160 		break;
3161 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3162 		ccb->ccb_h.status = CAM_UA_TERMIO;
3163 		break;
3164 	case MPI_IOCSTATUS_INVALID_STATE:
3165 		/*
3166 		 * The IOC has been reset.  Emulate a bus reset.
3167 		 */
3168 		/* FALLTHROUGH */
3169 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3170 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3171 		break;
3172 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3173 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3174 		/*
3175 		 * Don't clobber any timeout status that has
3176 		 * already been set for this transaction.  We
3177 		 * want the SCSI layer to be able to differentiate
3178 		 * between the command we aborted due to timeout
3179 		 * and any innocent bystanders.
3180 		 */
3181 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3182 			break;
3183 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3184 		break;
3185 
3186 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3187 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3188 		break;
3189 	case MPI_IOCSTATUS_BUSY:
3190 		mpt_set_ccb_status(ccb, CAM_BUSY);
3191 		break;
3192 	case MPI_IOCSTATUS_INVALID_FUNCTION:
3193 	case MPI_IOCSTATUS_INVALID_SGL:
3194 	case MPI_IOCSTATUS_INTERNAL_ERROR:
3195 	case MPI_IOCSTATUS_INVALID_FIELD:
3196 	default:
3197 		/* XXX
3198 		 * Some of the above may need to kick
3199 		 * of a recovery action!!!!
3200 		 */
3201 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3202 		break;
3203 	}
3204 
3205 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3206 		mpt_freeze_ccb(ccb);
3207 	}
3208 
3209 	return (TRUE);
3210 }
3211 
3212 static void
3213 mpt_action(struct cam_sim *sim, union ccb *ccb)
3214 {
3215 	struct mpt_softc *mpt;
3216 	struct ccb_trans_settings *cts;
3217 	target_id_t tgt;
3218 	lun_id_t lun;
3219 	int raid_passthru;
3220 
3221 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3222 
3223 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
3224 	raid_passthru = (sim == mpt->phydisk_sim);
3225 	MPT_LOCK_ASSERT(mpt);
3226 
3227 	tgt = ccb->ccb_h.target_id;
3228 	lun = ccb->ccb_h.target_lun;
3229 	if (raid_passthru &&
3230 	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
3231 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
3232 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
3233 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3234 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3235 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3236 			xpt_done(ccb);
3237 			return;
3238 		}
3239 	}
3240 	ccb->ccb_h.ccb_mpt_ptr = mpt;
3241 
3242 	switch (ccb->ccb_h.func_code) {
3243 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
3244 		/*
3245 		 * Do a couple of preliminary checks...
3246 		 */
3247 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3248 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3249 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3250 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3251 				break;
3252 			}
3253 		}
3254 		/* Max supported CDB length is 16 bytes */
3255 		/* XXX Unless we implement the new 32byte message type */
3256 		if (ccb->csio.cdb_len >
3257 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3258 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3259 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3260 			break;
3261 		}
3262 #ifdef	MPT_TEST_MULTIPATH
3263 		if (mpt->failure_id == ccb->ccb_h.target_id) {
3264 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3265 			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3266 			break;
3267 		}
3268 #endif
3269 		ccb->csio.scsi_status = SCSI_STATUS_OK;
3270 		mpt_start(sim, ccb);
3271 		return;
3272 
3273 	case XPT_RESET_BUS:
3274 		if (raid_passthru) {
3275 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3276 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3277 			break;
3278 		}
3279 	case XPT_RESET_DEV:
3280 		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3281 			if (bootverbose) {
3282 				xpt_print(ccb->ccb_h.path, "reset bus\n");
3283 			}
3284 		} else {
3285 			xpt_print(ccb->ccb_h.path, "reset device\n");
3286 		}
3287 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3288 
3289 		/*
3290 		 * mpt_bus_reset is always successful in that it
3291 		 * will fall back to a hard reset should a bus
3292 		 * reset attempt fail.
3293 		 */
3294 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3295 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3296 		break;
3297 
3298 	case XPT_ABORT:
3299 	{
3300 		union ccb *accb = ccb->cab.abort_ccb;
3301 		switch (accb->ccb_h.func_code) {
3302 		case XPT_ACCEPT_TARGET_IO:
3303 		case XPT_IMMEDIATE_NOTIFY:
3304 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3305 			break;
3306 		case XPT_CONT_TARGET_IO:
3307 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3308 			ccb->ccb_h.status = CAM_UA_ABORT;
3309 			break;
3310 		case XPT_SCSI_IO:
3311 			ccb->ccb_h.status = CAM_UA_ABORT;
3312 			break;
3313 		default:
3314 			ccb->ccb_h.status = CAM_REQ_INVALID;
3315 			break;
3316 		}
3317 		break;
3318 	}
3319 
3320 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3321 
3322 #define	DP_DISC_ENABLE	0x1
3323 #define	DP_DISC_DISABL	0x2
3324 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
3325 
3326 #define	DP_TQING_ENABLE	0x4
3327 #define	DP_TQING_DISABL	0x8
3328 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
3329 
3330 #define	DP_WIDE		0x10
3331 #define	DP_NARROW	0x20
3332 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
3333 
3334 #define	DP_SYNC		0x40
3335 
3336 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
3337 	{
3338 		struct ccb_trans_settings_scsi *scsi;
3339 		struct ccb_trans_settings_spi *spi;
3340 		uint8_t dval;
3341 		u_int period;
3342 		u_int offset;
3343 		int i, j;
3344 
3345 		cts = &ccb->cts;
3346 
3347 		if (mpt->is_fc || mpt->is_sas) {
3348 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3349 			break;
3350 		}
3351 
3352 		scsi = &cts->proto_specific.scsi;
3353 		spi = &cts->xport_specific.spi;
3354 
3355 		/*
3356 		 * We can be called just to valid transport and proto versions
3357 		 */
3358 		if (scsi->valid == 0 && spi->valid == 0) {
3359 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3360 			break;
3361 		}
3362 
3363 		/*
3364 		 * Skip attempting settings on RAID volume disks.
3365 		 * Other devices on the bus get the normal treatment.
3366 		 */
3367 		if (mpt->phydisk_sim && raid_passthru == 0 &&
3368 		    mpt_is_raid_volume(mpt, tgt) != 0) {
3369 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3370 			    "no transfer settings for RAID vols\n");
3371 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3372 			break;
3373 		}
3374 
3375 		i = mpt->mpt_port_page2.PortSettings &
3376 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3377 		j = mpt->mpt_port_page2.PortFlags &
3378 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3379 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3380 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3381 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
3382 			    "honoring BIOS transfer negotiations\n");
3383 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3384 			break;
3385 		}
3386 
3387 		dval = 0;
3388 		period = 0;
3389 		offset = 0;
3390 
3391 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3392 			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3393 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3394 		}
3395 
3396 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3397 			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3398 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3399 		}
3400 
3401 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3402 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3403 			    DP_WIDE : DP_NARROW;
3404 		}
3405 
3406 		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3407 			dval |= DP_SYNC;
3408 			offset = spi->sync_offset;
3409 		} else {
3410 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3411 			    &mpt->mpt_dev_page1[tgt];
3412 			offset = ptr->RequestedParameters;
3413 			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3414 	    		offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3415 		}
3416 		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3417 			dval |= DP_SYNC;
3418 			period = spi->sync_period;
3419 		} else {
3420 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3421 			    &mpt->mpt_dev_page1[tgt];
3422 			period = ptr->RequestedParameters;
3423 			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3424 	    		period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3425 		}
3426 
3427 		if (dval & DP_DISC_ENABLE) {
3428 			mpt->mpt_disc_enable |= (1 << tgt);
3429 		} else if (dval & DP_DISC_DISABL) {
3430 			mpt->mpt_disc_enable &= ~(1 << tgt);
3431 		}
3432 		if (dval & DP_TQING_ENABLE) {
3433 			mpt->mpt_tag_enable |= (1 << tgt);
3434 		} else if (dval & DP_TQING_DISABL) {
3435 			mpt->mpt_tag_enable &= ~(1 << tgt);
3436 		}
3437 		if (dval & DP_WIDTH) {
3438 			mpt_setwidth(mpt, tgt, 1);
3439 		}
3440 		if (dval & DP_SYNC) {
3441 			mpt_setsync(mpt, tgt, period, offset);
3442 		}
3443 		if (dval == 0) {
3444 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3445 			break;
3446 		}
3447 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3448 		    "set [%d]: 0x%x period 0x%x offset %d\n",
3449 		    tgt, dval, period, offset);
3450 		if (mpt_update_spi_config(mpt, tgt)) {
3451 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3452 		} else {
3453 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3454 		}
3455 		break;
3456 	}
3457 	case XPT_GET_TRAN_SETTINGS:
3458 	{
3459 		struct ccb_trans_settings_scsi *scsi;
3460 		cts = &ccb->cts;
3461 		cts->protocol = PROTO_SCSI;
3462 		if (mpt->is_fc) {
3463 			struct ccb_trans_settings_fc *fc =
3464 			    &cts->xport_specific.fc;
3465 			cts->protocol_version = SCSI_REV_SPC;
3466 			cts->transport = XPORT_FC;
3467 			cts->transport_version = 0;
3468 			if (mpt->mpt_fcport_speed != 0) {
3469 				fc->valid = CTS_FC_VALID_SPEED;
3470 				fc->bitrate = 100000 * mpt->mpt_fcport_speed;
3471 			}
3472 		} else if (mpt->is_sas) {
3473 			struct ccb_trans_settings_sas *sas =
3474 			    &cts->xport_specific.sas;
3475 			cts->protocol_version = SCSI_REV_SPC2;
3476 			cts->transport = XPORT_SAS;
3477 			cts->transport_version = 0;
3478 			sas->valid = CTS_SAS_VALID_SPEED;
3479 			sas->bitrate = 300000;
3480 		} else {
3481 			cts->protocol_version = SCSI_REV_2;
3482 			cts->transport = XPORT_SPI;
3483 			cts->transport_version = 2;
3484 			if (mpt_get_spi_settings(mpt, cts) != 0) {
3485 				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3486 				break;
3487 			}
3488 		}
3489 		scsi = &cts->proto_specific.scsi;
3490 		scsi->valid = CTS_SCSI_VALID_TQ;
3491 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3492 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3493 		break;
3494 	}
3495 	case XPT_CALC_GEOMETRY:
3496 	{
3497 		struct ccb_calc_geometry *ccg;
3498 
3499 		ccg = &ccb->ccg;
3500 		if (ccg->block_size == 0) {
3501 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3502 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3503 			break;
3504 		}
3505 		cam_calc_geometry(ccg, /* extended */ 1);
3506 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3507 		break;
3508 	}
3509 	case XPT_GET_SIM_KNOB:
3510 	{
3511 		struct ccb_sim_knob *kp = &ccb->knob;
3512 
3513 		if (mpt->is_fc) {
3514 			kp->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
3515 			kp->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
3516 			switch (mpt->role) {
3517 			case MPT_ROLE_NONE:
3518 				kp->xport_specific.fc.role = KNOB_ROLE_NONE;
3519 				break;
3520 			case MPT_ROLE_INITIATOR:
3521 				kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
3522 				break;
3523 			case MPT_ROLE_TARGET:
3524 				kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
3525 				break;
3526 			case MPT_ROLE_BOTH:
3527 				kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
3528 				break;
3529 			}
3530 			kp->xport_specific.fc.valid =
3531 			    KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
3532 			ccb->ccb_h.status = CAM_REQ_CMP;
3533 		} else {
3534 			ccb->ccb_h.status = CAM_REQ_INVALID;
3535 		}
3536 		xpt_done(ccb);
3537 		break;
3538 	}
3539 	case XPT_PATH_INQ:		/* Path routing inquiry */
3540 	{
3541 		struct ccb_pathinq *cpi = &ccb->cpi;
3542 
3543 		cpi->version_num = 1;
3544 		cpi->target_sprt = 0;
3545 		cpi->hba_eng_cnt = 0;
3546 		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3547 		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3548 		/*
3549 		 * FC cards report MAX_DEVICES of 512, but
3550 		 * the MSG_SCSI_IO_REQUEST target id field
3551 		 * is only 8 bits. Until we fix the driver
3552 		 * to support 'channels' for bus overflow,
3553 		 * just limit it.
3554 		 */
3555 		if (cpi->max_target > 255) {
3556 			cpi->max_target = 255;
3557 		}
3558 
3559 		/*
3560 		 * VMware ESX reports > 16 devices and then dies when we probe.
3561 		 */
3562 		if (mpt->is_spi && cpi->max_target > 15) {
3563 			cpi->max_target = 15;
3564 		}
3565 		if (mpt->is_spi)
3566 			cpi->max_lun = 7;
3567 		else
3568 			cpi->max_lun = MPT_MAX_LUNS;
3569 		cpi->initiator_id = mpt->mpt_ini_id;
3570 		cpi->bus_id = cam_sim_bus(sim);
3571 
3572 		/*
3573 		 * The base speed is the speed of the underlying connection.
3574 		 */
3575 		cpi->protocol = PROTO_SCSI;
3576 		if (mpt->is_fc) {
3577 			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
3578 			    PIM_EXTLUNS;
3579 			cpi->base_transfer_speed = 100000;
3580 			cpi->hba_inquiry = PI_TAG_ABLE;
3581 			cpi->transport = XPORT_FC;
3582 			cpi->transport_version = 0;
3583 			cpi->protocol_version = SCSI_REV_SPC;
3584 			cpi->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
3585 			cpi->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
3586 			cpi->xport_specific.fc.port = mpt->scinfo.fc.portid;
3587 			cpi->xport_specific.fc.bitrate =
3588 			    100000 * mpt->mpt_fcport_speed;
3589 		} else if (mpt->is_sas) {
3590 			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
3591 			    PIM_EXTLUNS;
3592 			cpi->base_transfer_speed = 300000;
3593 			cpi->hba_inquiry = PI_TAG_ABLE;
3594 			cpi->transport = XPORT_SAS;
3595 			cpi->transport_version = 0;
3596 			cpi->protocol_version = SCSI_REV_SPC2;
3597 		} else {
3598 			cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED |
3599 			    PIM_EXTLUNS;
3600 			cpi->base_transfer_speed = 3300;
3601 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3602 			cpi->transport = XPORT_SPI;
3603 			cpi->transport_version = 2;
3604 			cpi->protocol_version = SCSI_REV_2;
3605 		}
3606 
3607 		/*
3608 		 * We give our fake RAID passhtru bus a width that is MaxVolumes
3609 		 * wide and restrict it to one lun.
3610 		 */
3611 		if (raid_passthru) {
3612 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3613 			cpi->initiator_id = cpi->max_target + 1;
3614 			cpi->max_lun = 0;
3615 		}
3616 
3617 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3618 			cpi->hba_misc |= PIM_NOINITIATOR;
3619 		}
3620 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3621 			cpi->target_sprt =
3622 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3623 		} else {
3624 			cpi->target_sprt = 0;
3625 		}
3626 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3627 		strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3628 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3629 		cpi->unit_number = cam_sim_unit(sim);
3630 		cpi->ccb_h.status = CAM_REQ_CMP;
3631 		break;
3632 	}
3633 	case XPT_EN_LUN:		/* Enable LUN as a target */
3634 	{
3635 		int result;
3636 
3637 		if (ccb->cel.enable)
3638 			result = mpt_enable_lun(mpt,
3639 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3640 		else
3641 			result = mpt_disable_lun(mpt,
3642 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3643 		if (result == 0) {
3644 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3645 		} else {
3646 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3647 		}
3648 		break;
3649 	}
3650 	case XPT_IMMEDIATE_NOTIFY:	/* Add Immediate Notify Resource */
3651 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3652 	{
3653 		tgt_resource_t *trtp;
3654 		lun_id_t lun = ccb->ccb_h.target_lun;
3655 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3656 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3657 
3658 		if (lun == CAM_LUN_WILDCARD) {
3659 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3660 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3661 				break;
3662 			}
3663 			trtp = &mpt->trt_wildcard;
3664 		} else if (lun >= MPT_MAX_LUNS) {
3665 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3666 			break;
3667 		} else {
3668 			trtp = &mpt->trt[lun];
3669 		}
3670 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3671 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3672 			    "Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun);
3673 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3674 			    sim_links.stqe);
3675 		} else {
3676 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3677 			    "Put FREE INOT lun %jx\n", (uintmax_t)lun);
3678 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3679 			    sim_links.stqe);
3680 		}
3681 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3682 		return;
3683 	}
3684 	case XPT_NOTIFY_ACKNOWLEDGE:	/* Task management request done. */
3685 	{
3686 		request_t *req = MPT_TAG_2_REQ(mpt, ccb->cna2.tag_id);
3687 
3688 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Got Notify ACK\n");
3689 		mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL, 0);
3690 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3691 		break;
3692 	}
3693 	case XPT_CONT_TARGET_IO:
3694 		mpt_target_start_io(mpt, ccb);
3695 		return;
3696 
3697 	default:
3698 		ccb->ccb_h.status = CAM_REQ_INVALID;
3699 		break;
3700 	}
3701 	xpt_done(ccb);
3702 }
3703 
3704 static int
3705 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3706 {
3707 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3708 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3709 	target_id_t tgt;
3710 	uint32_t dval, pval, oval;
3711 	int rv;
3712 
3713 	if (IS_CURRENT_SETTINGS(cts) == 0) {
3714 		tgt = cts->ccb_h.target_id;
3715 	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3716 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3717 			return (-1);
3718 		}
3719 	} else {
3720 		tgt = cts->ccb_h.target_id;
3721 	}
3722 
3723 	/*
3724 	 * We aren't looking at Port Page 2 BIOS settings here-
3725 	 * sometimes these have been known to be bogus XXX.
3726 	 *
3727 	 * For user settings, we pick the max from port page 0
3728 	 *
3729 	 * For current settings we read the current settings out from
3730 	 * device page 0 for that target.
3731 	 */
3732 	if (IS_CURRENT_SETTINGS(cts)) {
3733 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3734 		dval = 0;
3735 
3736 		tmp = mpt->mpt_dev_page0[tgt];
3737 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3738 		    sizeof(tmp), FALSE, 5000);
3739 		if (rv) {
3740 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3741 			return (rv);
3742 		}
3743 		mpt2host_config_page_scsi_device_0(&tmp);
3744 
3745 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3746 		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3747 		    tmp.NegotiatedParameters, tmp.Information);
3748 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3749 		    DP_WIDE : DP_NARROW;
3750 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3751 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3752 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3753 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3754 		oval = tmp.NegotiatedParameters;
3755 		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3756 		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3757 		pval = tmp.NegotiatedParameters;
3758 		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3759 		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3760 		mpt->mpt_dev_page0[tgt] = tmp;
3761 	} else {
3762 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3763 		oval = mpt->mpt_port_page0.Capabilities;
3764 		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3765 		pval = mpt->mpt_port_page0.Capabilities;
3766 		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3767 	}
3768 
3769 	spi->valid = 0;
3770 	scsi->valid = 0;
3771 	spi->flags = 0;
3772 	scsi->flags = 0;
3773 	spi->sync_offset = oval;
3774 	spi->sync_period = pval;
3775 	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3776 	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3777 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3778 	if (dval & DP_WIDE) {
3779 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3780 	} else {
3781 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3782 	}
3783 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3784 		scsi->valid = CTS_SCSI_VALID_TQ;
3785 		if (dval & DP_TQING_ENABLE) {
3786 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3787 		}
3788 		spi->valid |= CTS_SPI_VALID_DISC;
3789 		if (dval & DP_DISC_ENABLE) {
3790 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3791 		}
3792 	}
3793 
3794 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3795 	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3796 	    IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
3797 	return (0);
3798 }
3799 
3800 static void
3801 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3802 {
3803 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3804 
3805 	ptr = &mpt->mpt_dev_page1[tgt];
3806 	if (onoff) {
3807 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3808 	} else {
3809 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3810 	}
3811 }
3812 
3813 static void
3814 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3815 {
3816 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3817 
3818 	ptr = &mpt->mpt_dev_page1[tgt];
3819 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3820 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3821 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3822 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3823 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3824 	if (period == 0) {
3825 		return;
3826 	}
3827 	ptr->RequestedParameters |=
3828 	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3829 	ptr->RequestedParameters |=
3830 	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3831 	if (period < 0xa) {
3832 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3833 	}
3834 	if (period < 0x9) {
3835 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3836 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3837 	}
3838 }
3839 
3840 static int
3841 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3842 {
3843 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3844 	int rv;
3845 
3846 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3847 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3848 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3849 	tmp = mpt->mpt_dev_page1[tgt];
3850 	host2mpt_config_page_scsi_device_1(&tmp);
3851 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3852 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3853 	if (rv) {
3854 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3855 		return (-1);
3856 	}
3857 	return (0);
3858 }
3859 
3860 /****************************** Timeout Recovery ******************************/
3861 static int
3862 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3863 {
3864 	int error;
3865 
3866 	error = kproc_create(mpt_recovery_thread, mpt,
3867 	    &mpt->recovery_thread, /*flags*/0,
3868 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3869 	return (error);
3870 }
3871 
3872 static void
3873 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3874 {
3875 
3876 	if (mpt->recovery_thread == NULL) {
3877 		return;
3878 	}
3879 	mpt->shutdwn_recovery = 1;
3880 	wakeup(mpt);
3881 	/*
3882 	 * Sleep on a slightly different location
3883 	 * for this interlock just for added safety.
3884 	 */
3885 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3886 }
3887 
3888 static void
3889 mpt_recovery_thread(void *arg)
3890 {
3891 	struct mpt_softc *mpt;
3892 
3893 	mpt = (struct mpt_softc *)arg;
3894 	MPT_LOCK(mpt);
3895 	for (;;) {
3896 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3897 			if (mpt->shutdwn_recovery == 0) {
3898 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3899 			}
3900 		}
3901 		if (mpt->shutdwn_recovery != 0) {
3902 			break;
3903 		}
3904 		mpt_recover_commands(mpt);
3905 	}
3906 	mpt->recovery_thread = NULL;
3907 	wakeup(&mpt->recovery_thread);
3908 	MPT_UNLOCK(mpt);
3909 	kproc_exit(0);
3910 }
3911 
3912 static int
3913 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3914     u_int channel, target_id_t target, lun_id_t lun, u_int abort_ctx,
3915     int sleep_ok)
3916 {
3917 	MSG_SCSI_TASK_MGMT *tmf_req;
3918 	int		    error;
3919 
3920 	/*
3921 	 * Wait for any current TMF request to complete.
3922 	 * We're only allowed to issue one TMF at a time.
3923 	 */
3924 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3925 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3926 	if (error != 0) {
3927 		mpt_reset(mpt, TRUE);
3928 		return (ETIMEDOUT);
3929 	}
3930 
3931 	mpt_assign_serno(mpt, mpt->tmf_req);
3932 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3933 
3934 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3935 	memset(tmf_req, 0, sizeof(*tmf_req));
3936 	tmf_req->TargetID = target;
3937 	tmf_req->Bus = channel;
3938 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3939 	tmf_req->TaskType = type;
3940 	tmf_req->MsgFlags = flags;
3941 	tmf_req->MsgContext =
3942 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3943 	be64enc(tmf_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
3944 	tmf_req->TaskMsgContext = abort_ctx;
3945 
3946 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3947 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3948 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3949 	if (mpt->verbose > MPT_PRT_DEBUG) {
3950 		mpt_print_request(tmf_req);
3951 	}
3952 
3953 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3954 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3955 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3956 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3957 	if (error != MPT_OK) {
3958 		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
3959 		mpt->tmf_req->state = REQ_STATE_FREE;
3960 		mpt_reset(mpt, TRUE);
3961 	}
3962 	return (error);
3963 }
3964 
3965 /*
3966  * When a command times out, it is placed on the requeust_timeout_list
3967  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3968  * only a single TMF operation at a time, so we serially abort/bdr, etc,
3969  * the timedout transactions.  The next TMF is issued either by the
3970  * completion handler of the current TMF waking our recovery thread,
3971  * or the TMF timeout handler causing a hard reset sequence.
3972  */
3973 static void
3974 mpt_recover_commands(struct mpt_softc *mpt)
3975 {
3976 	request_t	   *req;
3977 	union ccb	   *ccb;
3978 	int		    error;
3979 
3980 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3981 		/*
3982 		 * No work to do- leave.
3983 		 */
3984 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3985 		return;
3986 	}
3987 
3988 	/*
3989 	 * Flush any commands whose completion coincides with their timeout.
3990 	 */
3991 	mpt_intr(mpt);
3992 
3993 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3994 		/*
3995 		 * The timedout commands have already
3996 		 * completed.  This typically means
3997 		 * that either the timeout value was on
3998 		 * the hairy edge of what the device
3999 		 * requires or - more likely - interrupts
4000 		 * are not happening.
4001 		 */
4002 		mpt_prt(mpt, "Timedout requests already complete. "
4003 		    "Interrupts may not be functioning.\n");
4004 		mpt_enable_ints(mpt);
4005 		return;
4006 	}
4007 
4008 	/*
4009 	 * We have no visibility into the current state of the
4010 	 * controller, so attempt to abort the commands in the
4011 	 * order they timed-out. For initiator commands, we
4012 	 * depend on the reply handler pulling requests off
4013 	 * the timeout list.
4014 	 */
4015 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4016 		uint16_t status;
4017 		uint8_t response;
4018 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4019 
4020 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4021 		    req, req->serno, hdrp->Function);
4022 		ccb = req->ccb;
4023 		if (ccb == NULL) {
4024 			mpt_prt(mpt, "null ccb in timed out request. "
4025 			    "Resetting Controller.\n");
4026 			mpt_reset(mpt, TRUE);
4027 			continue;
4028 		}
4029 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4030 
4031 		/*
4032 		 * Check to see if this is not an initiator command and
4033 		 * deal with it differently if it is.
4034 		 */
4035 		switch (hdrp->Function) {
4036 		case MPI_FUNCTION_SCSI_IO_REQUEST:
4037 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4038 			break;
4039 		default:
4040 			/*
4041 			 * XXX: FIX ME: need to abort target assists...
4042 			 */
4043 			mpt_prt(mpt, "just putting it back on the pend q\n");
4044 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4045 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4046 			    links);
4047 			continue;
4048 		}
4049 
4050 		error = mpt_scsi_send_tmf(mpt,
4051 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4052 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4053 		    htole32(req->index | scsi_io_handler_id), TRUE);
4054 
4055 		if (error != 0) {
4056 			/*
4057 			 * mpt_scsi_send_tmf hard resets on failure, so no
4058 			 * need to do so here.  Our queue should be emptied
4059 			 * by the hard reset.
4060 			 */
4061 			continue;
4062 		}
4063 
4064 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4065 		    REQ_STATE_DONE, TRUE, 500);
4066 
4067 		status = le16toh(mpt->tmf_req->IOCStatus);
4068 		response = mpt->tmf_req->ResponseCode;
4069 		mpt->tmf_req->state = REQ_STATE_FREE;
4070 
4071 		if (error != 0) {
4072 			/*
4073 			 * If we've errored out,, reset the controller.
4074 			 */
4075 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4076 			    "Resetting controller\n");
4077 			mpt_reset(mpt, TRUE);
4078 			continue;
4079 		}
4080 
4081 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4082 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4083 			    "Resetting controller.\n", status);
4084 			mpt_reset(mpt, TRUE);
4085 			continue;
4086 		}
4087 
4088 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4089 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4090 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4091 			    "Resetting controller.\n", response);
4092 			mpt_reset(mpt, TRUE);
4093 			continue;
4094 		}
4095 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4096 	}
4097 }
4098 
4099 /************************ Target Mode Support ****************************/
4100 static void
4101 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4102 {
4103 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4104 	PTR_SGE_TRANSACTION32 tep;
4105 	PTR_SGE_SIMPLE32 se;
4106 	bus_addr_t paddr;
4107 	uint32_t fl;
4108 
4109 	paddr = req->req_pbuf;
4110 	paddr += MPT_RQSL(mpt);
4111 
4112 	fc = req->req_vbuf;
4113 	memset(fc, 0, MPT_REQUEST_AREA);
4114 	fc->BufferCount = 1;
4115 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4116 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
4117 
4118 	/*
4119 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
4120 	 * consist of a TE SGL element (with details length of zero)
4121 	 * followed by a SIMPLE SGL element which holds the address
4122 	 * of the buffer.
4123 	 */
4124 
4125 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4126 
4127 	tep->ContextSize = 4;
4128 	tep->Flags = 0;
4129 	tep->TransactionContext[0] = htole32(ioindex);
4130 
4131 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4132 	fl =
4133 		MPI_SGE_FLAGS_HOST_TO_IOC	|
4134 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4135 		MPI_SGE_FLAGS_LAST_ELEMENT	|
4136 		MPI_SGE_FLAGS_END_OF_LIST	|
4137 		MPI_SGE_FLAGS_END_OF_BUFFER;
4138 	fl <<= MPI_SGE_FLAGS_SHIFT;
4139 	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4140 	se->FlagsLength = htole32(fl);
4141 	se->Address = htole32((uint32_t) paddr);
4142 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4143 	    "add ELS index %d ioindex %d for %p:%u\n",
4144 	    req->index, ioindex, req, req->serno);
4145 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4146 	    ("mpt_fc_post_els: request not locked"));
4147 	mpt_send_cmd(mpt, req);
4148 }
4149 
4150 static void
4151 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4152 {
4153 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4154 	PTR_CMD_BUFFER_DESCRIPTOR cb;
4155 	bus_addr_t paddr;
4156 
4157 	paddr = req->req_pbuf;
4158 	paddr += MPT_RQSL(mpt);
4159 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4160 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4161 
4162 	fc = req->req_vbuf;
4163 	fc->BufferCount = 1;
4164 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4165 	fc->BufferLength = MIN(MPT_REQUEST_AREA - MPT_RQSL(mpt), UINT8_MAX);
4166 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4167 
4168 	cb = &fc->Buffer[0];
4169 	cb->IoIndex = htole16(ioindex);
4170 	cb->u.PhysicalAddress32 = htole32((U32) paddr);
4171 
4172 	mpt_check_doorbell(mpt);
4173 	mpt_send_cmd(mpt, req);
4174 }
4175 
4176 static int
4177 mpt_add_els_buffers(struct mpt_softc *mpt)
4178 {
4179 	int i;
4180 
4181 	if (mpt->is_fc == 0) {
4182 		return (TRUE);
4183 	}
4184 
4185 	if (mpt->els_cmds_allocated) {
4186 		return (TRUE);
4187 	}
4188 
4189 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4190 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4191 
4192 	if (mpt->els_cmd_ptrs == NULL) {
4193 		return (FALSE);
4194 	}
4195 
4196 	/*
4197 	 * Feed the chip some ELS buffer resources
4198 	 */
4199 	for (i = 0; i < MPT_MAX_ELS; i++) {
4200 		request_t *req = mpt_get_request(mpt, FALSE);
4201 		if (req == NULL) {
4202 			break;
4203 		}
4204 		req->state |= REQ_STATE_LOCKED;
4205 		mpt->els_cmd_ptrs[i] = req;
4206 		mpt_fc_post_els(mpt, req, i);
4207 	}
4208 
4209 	if (i == 0) {
4210 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
4211 		free(mpt->els_cmd_ptrs, M_DEVBUF);
4212 		mpt->els_cmd_ptrs = NULL;
4213 		return (FALSE);
4214 	}
4215 	if (i != MPT_MAX_ELS) {
4216 		mpt_lprt(mpt, MPT_PRT_INFO,
4217 		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
4218 	}
4219 	mpt->els_cmds_allocated = i;
4220 	return(TRUE);
4221 }
4222 
4223 static int
4224 mpt_add_target_commands(struct mpt_softc *mpt)
4225 {
4226 	int i, max;
4227 
4228 	if (mpt->tgt_cmd_ptrs) {
4229 		return (TRUE);
4230 	}
4231 
4232 	max = MPT_MAX_REQUESTS(mpt) >> 1;
4233 	if (max > mpt->mpt_max_tgtcmds) {
4234 		max = mpt->mpt_max_tgtcmds;
4235 	}
4236 	mpt->tgt_cmd_ptrs =
4237 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4238 	if (mpt->tgt_cmd_ptrs == NULL) {
4239 		mpt_prt(mpt,
4240 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
4241 		return (FALSE);
4242 	}
4243 
4244 	for (i = 0; i < max; i++) {
4245 		request_t *req;
4246 
4247 		req = mpt_get_request(mpt, FALSE);
4248 		if (req == NULL) {
4249 			break;
4250 		}
4251 		req->state |= REQ_STATE_LOCKED;
4252 		mpt->tgt_cmd_ptrs[i] = req;
4253 		mpt_post_target_command(mpt, req, i);
4254 	}
4255 
4256 
4257 	if (i == 0) {
4258 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4259 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4260 		mpt->tgt_cmd_ptrs = NULL;
4261 		return (FALSE);
4262 	}
4263 
4264 	mpt->tgt_cmds_allocated = i;
4265 
4266 	if (i < max) {
4267 		mpt_lprt(mpt, MPT_PRT_INFO,
4268 		    "added %d of %d target bufs\n", i, max);
4269 	}
4270 	return (i);
4271 }
4272 
4273 static int
4274 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4275 {
4276 
4277 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4278 		mpt->twildcard = 1;
4279 	} else if (lun >= MPT_MAX_LUNS) {
4280 		return (EINVAL);
4281 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4282 		return (EINVAL);
4283 	}
4284 	if (mpt->tenabled == 0) {
4285 		if (mpt->is_fc) {
4286 			(void) mpt_fc_reset_link(mpt, 0);
4287 		}
4288 		mpt->tenabled = 1;
4289 	}
4290 	if (lun == CAM_LUN_WILDCARD) {
4291 		mpt->trt_wildcard.enabled = 1;
4292 	} else {
4293 		mpt->trt[lun].enabled = 1;
4294 	}
4295 	return (0);
4296 }
4297 
4298 static int
4299 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4300 {
4301 	int i;
4302 
4303 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4304 		mpt->twildcard = 0;
4305 	} else if (lun >= MPT_MAX_LUNS) {
4306 		return (EINVAL);
4307 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4308 		return (EINVAL);
4309 	}
4310 	if (lun == CAM_LUN_WILDCARD) {
4311 		mpt->trt_wildcard.enabled = 0;
4312 	} else {
4313 		mpt->trt[lun].enabled = 0;
4314 	}
4315 	for (i = 0; i < MPT_MAX_LUNS; i++) {
4316 		if (mpt->trt[i].enabled) {
4317 			break;
4318 		}
4319 	}
4320 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4321 		if (mpt->is_fc) {
4322 			(void) mpt_fc_reset_link(mpt, 0);
4323 		}
4324 		mpt->tenabled = 0;
4325 	}
4326 	return (0);
4327 }
4328 
4329 /*
4330  * Called with MPT lock held
4331  */
4332 static void
4333 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4334 {
4335 	struct ccb_scsiio *csio = &ccb->csio;
4336 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4337 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4338 
4339 	switch (tgt->state) {
4340 	case TGT_STATE_IN_CAM:
4341 		break;
4342 	case TGT_STATE_MOVING_DATA:
4343 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4344 		xpt_freeze_simq(mpt->sim, 1);
4345 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4346 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4347 		xpt_done(ccb);
4348 		return;
4349 	default:
4350 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4351 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4352 		mpt_tgt_dump_req_state(mpt, cmd_req);
4353 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4354 		xpt_done(ccb);
4355 		return;
4356 	}
4357 
4358 	if (csio->dxfer_len) {
4359 		bus_dmamap_callback_t *cb;
4360 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4361 		request_t *req;
4362 		int error;
4363 
4364 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4365 		    ("dxfer_len %u but direction is NONE", csio->dxfer_len));
4366 
4367 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4368 			if (mpt->outofbeer == 0) {
4369 				mpt->outofbeer = 1;
4370 				xpt_freeze_simq(mpt->sim, 1);
4371 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4372 			}
4373 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4374 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4375 			xpt_done(ccb);
4376 			return;
4377 		}
4378 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4379 		if (sizeof (bus_addr_t) > 4) {
4380 			cb = mpt_execute_req_a64;
4381 		} else {
4382 			cb = mpt_execute_req;
4383 		}
4384 
4385 		req->ccb = ccb;
4386 		ccb->ccb_h.ccb_req_ptr = req;
4387 
4388 		/*
4389 		 * Record the currently active ccb and the
4390 		 * request for it in our target state area.
4391 		 */
4392 		tgt->ccb = ccb;
4393 		tgt->req = req;
4394 
4395 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4396 		ta = req->req_vbuf;
4397 
4398 		if (mpt->is_sas) {
4399 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4400 			     cmd_req->req_vbuf;
4401 			ta->QueueTag = ssp->InitiatorTag;
4402 		} else if (mpt->is_spi) {
4403 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4404 			     cmd_req->req_vbuf;
4405 			ta->QueueTag = sp->Tag;
4406 		}
4407 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4408 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4409 		ta->ReplyWord = htole32(tgt->reply_desc);
4410 		be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(csio->ccb_h.target_lun));
4411 
4412 		ta->RelativeOffset = tgt->bytes_xfered;
4413 		ta->DataLength = ccb->csio.dxfer_len;
4414 		if (ta->DataLength > tgt->resid) {
4415 			ta->DataLength = tgt->resid;
4416 		}
4417 
4418 		/*
4419 		 * XXX Should be done after data transfer completes?
4420 		 */
4421 		csio->resid = csio->dxfer_len - ta->DataLength;
4422 		tgt->resid -= csio->dxfer_len;
4423 		tgt->bytes_xfered += csio->dxfer_len;
4424 
4425 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4426 			ta->TargetAssistFlags |=
4427 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4428 		}
4429 
4430 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4431 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4432 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4433 			ta->TargetAssistFlags |=
4434 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4435 		}
4436 #endif
4437 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4438 
4439 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4440 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4441 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4442 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4443 
4444 		error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
4445 		    cb, req, 0);
4446 		if (error == EINPROGRESS) {
4447 			xpt_freeze_simq(mpt->sim, 1);
4448 			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4449 		}
4450 	} else {
4451 		/*
4452 		 * XXX: I don't know why this seems to happen, but
4453 		 * XXX: completing the CCB seems to make things happy.
4454 		 * XXX: This seems to happen if the initiator requests
4455 		 * XXX: enough data that we have to do multiple CTIOs.
4456 		 */
4457 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4458 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4459 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4460 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4461 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4462 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4463 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4464 			xpt_done(ccb);
4465 			return;
4466 		}
4467 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status,
4468 		    (void *)&csio->sense_data,
4469 		    (ccb->ccb_h.flags & CAM_SEND_SENSE) ?
4470 		     csio->sense_len : 0);
4471 	}
4472 }
4473 
4474 static void
4475 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4476     lun_id_t lun, int send, uint8_t *data, size_t length)
4477 {
4478 	mpt_tgt_state_t *tgt;
4479 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4480 	SGE_SIMPLE32 *se;
4481 	uint32_t flags;
4482 	uint8_t *dptr;
4483 	bus_addr_t pptr;
4484 	request_t *req;
4485 
4486 	/*
4487 	 * We enter with resid set to the data load for the command.
4488 	 */
4489 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4490 	if (length == 0 || tgt->resid == 0) {
4491 		tgt->resid = 0;
4492 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL, 0);
4493 		return;
4494 	}
4495 
4496 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4497 		mpt_prt(mpt, "out of resources- dropping local response\n");
4498 		return;
4499 	}
4500 	tgt->is_local = 1;
4501 
4502 
4503 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4504 	ta = req->req_vbuf;
4505 
4506 	if (mpt->is_sas) {
4507 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4508 		ta->QueueTag = ssp->InitiatorTag;
4509 	} else if (mpt->is_spi) {
4510 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4511 		ta->QueueTag = sp->Tag;
4512 	}
4513 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4514 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4515 	ta->ReplyWord = htole32(tgt->reply_desc);
4516 	be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
4517 	ta->RelativeOffset = 0;
4518 	ta->DataLength = length;
4519 
4520 	dptr = req->req_vbuf;
4521 	dptr += MPT_RQSL(mpt);
4522 	pptr = req->req_pbuf;
4523 	pptr += MPT_RQSL(mpt);
4524 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4525 
4526 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4527 	memset(se, 0,sizeof (*se));
4528 
4529 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4530 	if (send) {
4531 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4532 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4533 	}
4534 	se->Address = pptr;
4535 	MPI_pSGE_SET_LENGTH(se, length);
4536 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4537 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4538 	MPI_pSGE_SET_FLAGS(se, flags);
4539 
4540 	tgt->ccb = NULL;
4541 	tgt->req = req;
4542 	tgt->resid -= length;
4543 	tgt->bytes_xfered = length;
4544 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4545 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4546 #else
4547 	tgt->state = TGT_STATE_MOVING_DATA;
4548 #endif
4549 	mpt_send_cmd(mpt, req);
4550 }
4551 
4552 /*
4553  * Abort queued up CCBs
4554  */
4555 static cam_status
4556 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4557 {
4558 	struct mpt_hdr_stailq *lp;
4559 	struct ccb_hdr *srch;
4560 	union ccb *accb = ccb->cab.abort_ccb;
4561 	tgt_resource_t *trtp;
4562 	mpt_tgt_state_t *tgt;
4563 	request_t *req;
4564 	uint32_t tag;
4565 
4566 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4567 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD)
4568 		trtp = &mpt->trt_wildcard;
4569 	else
4570 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4571 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4572 		lp = &trtp->atios;
4573 		tag = accb->atio.tag_id;
4574 	} else {
4575 		lp = &trtp->inots;
4576 		tag = accb->cin1.tag_id;
4577 	}
4578 
4579 	/* Search the CCB among queued. */
4580 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4581 		if (srch != &accb->ccb_h)
4582 			continue;
4583 		STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4584 		accb->ccb_h.status = CAM_REQ_ABORTED;
4585 		xpt_done(accb);
4586 		return (CAM_REQ_CMP);
4587 	}
4588 
4589 	/* Search the CCB among running. */
4590 	req = MPT_TAG_2_REQ(mpt, tag);
4591 	tgt = MPT_TGT_STATE(mpt, req);
4592 	if (tgt->tag_id == tag) {
4593 		mpt_abort_target_cmd(mpt, req);
4594 		return (CAM_REQ_CMP);
4595 	}
4596 
4597 	return (CAM_UA_ABORT);
4598 }
4599 
4600 /*
4601  * Ask the MPT to abort the current target command
4602  */
4603 static int
4604 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4605 {
4606 	int error;
4607 	request_t *req;
4608 	PTR_MSG_TARGET_MODE_ABORT abtp;
4609 
4610 	req = mpt_get_request(mpt, FALSE);
4611 	if (req == NULL) {
4612 		return (-1);
4613 	}
4614 	abtp = req->req_vbuf;
4615 	memset(abtp, 0, sizeof (*abtp));
4616 
4617 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4618 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4619 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4620 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4621 	error = 0;
4622 	if (mpt->is_fc || mpt->is_sas) {
4623 		mpt_send_cmd(mpt, req);
4624 	} else {
4625 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4626 	}
4627 	return (error);
4628 }
4629 
4630 /*
4631  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4632  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4633  * FC929 to set bogus FC_RSP fields (nonzero residuals
4634  * but w/o RESID fields set). This causes QLogic initiators
4635  * to think maybe that a frame was lost.
4636  *
4637  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4638  * we use allocated requests to do TARGET_ASSIST and we
4639  * need to know when to release them.
4640  */
4641 
4642 static void
4643 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4644     uint8_t status, uint8_t const *sense_data, u_int sense_len)
4645 {
4646 	uint8_t *cmd_vbuf;
4647 	mpt_tgt_state_t *tgt;
4648 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4649 	request_t *req;
4650 	bus_addr_t paddr;
4651 	int resplen = 0;
4652 	uint32_t fl;
4653 
4654 	cmd_vbuf = cmd_req->req_vbuf;
4655 	cmd_vbuf += MPT_RQSL(mpt);
4656 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4657 
4658 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4659 		if (mpt->outofbeer == 0) {
4660 			mpt->outofbeer = 1;
4661 			xpt_freeze_simq(mpt->sim, 1);
4662 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4663 		}
4664 		if (ccb) {
4665 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4666 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4667 			xpt_done(ccb);
4668 		} else {
4669 			mpt_prt(mpt,
4670 			    "could not allocate status request- dropping\n");
4671 		}
4672 		return;
4673 	}
4674 	req->ccb = ccb;
4675 	if (ccb) {
4676 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4677 		ccb->ccb_h.ccb_req_ptr = req;
4678 	}
4679 
4680 	/*
4681 	 * Record the currently active ccb, if any, and the
4682 	 * request for it in our target state area.
4683 	 */
4684 	tgt->ccb = ccb;
4685 	tgt->req = req;
4686 	tgt->state = TGT_STATE_SENDING_STATUS;
4687 
4688 	tp = req->req_vbuf;
4689 	paddr = req->req_pbuf;
4690 	paddr += MPT_RQSL(mpt);
4691 
4692 	memset(tp, 0, sizeof (*tp));
4693 	tp->StatusCode = status;
4694 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4695 	if (mpt->is_fc) {
4696 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4697 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4698 		uint8_t *sts_vbuf;
4699 		uint32_t *rsp;
4700 
4701 		sts_vbuf = req->req_vbuf;
4702 		sts_vbuf += MPT_RQSL(mpt);
4703 		rsp = (uint32_t *) sts_vbuf;
4704 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4705 
4706 		/*
4707 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4708 		 * It has to be big-endian in memory and is organized
4709 		 * in 32 bit words, which are much easier to deal with
4710 		 * as words which are swizzled as needed.
4711 		 *
4712 		 * All we're filling here is the FC_RSP payload.
4713 		 * We may just have the chip synthesize it if
4714 		 * we have no residual and an OK status.
4715 		 *
4716 		 */
4717 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4718 
4719 		rsp[2] = htobe32(status);
4720 #define	MIN_FCP_RESPONSE_SIZE	24
4721 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4722 		resplen = MIN_FCP_RESPONSE_SIZE;
4723 #endif
4724 		if (tgt->resid < 0) {
4725 			rsp[2] |= htobe32(0x400); /* XXXX NEED MNEMONIC!!!! */
4726 			rsp[3] = htobe32(-tgt->resid);
4727 			resplen = MIN_FCP_RESPONSE_SIZE;
4728 		} else if (tgt->resid > 0) {
4729 			rsp[2] |= htobe32(0x800); /* XXXX NEED MNEMONIC!!!! */
4730 			rsp[3] = htobe32(tgt->resid);
4731 			resplen = MIN_FCP_RESPONSE_SIZE;
4732 		}
4733 		if (sense_len > 0) {
4734 			rsp[2] |= htobe32(0x200); /* XXXX NEED MNEMONIC!!!! */
4735 			rsp[4] = htobe32(sense_len);
4736 			memcpy(&rsp[6], sense_data, sense_len);
4737 			resplen = MIN_FCP_RESPONSE_SIZE + sense_len;
4738 		}
4739 	} else if (mpt->is_sas) {
4740 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4741 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4742 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4743 	} else {
4744 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4745 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4746 		tp->QueueTag = htole16(sp->Tag);
4747 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4748 	}
4749 
4750 	tp->ReplyWord = htole32(tgt->reply_desc);
4751 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4752 
4753 #ifdef	WE_CAN_USE_AUTO_REPOST
4754 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4755 #endif
4756 	if (status == SCSI_STATUS_OK && resplen == 0) {
4757 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4758 	} else {
4759 		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4760 		fl = MPI_SGE_FLAGS_HOST_TO_IOC |
4761 		     MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4762 		     MPI_SGE_FLAGS_LAST_ELEMENT |
4763 		     MPI_SGE_FLAGS_END_OF_LIST |
4764 		     MPI_SGE_FLAGS_END_OF_BUFFER;
4765 		fl <<= MPI_SGE_FLAGS_SHIFT;
4766 		fl |= resplen;
4767 		tp->StatusDataSGE.FlagsLength = htole32(fl);
4768 	}
4769 
4770 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4771 	    "STATUS_CCB %p (with%s sense) tag %x req %p:%u resid %u\n",
4772 	    ccb, sense_len > 0 ? "" : "out", tgt->tag_id,
4773 	    req, req->serno, tgt->resid);
4774 	if (mpt->verbose > MPT_PRT_DEBUG)
4775 		mpt_print_request(req->req_vbuf);
4776 	if (ccb) {
4777 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4778 		mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb);
4779 	}
4780 	mpt_send_cmd(mpt, req);
4781 }
4782 
4783 static void
4784 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4785     tgt_resource_t *trtp, int init_id)
4786 {
4787 	struct ccb_immediate_notify *inot;
4788 	mpt_tgt_state_t *tgt;
4789 
4790 	tgt = MPT_TGT_STATE(mpt, req);
4791 	inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots);
4792 	if (inot == NULL) {
4793 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4794 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL, 0);
4795 		return;
4796 	}
4797 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4798 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4799 	    "Get FREE INOT %p lun %jx\n", inot,
4800 	    (uintmax_t)inot->ccb_h.target_lun);
4801 
4802 	inot->initiator_id = init_id;	/* XXX */
4803 	inot->tag_id = tgt->tag_id;
4804 	inot->seq_id = 0;
4805 	/*
4806 	 * This is a somewhat grotesque attempt to map from task management
4807 	 * to old style SCSI messages. God help us all.
4808 	 */
4809 	switch (fc) {
4810 	case MPT_QUERY_TASK_SET:
4811 		inot->arg = MSG_QUERY_TASK_SET;
4812 		break;
4813 	case MPT_ABORT_TASK_SET:
4814 		inot->arg = MSG_ABORT_TASK_SET;
4815 		break;
4816 	case MPT_CLEAR_TASK_SET:
4817 		inot->arg = MSG_CLEAR_TASK_SET;
4818 		break;
4819 	case MPT_QUERY_ASYNC_EVENT:
4820 		inot->arg = MSG_QUERY_ASYNC_EVENT;
4821 		break;
4822 	case MPT_LOGICAL_UNIT_RESET:
4823 		inot->arg = MSG_LOGICAL_UNIT_RESET;
4824 		break;
4825 	case MPT_TARGET_RESET:
4826 		inot->arg = MSG_TARGET_RESET;
4827 		break;
4828 	case MPT_CLEAR_ACA:
4829 		inot->arg = MSG_CLEAR_ACA;
4830 		break;
4831 	default:
4832 		inot->arg = MSG_NOOP;
4833 		break;
4834 	}
4835 	tgt->ccb = (union ccb *) inot;
4836 	inot->ccb_h.status = CAM_MESSAGE_RECV;
4837 	xpt_done((union ccb *)inot);
4838 }
4839 
4840 static void
4841 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4842 {
4843 	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
4844 	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
4845 	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
4846 	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
4847 	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
4848 	     '0',  '0',  '0',  '1'
4849 	};
4850 	struct ccb_accept_tio *atiop;
4851 	lun_id_t lun;
4852 	int tag_action = 0;
4853 	mpt_tgt_state_t *tgt;
4854 	tgt_resource_t *trtp = NULL;
4855 	U8 *lunptr;
4856 	U8 *vbuf;
4857 	U16 ioindex;
4858 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4859 	uint8_t *cdbp;
4860 
4861 	/*
4862 	 * Stash info for the current command where we can get at it later.
4863 	 */
4864 	vbuf = req->req_vbuf;
4865 	vbuf += MPT_RQSL(mpt);
4866 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4867 		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
4868 		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
4869 		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
4870 		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
4871 	}
4872 
4873 	/*
4874 	 * Get our state pointer set up.
4875 	 */
4876 	tgt = MPT_TGT_STATE(mpt, req);
4877 	if (tgt->state != TGT_STATE_LOADED) {
4878 		mpt_tgt_dump_req_state(mpt, req);
4879 		panic("bad target state in mpt_scsi_tgt_atio");
4880 	}
4881 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4882 	tgt->state = TGT_STATE_IN_CAM;
4883 	tgt->reply_desc = reply_desc;
4884 	ioindex = GET_IO_INDEX(reply_desc);
4885 
4886 	/*
4887 	 * The tag we construct here allows us to find the
4888 	 * original request that the command came in with.
4889 	 *
4890 	 * This way we don't have to depend on anything but the
4891 	 * tag to find things when CCBs show back up from CAM.
4892 	 */
4893 	tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4894 
4895 	if (mpt->is_fc) {
4896 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4897 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4898 		if (fc->FcpCntl[2]) {
4899 			/*
4900 			 * Task Management Request
4901 			 */
4902 			switch (fc->FcpCntl[2]) {
4903 			case 0x1:
4904 				fct = MPT_QUERY_TASK_SET;
4905 				break;
4906 			case 0x2:
4907 				fct = MPT_ABORT_TASK_SET;
4908 				break;
4909 			case 0x4:
4910 				fct = MPT_CLEAR_TASK_SET;
4911 				break;
4912 			case 0x8:
4913 				fct = MPT_QUERY_ASYNC_EVENT;
4914 				break;
4915 			case 0x10:
4916 				fct = MPT_LOGICAL_UNIT_RESET;
4917 				break;
4918 			case 0x20:
4919 				fct = MPT_TARGET_RESET;
4920 				break;
4921 			case 0x40:
4922 				fct = MPT_CLEAR_ACA;
4923 				break;
4924 			default:
4925 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4926 				    fc->FcpCntl[2]);
4927 				mpt_scsi_tgt_status(mpt, NULL, req,
4928 				    SCSI_STATUS_OK, NULL, 0);
4929 				return;
4930 			}
4931 		} else {
4932 			switch (fc->FcpCntl[1]) {
4933 			case 0:
4934 				tag_action = MSG_SIMPLE_Q_TAG;
4935 				break;
4936 			case 1:
4937 				tag_action = MSG_HEAD_OF_Q_TAG;
4938 				break;
4939 			case 2:
4940 				tag_action = MSG_ORDERED_Q_TAG;
4941 				break;
4942 			default:
4943 				/*
4944 				 * Bah. Ignore Untagged Queing and ACA
4945 				 */
4946 				tag_action = MSG_SIMPLE_Q_TAG;
4947 				break;
4948 			}
4949 		}
4950 		tgt->resid = be32toh(fc->FcpDl);
4951 		cdbp = fc->FcpCdb;
4952 		lunptr = fc->FcpLun;
4953 		tgt->itag = fc->OptionalOxid;
4954 	} else if (mpt->is_sas) {
4955 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4956 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4957 		cdbp = ssp->CDB;
4958 		lunptr = ssp->LogicalUnitNumber;
4959 		tgt->itag = ssp->InitiatorTag;
4960 	} else {
4961 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4962 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4963 		cdbp = sp->CDB;
4964 		lunptr = sp->LogicalUnitNumber;
4965 		tgt->itag = sp->Tag;
4966 	}
4967 
4968 	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(lunptr));
4969 
4970 	/*
4971 	 * Deal with non-enabled or bad luns here.
4972 	 */
4973 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4974 	    mpt->trt[lun].enabled == 0) {
4975 		if (mpt->twildcard) {
4976 			trtp = &mpt->trt_wildcard;
4977 		} else if (fct == MPT_NIL_TMT_VALUE) {
4978 			/*
4979 			 * In this case, we haven't got an upstream listener
4980 			 * for either a specific lun or wildcard luns. We
4981 			 * have to make some sensible response. For regular
4982 			 * inquiry, just return some NOT HERE inquiry data.
4983 			 * For VPD inquiry, report illegal field in cdb.
4984 			 * For REQUEST SENSE, just return NO SENSE data.
4985 			 * REPORT LUNS gets illegal command.
4986 			 * All other commands get 'no such device'.
4987 			 */
4988 			uint8_t sense[MPT_SENSE_SIZE];
4989 			size_t len;
4990 
4991 			memset(sense, 0, sizeof(sense));
4992 			sense[0] = 0xf0;
4993 			sense[2] = 0x5;
4994 			sense[7] = 0x8;
4995 
4996 			switch (cdbp[0]) {
4997 			case INQUIRY:
4998 			{
4999 				if (cdbp[1] != 0) {
5000 					sense[12] = 0x26;
5001 					sense[13] = 0x01;
5002 					break;
5003 				}
5004 				len = min(tgt->resid, cdbp[4]);
5005 				len = min(len, sizeof (null_iqd));
5006 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5007 				    "local inquiry %ld bytes\n", (long) len);
5008 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5009 				    null_iqd, len);
5010 				return;
5011 			}
5012 			case REQUEST_SENSE:
5013 			{
5014 				sense[2] = 0x0;
5015 				len = min(tgt->resid, cdbp[4]);
5016 				len = min(len, sizeof (sense));
5017 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5018 				    "local reqsense %ld bytes\n", (long) len);
5019 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5020 				    sense, len);
5021 				return;
5022 			}
5023 			case REPORT_LUNS:
5024 				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
5025 				sense[12] = 0x26;
5026 				return;
5027 			default:
5028 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5029 				    "CMD 0x%x to unmanaged lun %jx\n",
5030 				    cdbp[0], (uintmax_t)lun);
5031 				sense[12] = 0x25;
5032 				break;
5033 			}
5034 			mpt_scsi_tgt_status(mpt, NULL, req,
5035 			    SCSI_STATUS_CHECK_COND, sense, sizeof(sense));
5036 			return;
5037 		}
5038 		/* otherwise, leave trtp NULL */
5039 	} else {
5040 		trtp = &mpt->trt[lun];
5041 	}
5042 
5043 	/*
5044 	 * Deal with any task management
5045 	 */
5046 	if (fct != MPT_NIL_TMT_VALUE) {
5047 		if (trtp == NULL) {
5048 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
5049 			    fct);
5050 			mpt_scsi_tgt_status(mpt, NULL, req,
5051 			    SCSI_STATUS_OK, NULL, 0);
5052 		} else {
5053 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5054 			    GET_INITIATOR_INDEX(reply_desc));
5055 		}
5056 		return;
5057 	}
5058 
5059 
5060 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5061 	if (atiop == NULL) {
5062 		mpt_lprt(mpt, MPT_PRT_WARN,
5063 		    "no ATIOs for lun %jx- sending back %s\n", (uintmax_t)lun,
5064 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
5065 		mpt_scsi_tgt_status(mpt, NULL, req,
5066 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5067 		    NULL, 0);
5068 		return;
5069 	}
5070 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5071 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
5072 	    "Get FREE ATIO %p lun %jx\n", atiop,
5073 	    (uintmax_t)atiop->ccb_h.target_lun);
5074 	atiop->ccb_h.ccb_mpt_ptr = mpt;
5075 	atiop->ccb_h.status = CAM_CDB_RECVD;
5076 	atiop->ccb_h.target_lun = lun;
5077 	atiop->sense_len = 0;
5078 	atiop->tag_id = tgt->tag_id;
5079 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
5080 	atiop->cdb_len = 16;
5081 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
5082 	if (tag_action) {
5083 		atiop->tag_action = tag_action;
5084 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
5085 	}
5086 	if (mpt->verbose >= MPT_PRT_DEBUG) {
5087 		int i;
5088 		mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop,
5089 		    (uintmax_t)atiop->ccb_h.target_lun);
5090 		for (i = 0; i < atiop->cdb_len; i++) {
5091 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
5092 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
5093 		}
5094 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
5095 		    tgt->itag, tgt->tag_id, tgt->reply_desc, tgt->resid);
5096 	}
5097 
5098 	xpt_done((union ccb *)atiop);
5099 }
5100 
5101 static void
5102 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
5103 {
5104 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5105 
5106 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
5107 	    "nx %d tag 0x%08x itag 0x%04x state=%d\n", req, req->serno,
5108 	    tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb,
5109 	    tgt->req, tgt->nxfers, tgt->tag_id, tgt->itag, tgt->state);
5110 }
5111 
5112 static void
5113 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
5114 {
5115 
5116 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
5117 	    req->index, req->index, req->state);
5118 	mpt_tgt_dump_tgt_state(mpt, req);
5119 }
5120 
5121 static int
5122 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
5123     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
5124 {
5125 	int dbg;
5126 	union ccb *ccb;
5127 	U16 status;
5128 
5129 	if (reply_frame == NULL) {
5130 		/*
5131 		 * Figure out what the state of the command is.
5132 		 */
5133 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5134 
5135 #ifdef	INVARIANTS
5136 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
5137 		if (tgt->req) {
5138 			mpt_req_not_spcl(mpt, tgt->req,
5139 			    "turbo scsi_tgt_reply associated req", __LINE__);
5140 		}
5141 #endif
5142 		switch(tgt->state) {
5143 		case TGT_STATE_LOADED:
5144 			/*
5145 			 * This is a new command starting.
5146 			 */
5147 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
5148 			break;
5149 		case TGT_STATE_MOVING_DATA:
5150 		{
5151 			ccb = tgt->ccb;
5152 			if (tgt->req == NULL) {
5153 				panic("mpt: turbo target reply with null "
5154 				    "associated request moving data");
5155 				/* NOTREACHED */
5156 			}
5157 			if (ccb == NULL) {
5158 				if (tgt->is_local == 0) {
5159 					panic("mpt: turbo target reply with "
5160 					    "null associated ccb moving data");
5161 					/* NOTREACHED */
5162 				}
5163 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5164 				    "TARGET_ASSIST local done\n");
5165 				TAILQ_REMOVE(&mpt->request_pending_list,
5166 				    tgt->req, links);
5167 				mpt_free_request(mpt, tgt->req);
5168 				tgt->req = NULL;
5169 				mpt_scsi_tgt_status(mpt, NULL, req,
5170 				    0, NULL, 0);
5171 				return (TRUE);
5172 			}
5173 			tgt->ccb = NULL;
5174 			tgt->nxfers++;
5175 			mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
5176 			mpt_lprt(mpt, MPT_PRT_DEBUG,
5177 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5178 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5179 			/*
5180 			 * Free the Target Assist Request
5181 			 */
5182 			KASSERT(tgt->req->ccb == ccb,
5183 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
5184 			    tgt->req->serno, tgt->req->ccb));
5185 			TAILQ_REMOVE(&mpt->request_pending_list,
5186 			    tgt->req, links);
5187 			mpt_free_request(mpt, tgt->req);
5188 			tgt->req = NULL;
5189 
5190 			/*
5191 			 * Do we need to send status now? That is, are
5192 			 * we done with all our data transfers?
5193 			 */
5194 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
5195 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5196 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5197 				KASSERT(ccb->ccb_h.status,
5198 				    ("zero ccb sts at %d", __LINE__));
5199 				tgt->state = TGT_STATE_IN_CAM;
5200 				if (mpt->outofbeer) {
5201 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5202 					mpt->outofbeer = 0;
5203 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5204 				}
5205 				xpt_done(ccb);
5206 				break;
5207 			}
5208 			/*
5209 			 * Otherwise, send status (and sense)
5210 			 */
5211 			mpt_scsi_tgt_status(mpt, ccb, req,
5212 			    ccb->csio.scsi_status,
5213 			    (void *)&ccb->csio.sense_data,
5214 			    (ccb->ccb_h.flags & CAM_SEND_SENSE) ?
5215 			     ccb->csio.sense_len : 0);
5216 			break;
5217 		}
5218 		case TGT_STATE_SENDING_STATUS:
5219 		case TGT_STATE_MOVING_DATA_AND_STATUS:
5220 		{
5221 			int ioindex;
5222 			ccb = tgt->ccb;
5223 
5224 			if (tgt->req == NULL) {
5225 				panic("mpt: turbo target reply with null "
5226 				    "associated request sending status");
5227 				/* NOTREACHED */
5228 			}
5229 
5230 			if (ccb) {
5231 				tgt->ccb = NULL;
5232 				if (tgt->state ==
5233 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
5234 					tgt->nxfers++;
5235 				}
5236 				mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
5237 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5238 					ccb->ccb_h.status |= CAM_SENT_SENSE;
5239 				}
5240 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5241 				    "TARGET_STATUS tag %x sts %x flgs %x req "
5242 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5243 				    ccb->ccb_h.flags, tgt->req);
5244 				/*
5245 				 * Free the Target Send Status Request
5246 				 */
5247 				KASSERT(tgt->req->ccb == ccb,
5248 				    ("tgt->req %p:%u tgt->req->ccb %p",
5249 				    tgt->req, tgt->req->serno, tgt->req->ccb));
5250 				/*
5251 				 * Notify CAM that we're done
5252 				 */
5253 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5254 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5255 				KASSERT(ccb->ccb_h.status,
5256 				    ("ZERO ccb sts at %d", __LINE__));
5257 				tgt->ccb = NULL;
5258 			} else {
5259 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5260 				    "TARGET_STATUS non-CAM for req %p:%u\n",
5261 				    tgt->req, tgt->req->serno);
5262 			}
5263 			TAILQ_REMOVE(&mpt->request_pending_list,
5264 			    tgt->req, links);
5265 			mpt_free_request(mpt, tgt->req);
5266 			tgt->req = NULL;
5267 
5268 			/*
5269 			 * And re-post the Command Buffer.
5270 			 * This will reset the state.
5271 			 */
5272 			ioindex = GET_IO_INDEX(reply_desc);
5273 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5274 			tgt->is_local = 0;
5275 			mpt_post_target_command(mpt, req, ioindex);
5276 
5277 			/*
5278 			 * And post a done for anyone who cares
5279 			 */
5280 			if (ccb) {
5281 				if (mpt->outofbeer) {
5282 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5283 					mpt->outofbeer = 0;
5284 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5285 				}
5286 				xpt_done(ccb);
5287 			}
5288 			break;
5289 		}
5290 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
5291 			tgt->state = TGT_STATE_LOADED;
5292 			break;
5293 		default:
5294 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5295 			    "Reply Function\n", tgt->state);
5296 		}
5297 		return (TRUE);
5298 	}
5299 
5300 	status = le16toh(reply_frame->IOCStatus);
5301 	if (status != MPI_IOCSTATUS_SUCCESS) {
5302 		dbg = MPT_PRT_ERROR;
5303 	} else {
5304 		dbg = MPT_PRT_DEBUG1;
5305 	}
5306 
5307 	mpt_lprt(mpt, dbg,
5308 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5309 	     req, req->serno, reply_frame, reply_frame->Function, status);
5310 
5311 	switch (reply_frame->Function) {
5312 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5313 	{
5314 		mpt_tgt_state_t *tgt;
5315 #ifdef	INVARIANTS
5316 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5317 #endif
5318 		if (status != MPI_IOCSTATUS_SUCCESS) {
5319 			/*
5320 			 * XXX What to do?
5321 			 */
5322 			break;
5323 		}
5324 		tgt = MPT_TGT_STATE(mpt, req);
5325 		KASSERT(tgt->state == TGT_STATE_LOADING,
5326 		    ("bad state 0x%x on reply to buffer post", tgt->state));
5327 		mpt_assign_serno(mpt, req);
5328 		tgt->state = TGT_STATE_LOADED;
5329 		break;
5330 	}
5331 	case MPI_FUNCTION_TARGET_ASSIST:
5332 #ifdef	INVARIANTS
5333 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5334 #endif
5335 		mpt_prt(mpt, "target assist completion\n");
5336 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5337 		mpt_free_request(mpt, req);
5338 		break;
5339 	case MPI_FUNCTION_TARGET_STATUS_SEND:
5340 #ifdef	INVARIANTS
5341 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5342 #endif
5343 		mpt_prt(mpt, "status send completion\n");
5344 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5345 		mpt_free_request(mpt, req);
5346 		break;
5347 	case MPI_FUNCTION_TARGET_MODE_ABORT:
5348 	{
5349 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5350 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5351 		PTR_MSG_TARGET_MODE_ABORT abtp =
5352 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5353 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5354 #ifdef	INVARIANTS
5355 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5356 #endif
5357 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5358 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5359 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5360 		mpt_free_request(mpt, req);
5361 		break;
5362 	}
5363 	default:
5364 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5365 		    "0x%x\n", reply_frame->Function);
5366 		break;
5367 	}
5368 	return (TRUE);
5369 }
5370