xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision 1e413cf93298b5b97441a21d9a50fdcd0ee9945e)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC  adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  *
61  * Support from LSI-Logic has also gone a great deal toward making this a
62  * workable subsystem and is gratefully acknowledged.
63  */
64 /*-
65  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66  * Copyright (c) 2005, WHEEL Sp. z o.o.
67  * Copyright (c) 2004, 2005 Justin T. Gibbs
68  * All rights reserved.
69  *
70  * Redistribution and use in source and binary forms, with or without
71  * modification, are permitted provided that the following conditions are
72  * met:
73  * 1. Redistributions of source code must retain the above copyright
74  *    notice, this list of conditions and the following disclaimer.
75  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76  *    substantially similar to the "NO WARRANTY" disclaimer below
77  *    ("Disclaimer") and any redistribution must be conditioned upon including
78  *    a substantially similar Disclaimer requirement for further binary
79  *    redistribution.
80  * 3. Neither the names of the above listed copyright holders nor the names
81  *    of any contributors may be used to endorse or promote products derived
82  *    from this software without specific prior written permission.
83  *
84  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95  */
96 #include <sys/cdefs.h>
97 __FBSDID("$FreeBSD$");
98 
99 #include <dev/mpt/mpt.h>
100 #include <dev/mpt/mpt_cam.h>
101 #include <dev/mpt/mpt_raid.h>
102 
103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/mpt/mpilib/mpi_init.h"
105 #include "dev/mpt/mpilib/mpi_targ.h"
106 #include "dev/mpt/mpilib/mpi_fc.h"
107 #include "dev/mpt/mpilib/mpi_sas.h"
108 #if __FreeBSD_version >= 500000
109 #include <sys/sysctl.h>
110 #endif
111 #include <sys/callout.h>
112 #include <sys/kthread.h>
113 
114 #if __FreeBSD_version >= 700025
115 #ifndef	CAM_NEW_TRAN_CODE
116 #define	CAM_NEW_TRAN_CODE	1
117 #endif
118 #endif
119 
120 static void mpt_poll(struct cam_sim *);
121 static timeout_t mpt_timeout;
122 static void mpt_action(struct cam_sim *, union ccb *);
123 static int
124 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
125 static void mpt_setwidth(struct mpt_softc *, int, int);
126 static void mpt_setsync(struct mpt_softc *, int, int, int);
127 static int mpt_update_spi_config(struct mpt_softc *, int);
128 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
129 
130 static mpt_reply_handler_t mpt_scsi_reply_handler;
131 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
132 static mpt_reply_handler_t mpt_fc_els_reply_handler;
133 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
134 					MSG_DEFAULT_REPLY *);
135 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
136 static int mpt_fc_reset_link(struct mpt_softc *, int);
137 
138 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
139 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
140 static void mpt_recovery_thread(void *arg);
141 static void mpt_recover_commands(struct mpt_softc *mpt);
142 
143 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
144     u_int, u_int, u_int, int);
145 
146 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
147 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
148 static int mpt_add_els_buffers(struct mpt_softc *mpt);
149 static int mpt_add_target_commands(struct mpt_softc *mpt);
150 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
151 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
152 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
153 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
154 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
155 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
156     uint8_t, uint8_t const *);
157 static void
158 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
159     tgt_resource_t *, int);
160 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
161 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
162 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
163 static mpt_reply_handler_t mpt_sata_pass_reply_handler;
164 
165 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
166 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
167 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
168 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
169 
170 static mpt_probe_handler_t	mpt_cam_probe;
171 static mpt_attach_handler_t	mpt_cam_attach;
172 static mpt_enable_handler_t	mpt_cam_enable;
173 static mpt_ready_handler_t	mpt_cam_ready;
174 static mpt_event_handler_t	mpt_cam_event;
175 static mpt_reset_handler_t	mpt_cam_ioc_reset;
176 static mpt_detach_handler_t	mpt_cam_detach;
177 
178 static struct mpt_personality mpt_cam_personality =
179 {
180 	.name		= "mpt_cam",
181 	.probe		= mpt_cam_probe,
182 	.attach		= mpt_cam_attach,
183 	.enable		= mpt_cam_enable,
184 	.ready		= mpt_cam_ready,
185 	.event		= mpt_cam_event,
186 	.reset		= mpt_cam_ioc_reset,
187 	.detach		= mpt_cam_detach,
188 };
189 
190 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
191 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
192 
193 int mpt_enable_sata_wc = -1;
194 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
195 
196 int
197 mpt_cam_probe(struct mpt_softc *mpt)
198 {
199 	int role;
200 
201 	/*
202 	 * Only attach to nodes that support the initiator or target role
203 	 * (or want to) or have RAID physical devices that need CAM pass-thru
204 	 * support.
205 	 */
206 	if (mpt->do_cfg_role) {
207 		role = mpt->cfg_role;
208 	} else {
209 		role = mpt->role;
210 	}
211 	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
212 	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
213 		return (0);
214 	}
215 	return (ENODEV);
216 }
217 
218 int
219 mpt_cam_attach(struct mpt_softc *mpt)
220 {
221 	struct cam_devq *devq;
222 	mpt_handler_t	 handler;
223 	int		 maxq;
224 	int		 error;
225 
226 	MPT_LOCK(mpt);
227 	TAILQ_INIT(&mpt->request_timeout_list);
228 	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
229 	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
230 
231 	handler.reply_handler = mpt_scsi_reply_handler;
232 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
233 				     &scsi_io_handler_id);
234 	if (error != 0) {
235 		MPT_UNLOCK(mpt);
236 		goto cleanup;
237 	}
238 
239 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
240 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
241 				     &scsi_tmf_handler_id);
242 	if (error != 0) {
243 		MPT_UNLOCK(mpt);
244 		goto cleanup;
245 	}
246 
247 	/*
248 	 * If we're fibre channel and could support target mode, we register
249 	 * an ELS reply handler and give it resources.
250 	 */
251 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
252 		handler.reply_handler = mpt_fc_els_reply_handler;
253 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
254 		    &fc_els_handler_id);
255 		if (error != 0) {
256 			MPT_UNLOCK(mpt);
257 			goto cleanup;
258 		}
259 		if (mpt_add_els_buffers(mpt) == FALSE) {
260 			error = ENOMEM;
261 			MPT_UNLOCK(mpt);
262 			goto cleanup;
263 		}
264 		maxq -= mpt->els_cmds_allocated;
265 	}
266 
267 	/*
268 	 * If we support target mode, we register a reply handler for it,
269 	 * but don't add command resources until we actually enable target
270 	 * mode.
271 	 */
272 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
273 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
274 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
275 		    &mpt->scsi_tgt_handler_id);
276 		if (error != 0) {
277 			MPT_UNLOCK(mpt);
278 			goto cleanup;
279 		}
280 	}
281 
282 	if (mpt->is_sas) {
283 		handler.reply_handler = mpt_sata_pass_reply_handler;
284 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
285 		    &sata_pass_handler_id);
286 		if (error != 0) {
287 			MPT_UNLOCK(mpt);
288 			goto cleanup;
289 		}
290 	}
291 
292 	/*
293 	 * We keep one request reserved for timeout TMF requests.
294 	 */
295 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
296 	if (mpt->tmf_req == NULL) {
297 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
298 		error = ENOMEM;
299 		MPT_UNLOCK(mpt);
300 		goto cleanup;
301 	}
302 
303 	/*
304 	 * Mark the request as free even though not on the free list.
305 	 * There is only one TMF request allowed to be outstanding at
306 	 * a time and the TMF routines perform their own allocation
307 	 * tracking using the standard state flags.
308 	 */
309 	mpt->tmf_req->state = REQ_STATE_FREE;
310 	maxq--;
311 
312 	/*
313 	 * The rest of this is CAM foo, for which we need to drop our lock
314 	 */
315 	MPT_UNLOCK(mpt);
316 
317 	if (mpt_spawn_recovery_thread(mpt) != 0) {
318 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
319 		error = ENOMEM;
320 		goto cleanup;
321 	}
322 
323 	/*
324 	 * Create the device queue for our SIM(s).
325 	 */
326 	devq = cam_simq_alloc(maxq);
327 	if (devq == NULL) {
328 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
329 		error = ENOMEM;
330 		goto cleanup;
331 	}
332 
333 	/*
334 	 * Construct our SIM entry.
335 	 */
336 	mpt->sim =
337 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
338 	if (mpt->sim == NULL) {
339 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
340 		cam_simq_free(devq);
341 		error = ENOMEM;
342 		goto cleanup;
343 	}
344 
345 	/*
346 	 * Register exactly this bus.
347 	 */
348 	MPT_LOCK(mpt);
349 	if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
350 		mpt_prt(mpt, "Bus registration Failed!\n");
351 		error = ENOMEM;
352 		MPT_UNLOCK(mpt);
353 		goto cleanup;
354 	}
355 
356 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
357 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
358 		mpt_prt(mpt, "Unable to allocate Path!\n");
359 		error = ENOMEM;
360 		MPT_UNLOCK(mpt);
361 		goto cleanup;
362 	}
363 	MPT_UNLOCK(mpt);
364 
365 	/*
366 	 * Only register a second bus for RAID physical
367 	 * devices if the controller supports RAID.
368 	 */
369 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
370 		return (0);
371 	}
372 
373 	/*
374 	 * Create a "bus" to export all hidden disks to CAM.
375 	 */
376 	mpt->phydisk_sim =
377 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
378 	if (mpt->phydisk_sim == NULL) {
379 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
380 		error = ENOMEM;
381 		goto cleanup;
382 	}
383 
384 	/*
385 	 * Register this bus.
386 	 */
387 	MPT_LOCK(mpt);
388 	if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) != CAM_SUCCESS) {
389 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
390 		error = ENOMEM;
391 		MPT_UNLOCK(mpt);
392 		goto cleanup;
393 	}
394 
395 	if (xpt_create_path(&mpt->phydisk_path, NULL,
396 	    cam_sim_path(mpt->phydisk_sim),
397 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
398 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
399 		error = ENOMEM;
400 		MPT_UNLOCK(mpt);
401 		goto cleanup;
402 	}
403 	MPT_UNLOCK(mpt);
404 	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
405 	return (0);
406 
407 cleanup:
408 	mpt_cam_detach(mpt);
409 	return (error);
410 }
411 
412 /*
413  * Read FC configuration information
414  */
415 static int
416 mpt_read_config_info_fc(struct mpt_softc *mpt)
417 {
418 	char *topology = NULL;
419 	int rv;
420 
421 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
422 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
423 	if (rv) {
424 		return (-1);
425 	}
426 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
427 		 mpt->mpt_fcport_page0.Header.PageVersion,
428 		 mpt->mpt_fcport_page0.Header.PageLength,
429 		 mpt->mpt_fcport_page0.Header.PageNumber,
430 		 mpt->mpt_fcport_page0.Header.PageType);
431 
432 
433 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
434 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
435 	if (rv) {
436 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
437 		return (-1);
438 	}
439 
440 	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
441 
442 	switch (mpt->mpt_fcport_page0.Flags &
443 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
444 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
445 		mpt->mpt_fcport_speed = 0;
446 		topology = "<NO LOOP>";
447 		break;
448 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
449 		topology = "N-Port";
450 		break;
451 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
452 		topology = "NL-Port";
453 		break;
454 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
455 		topology = "F-Port";
456 		break;
457 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
458 		topology = "FL-Port";
459 		break;
460 	default:
461 		mpt->mpt_fcport_speed = 0;
462 		topology = "?";
463 		break;
464 	}
465 
466 	mpt_lprt(mpt, MPT_PRT_INFO,
467 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
468 	    "Speed %u-Gbit\n", topology,
469 	    mpt->mpt_fcport_page0.WWNN.High,
470 	    mpt->mpt_fcport_page0.WWNN.Low,
471 	    mpt->mpt_fcport_page0.WWPN.High,
472 	    mpt->mpt_fcport_page0.WWPN.Low,
473 	    mpt->mpt_fcport_speed);
474 #if __FreeBSD_version >= 500000
475 	MPT_UNLOCK(mpt);
476 	{
477 		struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
478 		struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
479 
480 		snprintf(mpt->scinfo.fc.wwnn,
481 		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
482 		    mpt->mpt_fcport_page0.WWNN.High,
483 		    mpt->mpt_fcport_page0.WWNN.Low);
484 
485 		snprintf(mpt->scinfo.fc.wwpn,
486 		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
487 		    mpt->mpt_fcport_page0.WWPN.High,
488 		    mpt->mpt_fcport_page0.WWPN.Low);
489 
490 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
491 		       "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
492 		       "World Wide Node Name");
493 
494 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
495 		       "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
496 		       "World Wide Port Name");
497 
498 	}
499 	MPT_LOCK(mpt);
500 #endif
501 	return (0);
502 }
503 
504 /*
505  * Set FC configuration information.
506  */
507 static int
508 mpt_set_initial_config_fc(struct mpt_softc *mpt)
509 {
510 
511 	CONFIG_PAGE_FC_PORT_1 fc;
512 	U32 fl;
513 	int r, doit = 0;
514 	int role;
515 
516 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
517 	    &fc.Header, FALSE, 5000);
518 	if (r) {
519 		mpt_prt(mpt, "failed to read FC page 1 header\n");
520 		return (mpt_fc_reset_link(mpt, 1));
521 	}
522 
523 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
524 	    &fc.Header, sizeof (fc), FALSE, 5000);
525 	if (r) {
526 		mpt_prt(mpt, "failed to read FC page 1\n");
527 		return (mpt_fc_reset_link(mpt, 1));
528 	}
529 
530 	/*
531 	 * Check our flags to make sure we support the role we want.
532 	 */
533 	doit = 0;
534 	role = 0;
535 	fl = le32toh(fc.Flags);;
536 
537 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
538 		role |= MPT_ROLE_INITIATOR;
539 	}
540 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
541 		role |= MPT_ROLE_TARGET;
542 	}
543 
544 	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
545 
546 	if (mpt->do_cfg_role == 0) {
547 		role = mpt->cfg_role;
548 	} else {
549 		mpt->do_cfg_role = 0;
550 	}
551 
552 	if (role != mpt->cfg_role) {
553 		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
554 			if ((role & MPT_ROLE_INITIATOR) == 0) {
555 				mpt_prt(mpt, "adding initiator role\n");
556 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
557 				doit++;
558 			} else {
559 				mpt_prt(mpt, "keeping initiator role\n");
560 			}
561 		} else if (role & MPT_ROLE_INITIATOR) {
562 			mpt_prt(mpt, "removing initiator role\n");
563 			doit++;
564 		}
565 		if (mpt->cfg_role & MPT_ROLE_TARGET) {
566 			if ((role & MPT_ROLE_TARGET) == 0) {
567 				mpt_prt(mpt, "adding target role\n");
568 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
569 				doit++;
570 			} else {
571 				mpt_prt(mpt, "keeping target role\n");
572 			}
573 		} else if (role & MPT_ROLE_TARGET) {
574 			mpt_prt(mpt, "removing target role\n");
575 			doit++;
576 		}
577 		mpt->role = mpt->cfg_role;
578 	}
579 
580 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
581 		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
582 			mpt_prt(mpt, "adding OXID option\n");
583 			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
584 			doit++;
585 		}
586 	}
587 
588 	if (doit) {
589 		fc.Flags = htole32(fl);
590 		r = mpt_write_cfg_page(mpt,
591 		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
592 		    sizeof(fc), FALSE, 5000);
593 		if (r != 0) {
594 			mpt_prt(mpt, "failed to update NVRAM with changes\n");
595 			return (0);
596 		}
597 		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
598 		    "effect until next reboot or IOC reset\n");
599 	}
600 	return (0);
601 }
602 
603 static int
604 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
605 {
606 	ConfigExtendedPageHeader_t hdr;
607 	struct mptsas_phyinfo *phyinfo;
608 	SasIOUnitPage0_t *buffer;
609 	int error, len, i;
610 
611 	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
612 				       0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
613 				       &hdr, 0, 10000);
614 	if (error)
615 		goto out;
616 	if (hdr.ExtPageLength == 0) {
617 		error = ENXIO;
618 		goto out;
619 	}
620 
621 	len = hdr.ExtPageLength * 4;
622 	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
623 	if (buffer == NULL) {
624 		error = ENOMEM;
625 		goto out;
626 	}
627 
628 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
629 				     0, &hdr, buffer, len, 0, 10000);
630 	if (error) {
631 		free(buffer, M_DEVBUF);
632 		goto out;
633 	}
634 
635 	portinfo->num_phys = buffer->NumPhys;
636 	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
637 	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
638 	if (portinfo->phy_info == NULL) {
639 		free(buffer, M_DEVBUF);
640 		error = ENOMEM;
641 		goto out;
642 	}
643 
644 	for (i = 0; i < portinfo->num_phys; i++) {
645 		phyinfo = &portinfo->phy_info[i];
646 		phyinfo->phy_num = i;
647 		phyinfo->port_id = buffer->PhyData[i].Port;
648 		phyinfo->negotiated_link_rate =
649 		    buffer->PhyData[i].NegotiatedLinkRate;
650 		phyinfo->handle =
651 		    le16toh(buffer->PhyData[i].ControllerDevHandle);
652 	}
653 
654 	free(buffer, M_DEVBUF);
655 out:
656 	return (error);
657 }
658 
659 static int
660 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
661 	uint32_t form, uint32_t form_specific)
662 {
663 	ConfigExtendedPageHeader_t hdr;
664 	SasPhyPage0_t *buffer;
665 	int error;
666 
667 	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
668 				       MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
669 				       0, 10000);
670 	if (error)
671 		goto out;
672 	if (hdr.ExtPageLength == 0) {
673 		error = ENXIO;
674 		goto out;
675 	}
676 
677 	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
678 	if (buffer == NULL) {
679 		error = ENOMEM;
680 		goto out;
681 	}
682 
683 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
684 				     form + form_specific, &hdr, buffer,
685 				     sizeof(SasPhyPage0_t), 0, 10000);
686 	if (error) {
687 		free(buffer, M_DEVBUF);
688 		goto out;
689 	}
690 
691 	phy_info->hw_link_rate = buffer->HwLinkRate;
692 	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
693 	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
694 	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
695 
696 	free(buffer, M_DEVBUF);
697 out:
698 	return (error);
699 }
700 
701 static int
702 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
703 	uint32_t form, uint32_t form_specific)
704 {
705 	ConfigExtendedPageHeader_t hdr;
706 	SasDevicePage0_t *buffer;
707 	uint64_t sas_address;
708 	int error = 0;
709 
710 	bzero(device_info, sizeof(*device_info));
711 	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
712 				       MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
713 				       &hdr, 0, 10000);
714 	if (error)
715 		goto out;
716 	if (hdr.ExtPageLength == 0) {
717 		error = ENXIO;
718 		goto out;
719 	}
720 
721 	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
722 	if (buffer == NULL) {
723 		error = ENOMEM;
724 		goto out;
725 	}
726 
727 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
728 				     form + form_specific, &hdr, buffer,
729 				     sizeof(SasDevicePage0_t), 0, 10000);
730 	if (error) {
731 		free(buffer, M_DEVBUF);
732 		goto out;
733 	}
734 
735 	device_info->dev_handle = le16toh(buffer->DevHandle);
736 	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
737 	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
738 	device_info->slot = le16toh(buffer->Slot);
739 	device_info->phy_num = buffer->PhyNum;
740 	device_info->physical_port = buffer->PhysicalPort;
741 	device_info->target_id = buffer->TargetID;
742 	device_info->bus = buffer->Bus;
743 	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
744 	device_info->sas_address = le64toh(sas_address);
745 	device_info->device_info = le32toh(buffer->DeviceInfo);
746 
747 	free(buffer, M_DEVBUF);
748 out:
749 	return (error);
750 }
751 
752 /*
753  * Read SAS configuration information. Nothing to do yet.
754  */
755 static int
756 mpt_read_config_info_sas(struct mpt_softc *mpt)
757 {
758 	struct mptsas_portinfo *portinfo;
759 	struct mptsas_phyinfo *phyinfo;
760 	int error, i;
761 
762 	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
763 	if (portinfo == NULL)
764 		return (ENOMEM);
765 
766 	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
767 	if (error) {
768 		free(portinfo, M_DEVBUF);
769 		return (0);
770 	}
771 
772 	for (i = 0; i < portinfo->num_phys; i++) {
773 		phyinfo = &portinfo->phy_info[i];
774 		error = mptsas_sas_phy_pg0(mpt, phyinfo,
775 		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
776 		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
777 		if (error)
778 			break;
779 		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
780 		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
781 		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
782 		    phyinfo->handle);
783 		if (error)
784 			break;
785 		phyinfo->identify.phy_num = phyinfo->phy_num = i;
786 		if (phyinfo->attached.dev_handle)
787 			error = mptsas_sas_device_pg0(mpt,
788 			    &phyinfo->attached,
789 			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
790 			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
791 			    phyinfo->attached.dev_handle);
792 		if (error)
793 			break;
794 	}
795 	mpt->sas_portinfo = portinfo;
796 	return (0);
797 }
798 
799 static void
800 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
801 	int enabled)
802 {
803 	SataPassthroughRequest_t	*pass;
804 	request_t *req;
805 	int error, status;
806 
807 	req = mpt_get_request(mpt, 0);
808 	if (req == NULL)
809 		return;
810 
811 	pass = req->req_vbuf;
812 	bzero(pass, sizeof(SataPassthroughRequest_t));
813 	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
814 	pass->TargetID = devinfo->target_id;
815 	pass->Bus = devinfo->bus;
816 	pass->PassthroughFlags = 0;
817 	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
818 	pass->DataLength = 0;
819 	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
820 	pass->CommandFIS[0] = 0x27;
821 	pass->CommandFIS[1] = 0x80;
822 	pass->CommandFIS[2] = 0xef;
823 	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
824 	pass->CommandFIS[7] = 0x40;
825 	pass->CommandFIS[15] = 0x08;
826 
827 	mpt_check_doorbell(mpt);
828 	mpt_send_cmd(mpt, req);
829 	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
830 			     10 * 1000);
831 	if (error) {
832 		mpt_free_request(mpt, req);
833 		printf("error %d sending passthrough\n", error);
834 		return;
835 	}
836 
837 	status = le16toh(req->IOCStatus);
838 	if (status != MPI_IOCSTATUS_SUCCESS) {
839 		mpt_free_request(mpt, req);
840 		printf("IOCSTATUS %d\n", status);
841 		return;
842 	}
843 
844 	mpt_free_request(mpt, req);
845 }
846 
847 /*
848  * Set SAS configuration information. Nothing to do yet.
849  */
850 static int
851 mpt_set_initial_config_sas(struct mpt_softc *mpt)
852 {
853 	struct mptsas_phyinfo *phyinfo;
854 	int i;
855 
856 	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
857 		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
858 			phyinfo = &mpt->sas_portinfo->phy_info[i];
859 			if (phyinfo->attached.dev_handle == 0)
860 				continue;
861 			if ((phyinfo->attached.device_info &
862 			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
863 				continue;
864 			if (bootverbose)
865 				device_printf(mpt->dev,
866 				    "%sabling SATA WC on phy %d\n",
867 				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
868 			mptsas_set_sata_wc(mpt, &phyinfo->attached,
869 					   mpt_enable_sata_wc);
870 		}
871 	}
872 
873 	return (0);
874 }
875 
876 static int
877 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
878  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
879 {
880 	if (req != NULL) {
881 
882 		if (reply_frame != NULL) {
883 			MSG_SATA_PASSTHROUGH_REQUEST *pass;
884 			MSG_SATA_PASSTHROUGH_REPLY *reply;
885 
886 			pass = (MSG_SATA_PASSTHROUGH_REQUEST *)req->req_vbuf;
887 			reply = (MSG_SATA_PASSTHROUGH_REPLY *)reply_frame;
888 			req->IOCStatus = le16toh(reply_frame->IOCStatus);
889 		}
890 		req->state &= ~REQ_STATE_QUEUED;
891 		req->state |= REQ_STATE_DONE;
892 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
893 		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
894 			wakeup(req);
895 		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
896 			/*
897 			 * Whew- we can free this request (late completion)
898 			 */
899 			mpt_free_request(mpt, req);
900 		}
901 	}
902 
903 	return (TRUE);
904 }
905 
906 /*
907  * Read SCSI configuration information
908  */
909 static int
910 mpt_read_config_info_spi(struct mpt_softc *mpt)
911 {
912 	int rv, i;
913 
914 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
915 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
916 	if (rv) {
917 		return (-1);
918 	}
919 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
920 	    mpt->mpt_port_page0.Header.PageVersion,
921 	    mpt->mpt_port_page0.Header.PageLength,
922 	    mpt->mpt_port_page0.Header.PageNumber,
923 	    mpt->mpt_port_page0.Header.PageType);
924 
925 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
926 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
927 	if (rv) {
928 		return (-1);
929 	}
930 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
931 	    mpt->mpt_port_page1.Header.PageVersion,
932 	    mpt->mpt_port_page1.Header.PageLength,
933 	    mpt->mpt_port_page1.Header.PageNumber,
934 	    mpt->mpt_port_page1.Header.PageType);
935 
936 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
937 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
938 	if (rv) {
939 		return (-1);
940 	}
941 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
942 	    mpt->mpt_port_page2.Header.PageVersion,
943 	    mpt->mpt_port_page2.Header.PageLength,
944 	    mpt->mpt_port_page2.Header.PageNumber,
945 	    mpt->mpt_port_page2.Header.PageType);
946 
947 	for (i = 0; i < 16; i++) {
948 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
949 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
950 		if (rv) {
951 			return (-1);
952 		}
953 		mpt_lprt(mpt, MPT_PRT_DEBUG,
954 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
955 		    mpt->mpt_dev_page0[i].Header.PageVersion,
956 		    mpt->mpt_dev_page0[i].Header.PageLength,
957 		    mpt->mpt_dev_page0[i].Header.PageNumber,
958 		    mpt->mpt_dev_page0[i].Header.PageType);
959 
960 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
961 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
962 		if (rv) {
963 			return (-1);
964 		}
965 		mpt_lprt(mpt, MPT_PRT_DEBUG,
966 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
967 		    mpt->mpt_dev_page1[i].Header.PageVersion,
968 		    mpt->mpt_dev_page1[i].Header.PageLength,
969 		    mpt->mpt_dev_page1[i].Header.PageNumber,
970 		    mpt->mpt_dev_page1[i].Header.PageType);
971 	}
972 
973 	/*
974 	 * At this point, we don't *have* to fail. As long as we have
975 	 * valid config header information, we can (barely) lurch
976 	 * along.
977 	 */
978 
979 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
980 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
981 	if (rv) {
982 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
983 	} else {
984 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
985 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
986 		    mpt->mpt_port_page0.Capabilities,
987 		    mpt->mpt_port_page0.PhysicalInterface);
988 	}
989 
990 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
991 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
992 	if (rv) {
993 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
994 	} else {
995 		mpt_lprt(mpt, MPT_PRT_DEBUG,
996 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
997 		    mpt->mpt_port_page1.Configuration,
998 		    mpt->mpt_port_page1.OnBusTimerValue);
999 	}
1000 
1001 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
1002 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
1003 	if (rv) {
1004 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
1005 	} else {
1006 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1007 		    "Port Page 2: Flags %x Settings %x\n",
1008 		    mpt->mpt_port_page2.PortFlags,
1009 		    mpt->mpt_port_page2.PortSettings);
1010 		for (i = 0; i < 16; i++) {
1011 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1012 		  	    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1013 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1014 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1015 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1016 		}
1017 	}
1018 
1019 	for (i = 0; i < 16; i++) {
1020 		rv = mpt_read_cur_cfg_page(mpt, i,
1021 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1022 		    FALSE, 5000);
1023 		if (rv) {
1024 			mpt_prt(mpt,
1025 			    "cannot read SPI Target %d Device Page 0\n", i);
1026 			continue;
1027 		}
1028 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1029 		    "target %d page 0: Negotiated Params %x Information %x\n",
1030 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1031 		    mpt->mpt_dev_page0[i].Information);
1032 
1033 		rv = mpt_read_cur_cfg_page(mpt, i,
1034 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1035 		    FALSE, 5000);
1036 		if (rv) {
1037 			mpt_prt(mpt,
1038 			    "cannot read SPI Target %d Device Page 1\n", i);
1039 			continue;
1040 		}
1041 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1042 		    "target %d page 1: Requested Params %x Configuration %x\n",
1043 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
1044 		    mpt->mpt_dev_page1[i].Configuration);
1045 	}
1046 	return (0);
1047 }
1048 
1049 /*
1050  * Validate SPI configuration information.
1051  *
1052  * In particular, validate SPI Port Page 1.
1053  */
1054 static int
1055 mpt_set_initial_config_spi(struct mpt_softc *mpt)
1056 {
1057 	int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
1058 	int error;
1059 
1060 	mpt->mpt_disc_enable = 0xff;
1061 	mpt->mpt_tag_enable = 0;
1062 
1063 	if (mpt->mpt_port_page1.Configuration != pp1val) {
1064 		CONFIG_PAGE_SCSI_PORT_1 tmp;
1065 
1066 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1067 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1068 		tmp = mpt->mpt_port_page1;
1069 		tmp.Configuration = pp1val;
1070 		error = mpt_write_cur_cfg_page(mpt, 0,
1071 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1072 		if (error) {
1073 			return (-1);
1074 		}
1075 		error = mpt_read_cur_cfg_page(mpt, 0,
1076 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1077 		if (error) {
1078 			return (-1);
1079 		}
1080 		if (tmp.Configuration != pp1val) {
1081 			mpt_prt(mpt,
1082 			    "failed to reset SPI Port Page 1 Config value\n");
1083 			return (-1);
1084 		}
1085 		mpt->mpt_port_page1 = tmp;
1086 	}
1087 
1088 	/*
1089 	 * The purpose of this exercise is to get
1090 	 * all targets back to async/narrow.
1091 	 *
1092 	 * We skip this step if the BIOS has already negotiated
1093 	 * speeds with the targets and does not require us to
1094 	 * do Domain Validation.
1095 	 */
1096 	i = mpt->mpt_port_page2.PortSettings &
1097 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1098 	j = mpt->mpt_port_page2.PortFlags &
1099 	    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
1100 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* &&
1101 	    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) {
1102 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1103 		    "honoring BIOS transfer negotiations\n");
1104 	} else {
1105 		for (i = 0; i < 16; i++) {
1106 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
1107 			mpt->mpt_dev_page1[i].Configuration = 0;
1108 			(void) mpt_update_spi_config(mpt, i);
1109 		}
1110 	}
1111 	return (0);
1112 }
1113 
1114 int
1115 mpt_cam_enable(struct mpt_softc *mpt)
1116 {
1117 	int error;
1118 
1119 	MPT_LOCK(mpt);
1120 
1121 	error = EIO;
1122 	if (mpt->is_fc) {
1123 		if (mpt_read_config_info_fc(mpt)) {
1124 			goto out;
1125 		}
1126 		if (mpt_set_initial_config_fc(mpt)) {
1127 			goto out;
1128 		}
1129 	} else if (mpt->is_sas) {
1130 		if (mpt_read_config_info_sas(mpt)) {
1131 			goto out;
1132 		}
1133 		if (mpt_set_initial_config_sas(mpt)) {
1134 			goto out;
1135 		}
1136 	} else if (mpt->is_spi) {
1137 		if (mpt_read_config_info_spi(mpt)) {
1138 			goto out;
1139 		}
1140 		if (mpt_set_initial_config_spi(mpt)) {
1141 			goto out;
1142 		}
1143 	}
1144 	error = 0;
1145 
1146 out:
1147 	MPT_UNLOCK(mpt);
1148 	return (error);
1149 }
1150 
1151 void
1152 mpt_cam_ready(struct mpt_softc *mpt)
1153 {
1154 	/*
1155 	 * If we're in target mode, hang out resources now
1156 	 * so we don't cause the world to hang talking to us.
1157 	 */
1158 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1159 		/*
1160 		 * Try to add some target command resources
1161 		 */
1162 		MPT_LOCK(mpt);
1163 		if (mpt_add_target_commands(mpt) == FALSE) {
1164 			mpt_prt(mpt, "failed to add target commands\n");
1165 		}
1166 		MPT_UNLOCK(mpt);
1167 	}
1168 	mpt->ready = 1;
1169 }
1170 
1171 void
1172 mpt_cam_detach(struct mpt_softc *mpt)
1173 {
1174 	mpt_handler_t handler;
1175 
1176 	MPT_LOCK(mpt);
1177 	mpt->ready = 0;
1178 	mpt_terminate_recovery_thread(mpt);
1179 
1180 	handler.reply_handler = mpt_scsi_reply_handler;
1181 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1182 			       scsi_io_handler_id);
1183 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
1184 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1185 			       scsi_tmf_handler_id);
1186 	handler.reply_handler = mpt_fc_els_reply_handler;
1187 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1188 			       fc_els_handler_id);
1189 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
1190 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1191 			       mpt->scsi_tgt_handler_id);
1192 	handler.reply_handler = mpt_sata_pass_reply_handler;
1193 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1194 			       sata_pass_handler_id);
1195 
1196 	if (mpt->tmf_req != NULL) {
1197 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1198 		mpt_free_request(mpt, mpt->tmf_req);
1199 		mpt->tmf_req = NULL;
1200 	}
1201 	if (mpt->sas_portinfo != NULL) {
1202 		free(mpt->sas_portinfo, M_DEVBUF);
1203 		mpt->sas_portinfo = NULL;
1204 	}
1205 	MPT_UNLOCK(mpt);
1206 
1207 	if (mpt->sim != NULL) {
1208 		xpt_free_path(mpt->path);
1209 		xpt_bus_deregister(cam_sim_path(mpt->sim));
1210 		cam_sim_free(mpt->sim, TRUE);
1211 		mpt->sim = NULL;
1212 	}
1213 
1214 	if (mpt->phydisk_sim != NULL) {
1215 		xpt_free_path(mpt->phydisk_path);
1216 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1217 		cam_sim_free(mpt->phydisk_sim, TRUE);
1218 		mpt->phydisk_sim = NULL;
1219 	}
1220 }
1221 
1222 /* This routine is used after a system crash to dump core onto the swap device.
1223  */
1224 static void
1225 mpt_poll(struct cam_sim *sim)
1226 {
1227 	struct mpt_softc *mpt;
1228 
1229 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
1230 	mpt_intr(mpt);
1231 }
1232 
1233 /*
1234  * Watchdog timeout routine for SCSI requests.
1235  */
1236 static void
1237 mpt_timeout(void *arg)
1238 {
1239 	union ccb	 *ccb;
1240 	struct mpt_softc *mpt;
1241 	request_t	 *req;
1242 
1243 	ccb = (union ccb *)arg;
1244 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1245 
1246 	MPT_LOCK(mpt);
1247 	req = ccb->ccb_h.ccb_req_ptr;
1248 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1249 	    req->serno, ccb, req->ccb);
1250 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
1251 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1252 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1253 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1254 		req->state |= REQ_STATE_TIMEDOUT;
1255 		mpt_wakeup_recovery_thread(mpt);
1256 	}
1257 	MPT_UNLOCK(mpt);
1258 }
1259 
1260 /*
1261  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
1262  *
1263  * Takes a list of physical segments and builds the SGL for SCSI IO command
1264  * and forwards the commard to the IOC after one last check that CAM has not
1265  * aborted the transaction.
1266  */
1267 static void
1268 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1269 {
1270 	request_t *req, *trq;
1271 	char *mpt_off;
1272 	union ccb *ccb;
1273 	struct mpt_softc *mpt;
1274 	int seg, first_lim;
1275 	uint32_t flags, nxt_off;
1276 	void *sglp = NULL;
1277 	MSG_REQUEST_HEADER *hdrp;
1278 	SGE_SIMPLE64 *se;
1279 	SGE_CHAIN64 *ce;
1280 	int istgt = 0;
1281 
1282 	req = (request_t *)arg;
1283 	ccb = req->ccb;
1284 
1285 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1286 	req = ccb->ccb_h.ccb_req_ptr;
1287 
1288 	hdrp = req->req_vbuf;
1289 	mpt_off = req->req_vbuf;
1290 
1291 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1292 		error = EFBIG;
1293 	}
1294 
1295 	if (error == 0) {
1296 		switch (hdrp->Function) {
1297 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1298 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1299 			istgt = 0;
1300 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1301 			break;
1302 		case MPI_FUNCTION_TARGET_ASSIST:
1303 			istgt = 1;
1304 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1305 			break;
1306 		default:
1307 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1308 			    hdrp->Function);
1309 			error = EINVAL;
1310 			break;
1311 		}
1312 	}
1313 
1314 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1315 		error = EFBIG;
1316 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1317 		    nseg, mpt->max_seg_cnt);
1318 	}
1319 
1320 bad:
1321 	if (error != 0) {
1322 		if (error != EFBIG && error != ENOMEM) {
1323 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1324 		}
1325 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1326 			cam_status status;
1327 			mpt_freeze_ccb(ccb);
1328 			if (error == EFBIG) {
1329 				status = CAM_REQ_TOO_BIG;
1330 			} else if (error == ENOMEM) {
1331 				if (mpt->outofbeer == 0) {
1332 					mpt->outofbeer = 1;
1333 					xpt_freeze_simq(mpt->sim, 1);
1334 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1335 					    "FREEZEQ\n");
1336 				}
1337 				status = CAM_REQUEUE_REQ;
1338 			} else {
1339 				status = CAM_REQ_CMP_ERR;
1340 			}
1341 			mpt_set_ccb_status(ccb, status);
1342 		}
1343 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1344 			request_t *cmd_req =
1345 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1346 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1347 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1348 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1349 		}
1350 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1351 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1352 		xpt_done(ccb);
1353 		CAMLOCK_2_MPTLOCK(mpt);
1354 		mpt_free_request(mpt, req);
1355 		MPTLOCK_2_CAMLOCK(mpt);
1356 		return;
1357 	}
1358 
1359 	/*
1360 	 * No data to transfer?
1361 	 * Just make a single simple SGL with zero length.
1362 	 */
1363 
1364 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1365 		int tidx = ((char *)sglp) - mpt_off;
1366 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1367 	}
1368 
1369 	if (nseg == 0) {
1370 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1371 		MPI_pSGE_SET_FLAGS(se1,
1372 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1373 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1374 		se1->FlagsLength = htole32(se1->FlagsLength);
1375 		goto out;
1376 	}
1377 
1378 
1379 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1380 	if (istgt == 0) {
1381 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1382 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1383 		}
1384 	} else {
1385 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1386 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1387 		}
1388 	}
1389 
1390 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1391 		bus_dmasync_op_t op;
1392 		if (istgt == 0) {
1393 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1394 				op = BUS_DMASYNC_PREREAD;
1395 			} else {
1396 				op = BUS_DMASYNC_PREWRITE;
1397 			}
1398 		} else {
1399 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1400 				op = BUS_DMASYNC_PREWRITE;
1401 			} else {
1402 				op = BUS_DMASYNC_PREREAD;
1403 			}
1404 		}
1405 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1406 	}
1407 
1408 	/*
1409 	 * Okay, fill in what we can at the end of the command frame.
1410 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1411 	 * the command frame.
1412 	 *
1413 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1414 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
1415 	 * that.
1416 	 */
1417 
1418 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1419 		first_lim = nseg;
1420 	} else {
1421 		/*
1422 		 * Leave room for CHAIN element
1423 		 */
1424 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1425 	}
1426 
1427 	se = (SGE_SIMPLE64 *) sglp;
1428 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1429 		uint32_t tf;
1430 
1431 		memset(se, 0, sizeof (*se));
1432 		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1433 		if (sizeof(bus_addr_t) > 4) {
1434 			se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
1435 		}
1436 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1437 		tf = flags;
1438 		if (seg == first_lim - 1) {
1439 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1440 		}
1441 		if (seg == nseg - 1) {
1442 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1443 				MPI_SGE_FLAGS_END_OF_BUFFER;
1444 		}
1445 		MPI_pSGE_SET_FLAGS(se, tf);
1446 		se->FlagsLength = htole32(se->FlagsLength);
1447 	}
1448 
1449 	if (seg == nseg) {
1450 		goto out;
1451 	}
1452 
1453 	/*
1454 	 * Tell the IOC where to find the first chain element.
1455 	 */
1456 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1457 	nxt_off = MPT_RQSL(mpt);
1458 	trq = req;
1459 
1460 	/*
1461 	 * Make up the rest of the data segments out of a chain element
1462 	 * (contiained in the current request frame) which points to
1463 	 * SIMPLE64 elements in the next request frame, possibly ending
1464 	 * with *another* chain element (if there's more).
1465 	 */
1466 	while (seg < nseg) {
1467 		int this_seg_lim;
1468 		uint32_t tf, cur_off;
1469 		bus_addr_t chain_list_addr;
1470 
1471 		/*
1472 		 * Point to the chain descriptor. Note that the chain
1473 		 * descriptor is at the end of the *previous* list (whether
1474 		 * chain or simple).
1475 		 */
1476 		ce = (SGE_CHAIN64 *) se;
1477 
1478 		/*
1479 		 * Before we change our current pointer, make  sure we won't
1480 		 * overflow the request area with this frame. Note that we
1481 		 * test against 'greater than' here as it's okay in this case
1482 		 * to have next offset be just outside the request area.
1483 		 */
1484 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1485 			nxt_off = MPT_REQUEST_AREA;
1486 			goto next_chain;
1487 		}
1488 
1489 		/*
1490 		 * Set our SGE element pointer to the beginning of the chain
1491 		 * list and update our next chain list offset.
1492 		 */
1493 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1494 		cur_off = nxt_off;
1495 		nxt_off += MPT_RQSL(mpt);
1496 
1497 		/*
1498 		 * Now initialized the chain descriptor.
1499 		 */
1500 		memset(ce, 0, sizeof (*ce));
1501 
1502 		/*
1503 		 * Get the physical address of the chain list.
1504 		 */
1505 		chain_list_addr = trq->req_pbuf;
1506 		chain_list_addr += cur_off;
1507 		if (sizeof (bus_addr_t) > 4) {
1508 			ce->Address.High =
1509 			    htole32((uint32_t) ((uint64_t)chain_list_addr >> 32));
1510 		}
1511 		ce->Address.Low = htole32((uint32_t) chain_list_addr);
1512 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1513 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1514 
1515 		/*
1516 		 * If we have more than a frame's worth of segments left,
1517 		 * set up the chain list to have the last element be another
1518 		 * chain descriptor.
1519 		 */
1520 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1521 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1522 			/*
1523 			 * The length of the chain is the length in bytes of the
1524 			 * number of segments plus the next chain element.
1525 			 *
1526 			 * The next chain descriptor offset is the length,
1527 			 * in words, of the number of segments.
1528 			 */
1529 			ce->Length = (this_seg_lim - seg) *
1530 			    sizeof (SGE_SIMPLE64);
1531 			ce->NextChainOffset = ce->Length >> 2;
1532 			ce->Length += sizeof (SGE_CHAIN64);
1533 		} else {
1534 			this_seg_lim = nseg;
1535 			ce->Length = (this_seg_lim - seg) *
1536 			    sizeof (SGE_SIMPLE64);
1537 		}
1538 
1539 		/*
1540 		 * Fill in the chain list SGE elements with our segment data.
1541 		 *
1542 		 * If we're the last element in this chain list, set the last
1543 		 * element flag. If we're the completely last element period,
1544 		 * set the end of list and end of buffer flags.
1545 		 */
1546 		while (seg < this_seg_lim) {
1547 			memset(se, 0, sizeof (*se));
1548 			se->Address.Low = htole32(dm_segs->ds_addr);
1549 			if (sizeof (bus_addr_t) > 4) {
1550 				se->Address.High =
1551 				    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
1552 			}
1553 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1554 			tf = flags;
1555 			if (seg ==  this_seg_lim - 1) {
1556 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1557 			}
1558 			if (seg == nseg - 1) {
1559 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1560 					MPI_SGE_FLAGS_END_OF_BUFFER;
1561 			}
1562 			MPI_pSGE_SET_FLAGS(se, tf);
1563 			se->FlagsLength = htole32(se->FlagsLength);
1564 			se++;
1565 			seg++;
1566 			dm_segs++;
1567 		}
1568 
1569     next_chain:
1570 		/*
1571 		 * If we have more segments to do and we've used up all of
1572 		 * the space in a request area, go allocate another one
1573 		 * and chain to that.
1574 		 */
1575 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1576 			request_t *nrq;
1577 
1578 			CAMLOCK_2_MPTLOCK(mpt);
1579 			nrq = mpt_get_request(mpt, FALSE);
1580 			MPTLOCK_2_CAMLOCK(mpt);
1581 
1582 			if (nrq == NULL) {
1583 				error = ENOMEM;
1584 				goto bad;
1585 			}
1586 
1587 			/*
1588 			 * Append the new request area on the tail of our list.
1589 			 */
1590 			if ((trq = req->chain) == NULL) {
1591 				req->chain = nrq;
1592 			} else {
1593 				while (trq->chain != NULL) {
1594 					trq = trq->chain;
1595 				}
1596 				trq->chain = nrq;
1597 			}
1598 			trq = nrq;
1599 			mpt_off = trq->req_vbuf;
1600 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1601 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1602 			}
1603 			nxt_off = 0;
1604 		}
1605 	}
1606 out:
1607 
1608 	/*
1609 	 * Last time we need to check if this CCB needs to be aborted.
1610 	 */
1611 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1612 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1613 			request_t *cmd_req =
1614 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1615 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1616 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1617 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1618 		}
1619 		mpt_prt(mpt,
1620 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1621 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1622 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1623 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1624 		}
1625 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1626 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1627 		xpt_done(ccb);
1628 		CAMLOCK_2_MPTLOCK(mpt);
1629 		mpt_free_request(mpt, req);
1630 		MPTLOCK_2_CAMLOCK(mpt);
1631 		return;
1632 	}
1633 
1634 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1635 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1636 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
1637 		    mpt_timeout, ccb);
1638 	}
1639 	if (mpt->verbose > MPT_PRT_DEBUG) {
1640 		int nc = 0;
1641 		mpt_print_request(req->req_vbuf);
1642 		for (trq = req->chain; trq; trq = trq->chain) {
1643 			printf("  Additional Chain Area %d\n", nc++);
1644 			mpt_dump_sgl(trq->req_vbuf, 0);
1645 		}
1646 	}
1647 
1648 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1649 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1650 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1651 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1652 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1653 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1654 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1655 		} else {
1656 			tgt->state = TGT_STATE_MOVING_DATA;
1657 		}
1658 #else
1659 		tgt->state = TGT_STATE_MOVING_DATA;
1660 #endif
1661 	}
1662 	CAMLOCK_2_MPTLOCK(mpt);
1663 	mpt_send_cmd(mpt, req);
1664 	MPTLOCK_2_CAMLOCK(mpt);
1665 }
1666 
1667 static void
1668 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1669 {
1670 	request_t *req, *trq;
1671 	char *mpt_off;
1672 	union ccb *ccb;
1673 	struct mpt_softc *mpt;
1674 	int seg, first_lim;
1675 	uint32_t flags, nxt_off;
1676 	void *sglp = NULL;
1677 	MSG_REQUEST_HEADER *hdrp;
1678 	SGE_SIMPLE32 *se;
1679 	SGE_CHAIN32 *ce;
1680 	int istgt = 0;
1681 
1682 	req = (request_t *)arg;
1683 	ccb = req->ccb;
1684 
1685 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1686 	req = ccb->ccb_h.ccb_req_ptr;
1687 
1688 	hdrp = req->req_vbuf;
1689 	mpt_off = req->req_vbuf;
1690 
1691 
1692 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1693 		error = EFBIG;
1694 	}
1695 
1696 	if (error == 0) {
1697 		switch (hdrp->Function) {
1698 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1699 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1700 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1701 			break;
1702 		case MPI_FUNCTION_TARGET_ASSIST:
1703 			istgt = 1;
1704 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1705 			break;
1706 		default:
1707 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1708 			    hdrp->Function);
1709 			error = EINVAL;
1710 			break;
1711 		}
1712 	}
1713 
1714 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1715 		error = EFBIG;
1716 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1717 		    nseg, mpt->max_seg_cnt);
1718 	}
1719 
1720 bad:
1721 	if (error != 0) {
1722 		if (error != EFBIG && error != ENOMEM) {
1723 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1724 		}
1725 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1726 			cam_status status;
1727 			mpt_freeze_ccb(ccb);
1728 			if (error == EFBIG) {
1729 				status = CAM_REQ_TOO_BIG;
1730 			} else if (error == ENOMEM) {
1731 				if (mpt->outofbeer == 0) {
1732 					mpt->outofbeer = 1;
1733 					xpt_freeze_simq(mpt->sim, 1);
1734 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1735 					    "FREEZEQ\n");
1736 				}
1737 				status = CAM_REQUEUE_REQ;
1738 			} else {
1739 				status = CAM_REQ_CMP_ERR;
1740 			}
1741 			mpt_set_ccb_status(ccb, status);
1742 		}
1743 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1744 			request_t *cmd_req =
1745 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1746 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1747 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1748 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1749 		}
1750 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1751 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1752 		xpt_done(ccb);
1753 		CAMLOCK_2_MPTLOCK(mpt);
1754 		mpt_free_request(mpt, req);
1755 		MPTLOCK_2_CAMLOCK(mpt);
1756 		return;
1757 	}
1758 
1759 	/*
1760 	 * No data to transfer?
1761 	 * Just make a single simple SGL with zero length.
1762 	 */
1763 
1764 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1765 		int tidx = ((char *)sglp) - mpt_off;
1766 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1767 	}
1768 
1769 	if (nseg == 0) {
1770 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1771 		MPI_pSGE_SET_FLAGS(se1,
1772 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1773 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1774 		se1->FlagsLength = htole32(se1->FlagsLength);
1775 		goto out;
1776 	}
1777 
1778 
1779 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1780 	if (istgt == 0) {
1781 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1782 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1783 		}
1784 	} else {
1785 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1786 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1787 		}
1788 	}
1789 
1790 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1791 		bus_dmasync_op_t op;
1792 		if (istgt) {
1793 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1794 				op = BUS_DMASYNC_PREREAD;
1795 			} else {
1796 				op = BUS_DMASYNC_PREWRITE;
1797 			}
1798 		} else {
1799 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1800 				op = BUS_DMASYNC_PREWRITE;
1801 			} else {
1802 				op = BUS_DMASYNC_PREREAD;
1803 			}
1804 		}
1805 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1806 	}
1807 
1808 	/*
1809 	 * Okay, fill in what we can at the end of the command frame.
1810 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1811 	 * the command frame.
1812 	 *
1813 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1814 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1815 	 * that.
1816 	 */
1817 
1818 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1819 		first_lim = nseg;
1820 	} else {
1821 		/*
1822 		 * Leave room for CHAIN element
1823 		 */
1824 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1825 	}
1826 
1827 	se = (SGE_SIMPLE32 *) sglp;
1828 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1829 		uint32_t tf;
1830 
1831 		memset(se, 0,sizeof (*se));
1832 		se->Address = dm_segs->ds_addr;
1833 
1834 
1835 
1836 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1837 		tf = flags;
1838 		if (seg == first_lim - 1) {
1839 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1840 		}
1841 		if (seg == nseg - 1) {
1842 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1843 				MPI_SGE_FLAGS_END_OF_BUFFER;
1844 		}
1845 		MPI_pSGE_SET_FLAGS(se, tf);
1846 		se->FlagsLength = htole32(se->FlagsLength);
1847 	}
1848 
1849 	if (seg == nseg) {
1850 		goto out;
1851 	}
1852 
1853 	/*
1854 	 * Tell the IOC where to find the first chain element.
1855 	 */
1856 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1857 	nxt_off = MPT_RQSL(mpt);
1858 	trq = req;
1859 
1860 	/*
1861 	 * Make up the rest of the data segments out of a chain element
1862 	 * (contiained in the current request frame) which points to
1863 	 * SIMPLE32 elements in the next request frame, possibly ending
1864 	 * with *another* chain element (if there's more).
1865 	 */
1866 	while (seg < nseg) {
1867 		int this_seg_lim;
1868 		uint32_t tf, cur_off;
1869 		bus_addr_t chain_list_addr;
1870 
1871 		/*
1872 		 * Point to the chain descriptor. Note that the chain
1873 		 * descriptor is at the end of the *previous* list (whether
1874 		 * chain or simple).
1875 		 */
1876 		ce = (SGE_CHAIN32 *) se;
1877 
1878 		/*
1879 		 * Before we change our current pointer, make  sure we won't
1880 		 * overflow the request area with this frame. Note that we
1881 		 * test against 'greater than' here as it's okay in this case
1882 		 * to have next offset be just outside the request area.
1883 		 */
1884 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1885 			nxt_off = MPT_REQUEST_AREA;
1886 			goto next_chain;
1887 		}
1888 
1889 		/*
1890 		 * Set our SGE element pointer to the beginning of the chain
1891 		 * list and update our next chain list offset.
1892 		 */
1893 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1894 		cur_off = nxt_off;
1895 		nxt_off += MPT_RQSL(mpt);
1896 
1897 		/*
1898 		 * Now initialized the chain descriptor.
1899 		 */
1900 		memset(ce, 0, sizeof (*ce));
1901 
1902 		/*
1903 		 * Get the physical address of the chain list.
1904 		 */
1905 		chain_list_addr = trq->req_pbuf;
1906 		chain_list_addr += cur_off;
1907 
1908 
1909 
1910 		ce->Address = chain_list_addr;
1911 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1912 
1913 
1914 		/*
1915 		 * If we have more than a frame's worth of segments left,
1916 		 * set up the chain list to have the last element be another
1917 		 * chain descriptor.
1918 		 */
1919 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1920 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1921 			/*
1922 			 * The length of the chain is the length in bytes of the
1923 			 * number of segments plus the next chain element.
1924 			 *
1925 			 * The next chain descriptor offset is the length,
1926 			 * in words, of the number of segments.
1927 			 */
1928 			ce->Length = (this_seg_lim - seg) *
1929 			    sizeof (SGE_SIMPLE32);
1930 			ce->NextChainOffset = ce->Length >> 2;
1931 			ce->Length += sizeof (SGE_CHAIN32);
1932 		} else {
1933 			this_seg_lim = nseg;
1934 			ce->Length = (this_seg_lim - seg) *
1935 			    sizeof (SGE_SIMPLE32);
1936 		}
1937 
1938 		/*
1939 		 * Fill in the chain list SGE elements with our segment data.
1940 		 *
1941 		 * If we're the last element in this chain list, set the last
1942 		 * element flag. If we're the completely last element period,
1943 		 * set the end of list and end of buffer flags.
1944 		 */
1945 		while (seg < this_seg_lim) {
1946 			memset(se, 0, sizeof (*se));
1947 			se->Address = dm_segs->ds_addr;
1948 
1949 
1950 
1951 
1952 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1953 			tf = flags;
1954 			if (seg ==  this_seg_lim - 1) {
1955 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1956 			}
1957 			if (seg == nseg - 1) {
1958 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1959 					MPI_SGE_FLAGS_END_OF_BUFFER;
1960 			}
1961 			MPI_pSGE_SET_FLAGS(se, tf);
1962 			se->FlagsLength = htole32(se->FlagsLength);
1963 			se++;
1964 			seg++;
1965 			dm_segs++;
1966 		}
1967 
1968     next_chain:
1969 		/*
1970 		 * If we have more segments to do and we've used up all of
1971 		 * the space in a request area, go allocate another one
1972 		 * and chain to that.
1973 		 */
1974 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1975 			request_t *nrq;
1976 
1977 			CAMLOCK_2_MPTLOCK(mpt);
1978 			nrq = mpt_get_request(mpt, FALSE);
1979 			MPTLOCK_2_CAMLOCK(mpt);
1980 
1981 			if (nrq == NULL) {
1982 				error = ENOMEM;
1983 				goto bad;
1984 			}
1985 
1986 			/*
1987 			 * Append the new request area on the tail of our list.
1988 			 */
1989 			if ((trq = req->chain) == NULL) {
1990 				req->chain = nrq;
1991 			} else {
1992 				while (trq->chain != NULL) {
1993 					trq = trq->chain;
1994 				}
1995 				trq->chain = nrq;
1996 			}
1997 			trq = nrq;
1998 			mpt_off = trq->req_vbuf;
1999 			if (mpt->verbose >= MPT_PRT_DEBUG) {
2000 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
2001 			}
2002 			nxt_off = 0;
2003 		}
2004 	}
2005 out:
2006 
2007 	/*
2008 	 * Last time we need to check if this CCB needs to be aborted.
2009 	 */
2010 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2011 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2012 			request_t *cmd_req =
2013 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2014 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2015 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2016 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2017 		}
2018 		mpt_prt(mpt,
2019 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2020 		    ccb->ccb_h.status & CAM_STATUS_MASK);
2021 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2022 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2023 		}
2024 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2025 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2026 		xpt_done(ccb);
2027 		CAMLOCK_2_MPTLOCK(mpt);
2028 		mpt_free_request(mpt, req);
2029 		MPTLOCK_2_CAMLOCK(mpt);
2030 		return;
2031 	}
2032 
2033 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2034 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2035 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
2036 		    mpt_timeout, ccb);
2037 	}
2038 	if (mpt->verbose > MPT_PRT_DEBUG) {
2039 		int nc = 0;
2040 		mpt_print_request(req->req_vbuf);
2041 		for (trq = req->chain; trq; trq = trq->chain) {
2042 			printf("  Additional Chain Area %d\n", nc++);
2043 			mpt_dump_sgl(trq->req_vbuf, 0);
2044 		}
2045 	}
2046 
2047 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2048 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2049 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2050 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
2051 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2052 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2053 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2054 		} else {
2055 			tgt->state = TGT_STATE_MOVING_DATA;
2056 		}
2057 #else
2058 		tgt->state = TGT_STATE_MOVING_DATA;
2059 #endif
2060 	}
2061 	CAMLOCK_2_MPTLOCK(mpt);
2062 	mpt_send_cmd(mpt, req);
2063 	MPTLOCK_2_CAMLOCK(mpt);
2064 }
2065 
2066 static void
2067 mpt_start(struct cam_sim *sim, union ccb *ccb)
2068 {
2069 	request_t *req;
2070 	struct mpt_softc *mpt;
2071 	MSG_SCSI_IO_REQUEST *mpt_req;
2072 	struct ccb_scsiio *csio = &ccb->csio;
2073 	struct ccb_hdr *ccbh = &ccb->ccb_h;
2074 	bus_dmamap_callback_t *cb;
2075 	target_id_t tgt;
2076 	int raid_passthru;
2077 
2078 	/* Get the pointer for the physical addapter */
2079 	mpt = ccb->ccb_h.ccb_mpt_ptr;
2080 	raid_passthru = (sim == mpt->phydisk_sim);
2081 
2082 	CAMLOCK_2_MPTLOCK(mpt);
2083 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2084 		if (mpt->outofbeer == 0) {
2085 			mpt->outofbeer = 1;
2086 			xpt_freeze_simq(mpt->sim, 1);
2087 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2088 		}
2089 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2090 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2091 		MPTLOCK_2_CAMLOCK(mpt);
2092 		xpt_done(ccb);
2093 		return;
2094 	}
2095 #ifdef	INVARIANTS
2096 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2097 #endif
2098 	MPTLOCK_2_CAMLOCK(mpt);
2099 
2100 	if (sizeof (bus_addr_t) > 4) {
2101 		cb = mpt_execute_req_a64;
2102 	} else {
2103 		cb = mpt_execute_req;
2104 	}
2105 
2106 	/*
2107 	 * Link the ccb and the request structure so we can find
2108 	 * the other knowing either the request or the ccb
2109 	 */
2110 	req->ccb = ccb;
2111 	ccb->ccb_h.ccb_req_ptr = req;
2112 
2113 	/* Now we build the command for the IOC */
2114 	mpt_req = req->req_vbuf;
2115 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2116 
2117 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2118 	if (raid_passthru) {
2119 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2120 		CAMLOCK_2_MPTLOCK(mpt);
2121 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2122 			MPTLOCK_2_CAMLOCK(mpt);
2123 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2124 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2125 			xpt_done(ccb);
2126 			return;
2127 		}
2128 		MPTLOCK_2_CAMLOCK(mpt);
2129 		mpt_req->Bus = 0;	/* we never set bus here */
2130 	} else {
2131 		tgt = ccb->ccb_h.target_id;
2132 		mpt_req->Bus = 0;	/* XXX */
2133 
2134 	}
2135 	mpt_req->SenseBufferLength =
2136 		(csio->sense_len < MPT_SENSE_SIZE) ?
2137 		 csio->sense_len : MPT_SENSE_SIZE;
2138 
2139 	/*
2140 	 * We use the message context to find the request structure when we
2141 	 * Get the command completion interrupt from the IOC.
2142 	 */
2143 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2144 
2145 	/* Which physical device to do the I/O on */
2146 	mpt_req->TargetID = tgt;
2147 
2148 	/* We assume a single level LUN type */
2149 	if (ccb->ccb_h.target_lun >= 256) {
2150 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2151 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2152 	} else {
2153 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2154 	}
2155 
2156 	/* Set the direction of the transfer */
2157 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2158 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2159 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2160 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2161 	} else {
2162 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2163 	}
2164 
2165 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2166 		switch(ccb->csio.tag_action) {
2167 		case MSG_HEAD_OF_Q_TAG:
2168 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2169 			break;
2170 		case MSG_ACA_TASK:
2171 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2172 			break;
2173 		case MSG_ORDERED_Q_TAG:
2174 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2175 			break;
2176 		case MSG_SIMPLE_Q_TAG:
2177 		default:
2178 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2179 			break;
2180 		}
2181 	} else {
2182 		if (mpt->is_fc || mpt->is_sas) {
2183 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2184 		} else {
2185 			/* XXX No such thing for a target doing packetized. */
2186 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2187 		}
2188 	}
2189 
2190 	if (mpt->is_spi) {
2191 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2192 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2193 		}
2194 	}
2195 
2196 	/* Copy the scsi command block into place */
2197 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2198 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2199 	} else {
2200 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2201 	}
2202 
2203 	mpt_req->CDBLength = csio->cdb_len;
2204 	mpt_req->DataLength = htole32(csio->dxfer_len);
2205 	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2206 
2207 	/*
2208 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2209 	 */
2210 	if (mpt->verbose == MPT_PRT_DEBUG) {
2211 		U32 df;
2212 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2213 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2214 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2215 		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2216 		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2217 			mpt_prtc(mpt, "(%s %u byte%s ",
2218 			    (df == MPI_SCSIIO_CONTROL_READ)?
2219 			    "read" : "write",  csio->dxfer_len,
2220 			    (csio->dxfer_len == 1)? ")" : "s)");
2221 		}
2222 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2223 		    ccb->ccb_h.target_lun, req, req->serno);
2224 	}
2225 
2226 	/*
2227 	 * If we have any data to send with this command map it into bus space.
2228 	 */
2229 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2230 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
2231 			/*
2232 			 * We've been given a pointer to a single buffer.
2233 			 */
2234 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
2235 				/*
2236 				 * Virtual address that needs to translated into
2237 				 * one or more physical address ranges.
2238 				 */
2239 				int error;
2240 				int s = splsoftvm();
2241 				error = bus_dmamap_load(mpt->buffer_dmat,
2242 				    req->dmap, csio->data_ptr, csio->dxfer_len,
2243 				    cb, req, 0);
2244 				splx(s);
2245 				if (error == EINPROGRESS) {
2246 					/*
2247 					 * So as to maintain ordering,
2248 					 * freeze the controller queue
2249 					 * until our mapping is
2250 					 * returned.
2251 					 */
2252 					xpt_freeze_simq(mpt->sim, 1);
2253 					ccbh->status |= CAM_RELEASE_SIMQ;
2254 				}
2255 			} else {
2256 				/*
2257 				 * We have been given a pointer to single
2258 				 * physical buffer.
2259 				 */
2260 				struct bus_dma_segment seg;
2261 				seg.ds_addr =
2262 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
2263 				seg.ds_len = csio->dxfer_len;
2264 				(*cb)(req, &seg, 1, 0);
2265 			}
2266 		} else {
2267 			/*
2268 			 * We have been given a list of addresses.
2269 			 * This case could be easily supported but they are not
2270 			 * currently generated by the CAM subsystem so there
2271 			 * is no point in wasting the time right now.
2272 			 */
2273 			struct bus_dma_segment *segs;
2274 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
2275 				(*cb)(req, NULL, 0, EFAULT);
2276 			} else {
2277 				/* Just use the segments provided */
2278 				segs = (struct bus_dma_segment *)csio->data_ptr;
2279 				(*cb)(req, segs, csio->sglist_cnt, 0);
2280 			}
2281 		}
2282 	} else {
2283 		(*cb)(req, NULL, 0, 0);
2284 	}
2285 }
2286 
2287 static int
2288 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2289     int sleep_ok)
2290 {
2291 	int   error;
2292 	uint16_t status;
2293 	uint8_t response;
2294 
2295 	error = mpt_scsi_send_tmf(mpt,
2296 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2297 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2298 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2299 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2300 	    0,	/* XXX How do I get the channel ID? */
2301 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2302 	    lun != CAM_LUN_WILDCARD ? lun : 0,
2303 	    0, sleep_ok);
2304 
2305 	if (error != 0) {
2306 		/*
2307 		 * mpt_scsi_send_tmf hard resets on failure, so no
2308 		 * need to do so here.
2309 		 */
2310 		mpt_prt(mpt,
2311 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2312 		return (EIO);
2313 	}
2314 
2315 	/* Wait for bus reset to be processed by the IOC. */
2316 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2317 	    REQ_STATE_DONE, sleep_ok, 5000);
2318 
2319 	status = mpt->tmf_req->IOCStatus;
2320 	response = mpt->tmf_req->ResponseCode;
2321 	mpt->tmf_req->state = REQ_STATE_FREE;
2322 
2323 	if (error) {
2324 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2325 		    "Resetting controller.\n");
2326 		mpt_reset(mpt, TRUE);
2327 		return (ETIMEDOUT);
2328 	}
2329 
2330 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2331 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2332 		    "Resetting controller.\n", status);
2333 		mpt_reset(mpt, TRUE);
2334 		return (EIO);
2335 	}
2336 
2337 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2338 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2339 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2340 		    "Resetting controller.\n", response);
2341 		mpt_reset(mpt, TRUE);
2342 		return (EIO);
2343 	}
2344 	return (0);
2345 }
2346 
2347 static int
2348 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2349 {
2350 	int r = 0;
2351 	request_t *req;
2352 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2353 
2354  	req = mpt_get_request(mpt, FALSE);
2355 	if (req == NULL) {
2356 		return (ENOMEM);
2357 	}
2358 	fc = req->req_vbuf;
2359 	memset(fc, 0, sizeof(*fc));
2360 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2361 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2362 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2363 	mpt_send_cmd(mpt, req);
2364 	if (dowait) {
2365 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2366 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2367 		if (r == 0) {
2368 			mpt_free_request(mpt, req);
2369 		}
2370 	}
2371 	return (r);
2372 }
2373 
2374 static int
2375 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2376 	      MSG_EVENT_NOTIFY_REPLY *msg)
2377 {
2378 	uint32_t data0, data1;
2379 
2380 	data0 = le32toh(msg->Data[0]);
2381 	data1 = le32toh(msg->Data[1]);
2382 	switch(msg->Event & 0xFF) {
2383 	case MPI_EVENT_UNIT_ATTENTION:
2384 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2385 		    (data0 >> 8) & 0xff, data0 & 0xff);
2386 		break;
2387 
2388 	case MPI_EVENT_IOC_BUS_RESET:
2389 		/* We generated a bus reset */
2390 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2391 		    (data0 >> 8) & 0xff);
2392 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2393 		break;
2394 
2395 	case MPI_EVENT_EXT_BUS_RESET:
2396 		/* Someone else generated a bus reset */
2397 		mpt_prt(mpt, "External Bus Reset Detected\n");
2398 		/*
2399 		 * These replies don't return EventData like the MPI
2400 		 * spec says they do
2401 		 */
2402 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2403 		break;
2404 
2405 	case MPI_EVENT_RESCAN:
2406 #if __FreeBSD_version >= 600000
2407 	{
2408 		union ccb *ccb;
2409 		uint32_t pathid;
2410 		/*
2411 		 * In general this means a device has been added to the loop.
2412 		 */
2413 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2414 		if (mpt->ready == 0) {
2415 			break;
2416 		}
2417 		if (mpt->phydisk_sim) {
2418 			pathid = cam_sim_path(mpt->phydisk_sim);
2419 		} else {
2420 			pathid = cam_sim_path(mpt->sim);
2421 		}
2422 		MPTLOCK_2_CAMLOCK(mpt);
2423 		/*
2424 		 * Allocate a CCB, create a wildcard path for this bus,
2425 		 * and schedule a rescan.
2426 		 */
2427 		ccb = xpt_alloc_ccb_nowait();
2428 		if (ccb == NULL) {
2429 			mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2430 			CAMLOCK_2_MPTLOCK(mpt);
2431 			break;
2432 		}
2433 
2434 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2435 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2436 			CAMLOCK_2_MPTLOCK(mpt);
2437 			mpt_prt(mpt, "unable to create path for rescan\n");
2438 			xpt_free_ccb(ccb);
2439 			break;
2440 		}
2441 		xpt_rescan(ccb);
2442 		CAMLOCK_2_MPTLOCK(mpt);
2443 		break;
2444 	}
2445 #else
2446 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2447 		break;
2448 #endif
2449 	case MPI_EVENT_LINK_STATUS_CHANGE:
2450 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2451 		    (data1 >> 8) & 0xff,
2452 		    ((data0 & 0xff) == 0)?  "Failed" : "Active");
2453 		break;
2454 
2455 	case MPI_EVENT_LOOP_STATE_CHANGE:
2456 		switch ((data0 >> 16) & 0xff) {
2457 		case 0x01:
2458 			mpt_prt(mpt,
2459 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2460 			    "(Loop Initialization)\n",
2461 			    (data1 >> 8) & 0xff,
2462 			    (data0 >> 8) & 0xff,
2463 			    (data0     ) & 0xff);
2464 			switch ((data0 >> 8) & 0xff) {
2465 			case 0xF7:
2466 				if ((data0 & 0xff) == 0xF7) {
2467 					mpt_prt(mpt, "Device needs AL_PA\n");
2468 				} else {
2469 					mpt_prt(mpt, "Device %02x doesn't like "
2470 					    "FC performance\n",
2471 					    data0 & 0xFF);
2472 				}
2473 				break;
2474 			case 0xF8:
2475 				if ((data0 & 0xff) == 0xF7) {
2476 					mpt_prt(mpt, "Device had loop failure "
2477 					    "at its receiver prior to acquiring"
2478 					    " AL_PA\n");
2479 				} else {
2480 					mpt_prt(mpt, "Device %02x detected loop"
2481 					    " failure at its receiver\n",
2482 					    data0 & 0xFF);
2483 				}
2484 				break;
2485 			default:
2486 				mpt_prt(mpt, "Device %02x requests that device "
2487 				    "%02x reset itself\n",
2488 				    data0 & 0xFF,
2489 				    (data0 >> 8) & 0xFF);
2490 				break;
2491 			}
2492 			break;
2493 		case 0x02:
2494 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2495 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2496 			    (data1 >> 8) & 0xff, /* Port */
2497 			    (data0 >>  8) & 0xff, /* Character 3 */
2498 			    (data0      ) & 0xff  /* Character 4 */);
2499 			break;
2500 		case 0x03:
2501 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2502 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2503 			    (data1 >> 8) & 0xff, /* Port */
2504 			    (data0 >> 8) & 0xff, /* Character 3 */
2505 			    (data0     ) & 0xff  /* Character 4 */);
2506 			break;
2507 		default:
2508 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2509 			    "FC event (%02x %02x %02x)\n",
2510 			    (data1 >> 8) & 0xff, /* Port */
2511 			    (data0 >> 16) & 0xff, /* Event */
2512 			    (data0 >>  8) & 0xff, /* Character 3 */
2513 			    (data0      ) & 0xff  /* Character 4 */);
2514 		}
2515 		break;
2516 
2517 	case MPI_EVENT_LOGOUT:
2518 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2519 		    (data1 >> 8) & 0xff, data0);
2520 		break;
2521 	case MPI_EVENT_QUEUE_FULL:
2522 	{
2523 		struct cam_sim *sim;
2524 		struct cam_path *tmppath;
2525 		struct ccb_relsim crs;
2526 		PTR_EVENT_DATA_QUEUE_FULL pqf =
2527 		    (PTR_EVENT_DATA_QUEUE_FULL) msg->Data;
2528 		lun_id_t lun_id;
2529 
2530 		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2531 		    "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2532 		if (mpt->phydisk_sim) {
2533 			sim = mpt->phydisk_sim;
2534 		} else {
2535 			sim = mpt->sim;
2536 		}
2537 		MPTLOCK_2_CAMLOCK(mpt);
2538 		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2539 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2540 			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2541 				mpt_prt(mpt, "unable to create a path to send "
2542 				    "XPT_REL_SIMQ");
2543 				CAMLOCK_2_MPTLOCK(mpt);
2544 				break;
2545 			}
2546 			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2547 			crs.ccb_h.func_code = XPT_REL_SIMQ;
2548 			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2549 			crs.openings = pqf->CurrentDepth - 1;
2550 			xpt_action((union ccb *)&crs);
2551 			if (crs.ccb_h.status != CAM_REQ_CMP) {
2552 				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2553 			}
2554 			xpt_free_path(tmppath);
2555 		}
2556 		CAMLOCK_2_MPTLOCK(mpt);
2557 		break;
2558 	}
2559 	case MPI_EVENT_EVENT_CHANGE:
2560 	case MPI_EVENT_INTEGRATED_RAID:
2561 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2562 	case MPI_EVENT_SAS_SES:
2563 		break;
2564 	default:
2565 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2566 		    msg->Event & 0xFF);
2567 		return (0);
2568 	}
2569 	return (1);
2570 }
2571 
2572 /*
2573  * Reply path for all SCSI I/O requests, called from our
2574  * interrupt handler by extracting our handler index from
2575  * the MsgContext field of the reply from the IOC.
2576  *
2577  * This routine is optimized for the common case of a
2578  * completion without error.  All exception handling is
2579  * offloaded to non-inlined helper routines to minimize
2580  * cache footprint.
2581  */
2582 static int
2583 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2584     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2585 {
2586 	MSG_SCSI_IO_REQUEST *scsi_req;
2587 	union ccb *ccb;
2588 	target_id_t tgt;
2589 
2590 	if (req->state == REQ_STATE_FREE) {
2591 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2592 		return (TRUE);
2593 	}
2594 
2595 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2596 	ccb = req->ccb;
2597 	if (ccb == NULL) {
2598 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2599 		    req, req->serno);
2600 		return (TRUE);
2601 	}
2602 
2603 	tgt = scsi_req->TargetID;
2604 	mpt_req_untimeout(req, mpt_timeout, ccb);
2605 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2606 
2607 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2608 		bus_dmasync_op_t op;
2609 
2610 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2611 			op = BUS_DMASYNC_POSTREAD;
2612 		else
2613 			op = BUS_DMASYNC_POSTWRITE;
2614 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2615 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2616 	}
2617 
2618 	if (reply_frame == NULL) {
2619 		/*
2620 		 * Context only reply, completion without error status.
2621 		 */
2622 		ccb->csio.resid = 0;
2623 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2624 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2625 	} else {
2626 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2627 	}
2628 
2629 	if (mpt->outofbeer) {
2630 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2631 		mpt->outofbeer = 0;
2632 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2633 	}
2634 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2635 		struct scsi_inquiry_data *iq =
2636 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2637 		if (scsi_req->Function ==
2638 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2639 			/*
2640 			 * Fake out the device type so that only the
2641 			 * pass-thru device will attach.
2642 			 */
2643 			iq->device &= ~0x1F;
2644 			iq->device |= T_NODEVICE;
2645 		}
2646 	}
2647 	if (mpt->verbose == MPT_PRT_DEBUG) {
2648 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2649 		    req, req->serno);
2650 	}
2651 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2652 	MPTLOCK_2_CAMLOCK(mpt);
2653 	xpt_done(ccb);
2654 	CAMLOCK_2_MPTLOCK(mpt);
2655 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2656 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2657 	} else {
2658 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2659 		    req, req->serno);
2660 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2661 	}
2662 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2663 	    ("CCB req needed wakeup"));
2664 #ifdef	INVARIANTS
2665 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2666 #endif
2667 	mpt_free_request(mpt, req);
2668 	return (TRUE);
2669 }
2670 
2671 static int
2672 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2673     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2674 {
2675 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2676 
2677 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2678 #ifdef	INVARIANTS
2679 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2680 #endif
2681 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2682 	/* Record IOC Status and Response Code of TMF for any waiters. */
2683 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2684 	req->ResponseCode = tmf_reply->ResponseCode;
2685 
2686 	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2687 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2688 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2689 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2690 		req->state |= REQ_STATE_DONE;
2691 		wakeup(req);
2692 	} else {
2693 		mpt->tmf_req->state = REQ_STATE_FREE;
2694 	}
2695 	return (TRUE);
2696 }
2697 
2698 /*
2699  * XXX: Move to definitions file
2700  */
2701 #define	ELS	0x22
2702 #define	FC4LS	0x32
2703 #define	ABTS	0x81
2704 #define	BA_ACC	0x84
2705 
2706 #define	LS_RJT	0x01
2707 #define	LS_ACC	0x02
2708 #define	PLOGI	0x03
2709 #define	LOGO	0x05
2710 #define SRR	0x14
2711 #define PRLI	0x20
2712 #define PRLO	0x21
2713 #define ADISC	0x52
2714 #define RSCN	0x61
2715 
2716 static void
2717 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2718     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2719 {
2720 	uint32_t fl;
2721 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2722 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2723 
2724 	/*
2725 	 * We are going to reuse the ELS request to send this response back.
2726 	 */
2727 	rsp = &tmp;
2728 	memset(rsp, 0, sizeof(*rsp));
2729 
2730 #ifdef	USE_IMMEDIATE_LINK_DATA
2731 	/*
2732 	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2733 	 */
2734 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2735 #endif
2736 	rsp->RspLength = length;
2737 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2738 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2739 
2740 	/*
2741 	 * Copy over information from the original reply frame to
2742 	 * it's correct place in the response.
2743 	 */
2744 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2745 
2746 	/*
2747 	 * And now copy back the temporary area to the original frame.
2748 	 */
2749 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2750 	rsp = req->req_vbuf;
2751 
2752 #ifdef	USE_IMMEDIATE_LINK_DATA
2753 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2754 #else
2755 {
2756 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2757 	bus_addr_t paddr = req->req_pbuf;
2758 	paddr += MPT_RQSL(mpt);
2759 
2760 	fl =
2761 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2762 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2763 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2764 		MPI_SGE_FLAGS_END_OF_LIST	|
2765 		MPI_SGE_FLAGS_END_OF_BUFFER;
2766 	fl <<= MPI_SGE_FLAGS_SHIFT;
2767 	fl |= (length);
2768 	se->FlagsLength = htole32(fl);
2769 	se->Address = htole32((uint32_t) paddr);
2770 }
2771 #endif
2772 
2773 	/*
2774 	 * Send it on...
2775 	 */
2776 	mpt_send_cmd(mpt, req);
2777 }
2778 
2779 static int
2780 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2781     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2782 {
2783 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2784 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2785 	U8 rctl;
2786 	U8 type;
2787 	U8 cmd;
2788 	U16 status = le16toh(reply_frame->IOCStatus);
2789 	U32 *elsbuf;
2790 	int ioindex;
2791 	int do_refresh = TRUE;
2792 
2793 #ifdef	INVARIANTS
2794 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2795 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2796 	    req, req->serno, rp->Function));
2797 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2798 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2799 	} else {
2800 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2801 	}
2802 #endif
2803 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2804 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2805 	    req, req->serno, reply_frame, reply_frame->Function);
2806 
2807 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2808 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2809 		    status, reply_frame->Function);
2810 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2811 			/*
2812 			 * XXX: to get around shutdown issue
2813 			 */
2814 			mpt->disabled = 1;
2815 			return (TRUE);
2816 		}
2817 		return (TRUE);
2818 	}
2819 
2820 	/*
2821 	 * If the function of a link service response, we recycle the
2822 	 * response to be a refresh for a new link service request.
2823 	 *
2824 	 * The request pointer is bogus in this case and we have to fetch
2825 	 * it based upon the TransactionContext.
2826 	 */
2827 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2828 		/* Freddie Uncle Charlie Katie */
2829 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2830 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2831 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2832 				break;
2833 			}
2834 
2835 		KASSERT(ioindex < mpt->els_cmds_allocated,
2836 		    ("can't find my mommie!"));
2837 
2838 		/* remove from active list as we're going to re-post it */
2839 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2840 		req->state &= ~REQ_STATE_QUEUED;
2841 		req->state |= REQ_STATE_DONE;
2842 		mpt_fc_post_els(mpt, req, ioindex);
2843 		return (TRUE);
2844 	}
2845 
2846 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2847 		/* remove from active list as we're done */
2848 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2849 		req->state &= ~REQ_STATE_QUEUED;
2850 		req->state |= REQ_STATE_DONE;
2851 		if (req->state & REQ_STATE_TIMEDOUT) {
2852 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2853 			    "Sync Primitive Send Completed After Timeout\n");
2854 			mpt_free_request(mpt, req);
2855 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2856 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2857 			    "Async Primitive Send Complete\n");
2858 			mpt_free_request(mpt, req);
2859 		} else {
2860 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2861 			    "Sync Primitive Send Complete- Waking Waiter\n");
2862 			wakeup(req);
2863 		}
2864 		return (TRUE);
2865 	}
2866 
2867 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2868 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2869 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2870 		    rp->MsgLength, rp->MsgFlags);
2871 		return (TRUE);
2872 	}
2873 
2874 	if (rp->MsgLength <= 5) {
2875 		/*
2876 		 * This is just a ack of an original ELS buffer post
2877 		 */
2878 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2879 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2880 		return (TRUE);
2881 	}
2882 
2883 
2884 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2885 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2886 
2887 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2888 	cmd = be32toh(elsbuf[0]) >> 24;
2889 
2890 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2891 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2892 		return (TRUE);
2893 	}
2894 
2895 	ioindex = le32toh(rp->TransactionContext);
2896 	req = mpt->els_cmd_ptrs[ioindex];
2897 
2898 	if (rctl == ELS && type == 1) {
2899 		switch (cmd) {
2900 		case PRLI:
2901 			/*
2902 			 * Send back a PRLI ACC
2903 			 */
2904 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2905 			    le32toh(rp->Wwn.PortNameHigh),
2906 			    le32toh(rp->Wwn.PortNameLow));
2907 			elsbuf[0] = htobe32(0x02100014);
2908 			elsbuf[1] |= htobe32(0x00000100);
2909 			elsbuf[4] = htobe32(0x00000002);
2910 			if (mpt->role & MPT_ROLE_TARGET)
2911 				elsbuf[4] |= htobe32(0x00000010);
2912 			if (mpt->role & MPT_ROLE_INITIATOR)
2913 				elsbuf[4] |= htobe32(0x00000020);
2914 			/* remove from active list as we're done */
2915 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2916 			req->state &= ~REQ_STATE_QUEUED;
2917 			req->state |= REQ_STATE_DONE;
2918 			mpt_fc_els_send_response(mpt, req, rp, 20);
2919 			do_refresh = FALSE;
2920 			break;
2921 		case PRLO:
2922 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2923 			elsbuf[0] = htobe32(0x02100014);
2924 			elsbuf[1] = htobe32(0x08000100);
2925 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2926 			    le32toh(rp->Wwn.PortNameHigh),
2927 			    le32toh(rp->Wwn.PortNameLow));
2928 			/* remove from active list as we're done */
2929 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2930 			req->state &= ~REQ_STATE_QUEUED;
2931 			req->state |= REQ_STATE_DONE;
2932 			mpt_fc_els_send_response(mpt, req, rp, 20);
2933 			do_refresh = FALSE;
2934 			break;
2935 		default:
2936 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2937 			break;
2938 		}
2939 	} else if (rctl == ABTS && type == 0) {
2940 		uint16_t rx_id = le16toh(rp->Rxid);
2941 		uint16_t ox_id = le16toh(rp->Oxid);
2942 		request_t *tgt_req = NULL;
2943 
2944 		mpt_prt(mpt,
2945 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2946 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2947 		    le32toh(rp->Wwn.PortNameLow));
2948 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2949 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2950 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2951 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2952 		} else {
2953 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2954 		}
2955 		if (tgt_req) {
2956 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2957 			uint8_t *vbuf;
2958 			union ccb *ccb = tgt->ccb;
2959 			uint32_t ct_id;
2960 
2961 			vbuf = tgt_req->req_vbuf;
2962 			vbuf += MPT_RQSL(mpt);
2963 
2964 			/*
2965 			 * Check to make sure we have the correct command
2966 			 * The reply descriptor in the target state should
2967 			 * should contain an IoIndex that should match the
2968 			 * RX_ID.
2969 			 *
2970 			 * It'd be nice to have OX_ID to crosscheck with
2971 			 * as well.
2972 			 */
2973 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2974 
2975 			if (ct_id != rx_id) {
2976 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2977 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2978 				    rx_id, ct_id);
2979 				goto skip;
2980 			}
2981 
2982 			ccb = tgt->ccb;
2983 			if (ccb) {
2984 				mpt_prt(mpt,
2985 				    "CCB (%p): lun %u flags %x status %x\n",
2986 				    ccb, ccb->ccb_h.target_lun,
2987 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2988 			}
2989 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2990 			    "%x nxfers %x\n", tgt->state,
2991 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2992 			    tgt->nxfers);
2993   skip:
2994 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2995 				mpt_prt(mpt, "unable to start TargetAbort\n");
2996 			}
2997 		} else {
2998 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2999 		}
3000 		memset(elsbuf, 0, 5 * (sizeof (U32)));
3001 		elsbuf[0] = htobe32(0);
3002 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
3003 		elsbuf[2] = htobe32(0x000ffff);
3004 		/*
3005 		 * Dork with the reply frame so that the reponse to it
3006 		 * will be correct.
3007 		 */
3008 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3009 		/* remove from active list as we're done */
3010 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3011 		req->state &= ~REQ_STATE_QUEUED;
3012 		req->state |= REQ_STATE_DONE;
3013 		mpt_fc_els_send_response(mpt, req, rp, 12);
3014 		do_refresh = FALSE;
3015 	} else {
3016 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3017 	}
3018 	if (do_refresh == TRUE) {
3019 		/* remove from active list as we're done */
3020 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3021 		req->state &= ~REQ_STATE_QUEUED;
3022 		req->state |= REQ_STATE_DONE;
3023 		mpt_fc_post_els(mpt, req, ioindex);
3024 	}
3025 	return (TRUE);
3026 }
3027 
3028 /*
3029  * Clean up all SCSI Initiator personality state in response
3030  * to a controller reset.
3031  */
3032 static void
3033 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3034 {
3035 	/*
3036 	 * The pending list is already run down by
3037 	 * the generic handler.  Perform the same
3038 	 * operation on the timed out request list.
3039 	 */
3040 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3041 				   MPI_IOCSTATUS_INVALID_STATE);
3042 
3043 	/*
3044 	 * XXX: We need to repost ELS and Target Command Buffers?
3045 	 */
3046 
3047 	/*
3048 	 * Inform the XPT that a bus reset has occurred.
3049 	 */
3050 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
3051 }
3052 
3053 /*
3054  * Parse additional completion information in the reply
3055  * frame for SCSI I/O requests.
3056  */
3057 static int
3058 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3059 			     MSG_DEFAULT_REPLY *reply_frame)
3060 {
3061 	union ccb *ccb;
3062 	MSG_SCSI_IO_REPLY *scsi_io_reply;
3063 	u_int ioc_status;
3064 	u_int sstate;
3065 	u_int loginfo;
3066 
3067 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3068 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3069 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3070 		("MPT SCSI I/O Handler called with incorrect reply type"));
3071 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3072 		("MPT SCSI I/O Handler called with continuation reply"));
3073 
3074 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3075 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
3076 	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
3077 	ioc_status &= MPI_IOCSTATUS_MASK;
3078 	sstate = scsi_io_reply->SCSIState;
3079 
3080 	ccb = req->ccb;
3081 	ccb->csio.resid =
3082 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3083 
3084 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3085 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3086 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3087 		ccb->csio.sense_resid =
3088 		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
3089 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3090 		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
3091 	}
3092 
3093 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3094 		/*
3095 		 * Tag messages rejected, but non-tagged retry
3096 		 * was successful.
3097 XXXX
3098 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3099 		 */
3100 	}
3101 
3102 	switch(ioc_status) {
3103 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3104 		/*
3105 		 * XXX
3106 		 * Linux driver indicates that a zero
3107 		 * transfer length with this error code
3108 		 * indicates a CRC error.
3109 		 *
3110 		 * No need to swap the bytes for checking
3111 		 * against zero.
3112 		 */
3113 		if (scsi_io_reply->TransferCount == 0) {
3114 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3115 			break;
3116 		}
3117 		/* FALLTHROUGH */
3118 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3119 	case MPI_IOCSTATUS_SUCCESS:
3120 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3121 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3122 			/*
3123 			 * Status was never returned for this transaction.
3124 			 */
3125 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3126 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3127 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3128 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3129 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3130 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3131 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3132 
3133 			/* XXX Handle SPI-Packet and FCP-2 reponse info. */
3134 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3135 		} else
3136 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3137 		break;
3138 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3139 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3140 		break;
3141 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3142 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3143 		break;
3144 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3145 		/*
3146 		 * Since selection timeouts and "device really not
3147 		 * there" are grouped into this error code, report
3148 		 * selection timeout.  Selection timeouts are
3149 		 * typically retried before giving up on the device
3150 		 * whereas "device not there" errors are considered
3151 		 * unretryable.
3152 		 */
3153 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3154 		break;
3155 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3156 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3157 		break;
3158 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3159 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3160 		break;
3161 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3162 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3163 		break;
3164 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3165 		ccb->ccb_h.status = CAM_UA_TERMIO;
3166 		break;
3167 	case MPI_IOCSTATUS_INVALID_STATE:
3168 		/*
3169 		 * The IOC has been reset.  Emulate a bus reset.
3170 		 */
3171 		/* FALLTHROUGH */
3172 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3173 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3174 		break;
3175 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3176 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3177 		/*
3178 		 * Don't clobber any timeout status that has
3179 		 * already been set for this transaction.  We
3180 		 * want the SCSI layer to be able to differentiate
3181 		 * between the command we aborted due to timeout
3182 		 * and any innocent bystanders.
3183 		 */
3184 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3185 			break;
3186 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3187 		break;
3188 
3189 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3190 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3191 		break;
3192 	case MPI_IOCSTATUS_BUSY:
3193 		mpt_set_ccb_status(ccb, CAM_BUSY);
3194 		break;
3195 	case MPI_IOCSTATUS_INVALID_FUNCTION:
3196 	case MPI_IOCSTATUS_INVALID_SGL:
3197 	case MPI_IOCSTATUS_INTERNAL_ERROR:
3198 	case MPI_IOCSTATUS_INVALID_FIELD:
3199 	default:
3200 		/* XXX
3201 		 * Some of the above may need to kick
3202 		 * of a recovery action!!!!
3203 		 */
3204 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3205 		break;
3206 	}
3207 
3208 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3209 		mpt_freeze_ccb(ccb);
3210 	}
3211 
3212 	return (TRUE);
3213 }
3214 
3215 static void
3216 mpt_action(struct cam_sim *sim, union ccb *ccb)
3217 {
3218 	struct mpt_softc *mpt;
3219 	struct ccb_trans_settings *cts;
3220 	target_id_t tgt;
3221 	lun_id_t lun;
3222 	int raid_passthru;
3223 
3224 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3225 
3226 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
3227 	raid_passthru = (sim == mpt->phydisk_sim);
3228 	MPT_LOCK_ASSERT(mpt);
3229 
3230 	tgt = ccb->ccb_h.target_id;
3231 	lun = ccb->ccb_h.target_lun;
3232 	if (raid_passthru &&
3233 	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
3234 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
3235 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
3236 		CAMLOCK_2_MPTLOCK(mpt);
3237 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3238 			MPTLOCK_2_CAMLOCK(mpt);
3239 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3240 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3241 			xpt_done(ccb);
3242 			return;
3243 		}
3244 		MPTLOCK_2_CAMLOCK(mpt);
3245 	}
3246 	ccb->ccb_h.ccb_mpt_ptr = mpt;
3247 
3248 	switch (ccb->ccb_h.func_code) {
3249 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
3250 		/*
3251 		 * Do a couple of preliminary checks...
3252 		 */
3253 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3254 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3255 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3256 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3257 				break;
3258 			}
3259 		}
3260 		/* Max supported CDB length is 16 bytes */
3261 		/* XXX Unless we implement the new 32byte message type */
3262 		if (ccb->csio.cdb_len >
3263 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3264 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3265 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3266 			break;
3267 		}
3268 #ifdef	MPT_TEST_MULTIPATH
3269 		if (mpt->failure_id == ccb->ccb_h.target_id) {
3270 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3271 			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3272 			break;
3273 		}
3274 #endif
3275 		ccb->csio.scsi_status = SCSI_STATUS_OK;
3276 		mpt_start(sim, ccb);
3277 		return;
3278 
3279 	case XPT_RESET_BUS:
3280 		if (raid_passthru) {
3281 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3282 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3283 			break;
3284 		}
3285 	case XPT_RESET_DEV:
3286 		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3287 			if (bootverbose) {
3288 				xpt_print(ccb->ccb_h.path, "reset bus\n");
3289 			}
3290 		} else {
3291 			xpt_print(ccb->ccb_h.path, "reset device\n");
3292 		}
3293 		CAMLOCK_2_MPTLOCK(mpt);
3294 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3295 		MPTLOCK_2_CAMLOCK(mpt);
3296 
3297 		/*
3298 		 * mpt_bus_reset is always successful in that it
3299 		 * will fall back to a hard reset should a bus
3300 		 * reset attempt fail.
3301 		 */
3302 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3303 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3304 		break;
3305 
3306 	case XPT_ABORT:
3307 	{
3308 		union ccb *accb = ccb->cab.abort_ccb;
3309 		CAMLOCK_2_MPTLOCK(mpt);
3310 		switch (accb->ccb_h.func_code) {
3311 		case XPT_ACCEPT_TARGET_IO:
3312 		case XPT_IMMED_NOTIFY:
3313 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3314 			break;
3315 		case XPT_CONT_TARGET_IO:
3316 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3317 			ccb->ccb_h.status = CAM_UA_ABORT;
3318 			break;
3319 		case XPT_SCSI_IO:
3320 			ccb->ccb_h.status = CAM_UA_ABORT;
3321 			break;
3322 		default:
3323 			ccb->ccb_h.status = CAM_REQ_INVALID;
3324 			break;
3325 		}
3326 		MPTLOCK_2_CAMLOCK(mpt);
3327 		break;
3328 	}
3329 
3330 #ifdef	CAM_NEW_TRAN_CODE
3331 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3332 #else
3333 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3334 #endif
3335 #define	DP_DISC_ENABLE	0x1
3336 #define	DP_DISC_DISABL	0x2
3337 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
3338 
3339 #define	DP_TQING_ENABLE	0x4
3340 #define	DP_TQING_DISABL	0x8
3341 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
3342 
3343 #define	DP_WIDE		0x10
3344 #define	DP_NARROW	0x20
3345 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
3346 
3347 #define	DP_SYNC		0x40
3348 
3349 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
3350 	{
3351 #ifdef	CAM_NEW_TRAN_CODE
3352 		struct ccb_trans_settings_scsi *scsi;
3353 		struct ccb_trans_settings_spi *spi;
3354 #endif
3355 		uint8_t dval;
3356 		u_int period;
3357 		u_int offset;
3358 		int i, j;
3359 
3360 		cts = &ccb->cts;
3361 
3362 		if (mpt->is_fc || mpt->is_sas) {
3363 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3364 			break;
3365 		}
3366 
3367 #ifdef	CAM_NEW_TRAN_CODE
3368 		scsi = &cts->proto_specific.scsi;
3369 		spi = &cts->xport_specific.spi;
3370 
3371 		/*
3372 		 * We can be called just to valid transport and proto versions
3373 		 */
3374 		if (scsi->valid == 0 && spi->valid == 0) {
3375 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3376 			break;
3377 		}
3378 #endif
3379 
3380 		/*
3381 		 * Skip attempting settings on RAID volume disks.
3382 		 * Other devices on the bus get the normal treatment.
3383 		 */
3384 		if (mpt->phydisk_sim && raid_passthru == 0 &&
3385 		    mpt_is_raid_volume(mpt, tgt) != 0) {
3386 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3387 			    "no transfer settings for RAID vols\n");
3388 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3389 			break;
3390 		}
3391 
3392 		i = mpt->mpt_port_page2.PortSettings &
3393 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3394 		j = mpt->mpt_port_page2.PortFlags &
3395 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3396 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3397 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3398 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
3399 			    "honoring BIOS transfer negotiations\n");
3400 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3401 			break;
3402 		}
3403 
3404 		dval = 0;
3405 		period = 0;
3406 		offset = 0;
3407 
3408 #ifndef	CAM_NEW_TRAN_CODE
3409 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3410 			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3411 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3412 		}
3413 
3414 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3415 			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3416 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3417 		}
3418 
3419 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3420 			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3421 		}
3422 
3423 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3424 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3425 			dval |= DP_SYNC;
3426 			period = cts->sync_period;
3427 			offset = cts->sync_offset;
3428 		}
3429 #else
3430 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3431 			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3432 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3433 		}
3434 
3435 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3436 			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3437 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3438 		}
3439 
3440 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3441 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3442 			    DP_WIDE : DP_NARROW;
3443 		}
3444 
3445 		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3446 			dval |= DP_SYNC;
3447 			offset = spi->sync_offset;
3448 		} else {
3449 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3450 			    &mpt->mpt_dev_page1[tgt];
3451 			offset = ptr->RequestedParameters;
3452 			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3453 	    		offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3454 		}
3455 		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3456 			dval |= DP_SYNC;
3457 			period = spi->sync_period;
3458 		} else {
3459 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3460 			    &mpt->mpt_dev_page1[tgt];
3461 			period = ptr->RequestedParameters;
3462 			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3463 	    		period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3464 		}
3465 #endif
3466 		CAMLOCK_2_MPTLOCK(mpt);
3467 		if (dval & DP_DISC_ENABLE) {
3468 			mpt->mpt_disc_enable |= (1 << tgt);
3469 		} else if (dval & DP_DISC_DISABL) {
3470 			mpt->mpt_disc_enable &= ~(1 << tgt);
3471 		}
3472 		if (dval & DP_TQING_ENABLE) {
3473 			mpt->mpt_tag_enable |= (1 << tgt);
3474 		} else if (dval & DP_TQING_DISABL) {
3475 			mpt->mpt_tag_enable &= ~(1 << tgt);
3476 		}
3477 		if (dval & DP_WIDTH) {
3478 			mpt_setwidth(mpt, tgt, 1);
3479 		}
3480 		if (dval & DP_SYNC) {
3481 			mpt_setsync(mpt, tgt, period, offset);
3482 		}
3483 		if (dval == 0) {
3484 			MPTLOCK_2_CAMLOCK(mpt);
3485 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3486 			break;
3487 		}
3488 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3489 		    "set [%d]: 0x%x period 0x%x offset %d\n",
3490 		    tgt, dval, period, offset);
3491 		if (mpt_update_spi_config(mpt, tgt)) {
3492 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3493 		} else {
3494 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3495 		}
3496 		MPTLOCK_2_CAMLOCK(mpt);
3497 		break;
3498 	}
3499 	case XPT_GET_TRAN_SETTINGS:
3500 	{
3501 #ifdef	CAM_NEW_TRAN_CODE
3502 		struct ccb_trans_settings_scsi *scsi;
3503 		cts = &ccb->cts;
3504 		cts->protocol = PROTO_SCSI;
3505 		if (mpt->is_fc) {
3506 			struct ccb_trans_settings_fc *fc =
3507 			    &cts->xport_specific.fc;
3508 			cts->protocol_version = SCSI_REV_SPC;
3509 			cts->transport = XPORT_FC;
3510 			cts->transport_version = 0;
3511 			fc->valid = CTS_FC_VALID_SPEED;
3512 			fc->bitrate = 100000;
3513 		} else if (mpt->is_sas) {
3514 			struct ccb_trans_settings_sas *sas =
3515 			    &cts->xport_specific.sas;
3516 			cts->protocol_version = SCSI_REV_SPC2;
3517 			cts->transport = XPORT_SAS;
3518 			cts->transport_version = 0;
3519 			sas->valid = CTS_SAS_VALID_SPEED;
3520 			sas->bitrate = 300000;
3521 		} else {
3522 			cts->protocol_version = SCSI_REV_2;
3523 			cts->transport = XPORT_SPI;
3524 			cts->transport_version = 2;
3525 			if (mpt_get_spi_settings(mpt, cts) != 0) {
3526 				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3527 				break;
3528 			}
3529 		}
3530 		scsi = &cts->proto_specific.scsi;
3531 		scsi->valid = CTS_SCSI_VALID_TQ;
3532 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3533 #else
3534 		cts = &ccb->cts;
3535 		if (mpt->is_fc) {
3536 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3537 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3538 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3539 		} else if (mpt->is_sas) {
3540 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3541 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3542 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3543 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
3544 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3545 			break;
3546 		}
3547 #endif
3548 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3549 		break;
3550 	}
3551 	case XPT_CALC_GEOMETRY:
3552 	{
3553 		struct ccb_calc_geometry *ccg;
3554 
3555 		ccg = &ccb->ccg;
3556 		if (ccg->block_size == 0) {
3557 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3558 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3559 			break;
3560 		}
3561 		mpt_calc_geometry(ccg, /*extended*/1);
3562 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3563 		break;
3564 	}
3565 	case XPT_PATH_INQ:		/* Path routing inquiry */
3566 	{
3567 		struct ccb_pathinq *cpi = &ccb->cpi;
3568 
3569 		cpi->version_num = 1;
3570 		cpi->target_sprt = 0;
3571 		cpi->hba_eng_cnt = 0;
3572 		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3573 		/*
3574 		 * FC cards report MAX_DEVICES of 512, but
3575 		 * the MSG_SCSI_IO_REQUEST target id field
3576 		 * is only 8 bits. Until we fix the driver
3577 		 * to support 'channels' for bus overflow,
3578 		 * just limit it.
3579 		 */
3580 		if (cpi->max_target > 255) {
3581 			cpi->max_target = 255;
3582 		}
3583 
3584 		/*
3585 		 * VMware ESX reports > 16 devices and then dies when we probe.
3586 		 */
3587 		if (mpt->is_spi && cpi->max_target > 15) {
3588 			cpi->max_target = 15;
3589 		}
3590 		cpi->max_lun = 7;
3591 		cpi->initiator_id = mpt->mpt_ini_id;
3592 		cpi->bus_id = cam_sim_bus(sim);
3593 
3594 		/*
3595 		 * The base speed is the speed of the underlying connection.
3596 		 */
3597 #ifdef	CAM_NEW_TRAN_CODE
3598 		cpi->protocol = PROTO_SCSI;
3599 		if (mpt->is_fc) {
3600 			cpi->hba_misc = PIM_NOBUSRESET;
3601 			cpi->base_transfer_speed = 100000;
3602 			cpi->hba_inquiry = PI_TAG_ABLE;
3603 			cpi->transport = XPORT_FC;
3604 			cpi->transport_version = 0;
3605 			cpi->protocol_version = SCSI_REV_SPC;
3606 		} else if (mpt->is_sas) {
3607 			cpi->hba_misc = PIM_NOBUSRESET;
3608 			cpi->base_transfer_speed = 300000;
3609 			cpi->hba_inquiry = PI_TAG_ABLE;
3610 			cpi->transport = XPORT_SAS;
3611 			cpi->transport_version = 0;
3612 			cpi->protocol_version = SCSI_REV_SPC2;
3613 		} else {
3614 			cpi->hba_misc = PIM_SEQSCAN;
3615 			cpi->base_transfer_speed = 3300;
3616 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3617 			cpi->transport = XPORT_SPI;
3618 			cpi->transport_version = 2;
3619 			cpi->protocol_version = SCSI_REV_2;
3620 		}
3621 #else
3622 		if (mpt->is_fc) {
3623 			cpi->hba_misc = PIM_NOBUSRESET;
3624 			cpi->base_transfer_speed = 100000;
3625 			cpi->hba_inquiry = PI_TAG_ABLE;
3626 		} else if (mpt->is_sas) {
3627 			cpi->hba_misc = PIM_NOBUSRESET;
3628 			cpi->base_transfer_speed = 300000;
3629 			cpi->hba_inquiry = PI_TAG_ABLE;
3630 		} else {
3631 			cpi->hba_misc = PIM_SEQSCAN;
3632 			cpi->base_transfer_speed = 3300;
3633 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3634 		}
3635 #endif
3636 
3637 		/*
3638 		 * We give our fake RAID passhtru bus a width that is MaxVolumes
3639 		 * wide and restrict it to one lun.
3640 		 */
3641 		if (raid_passthru) {
3642 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3643 			cpi->initiator_id = cpi->max_target + 1;
3644 			cpi->max_lun = 0;
3645 		}
3646 
3647 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3648 			cpi->hba_misc |= PIM_NOINITIATOR;
3649 		}
3650 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3651 			cpi->target_sprt =
3652 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3653 		} else {
3654 			cpi->target_sprt = 0;
3655 		}
3656 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3657 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3658 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3659 		cpi->unit_number = cam_sim_unit(sim);
3660 		cpi->ccb_h.status = CAM_REQ_CMP;
3661 		break;
3662 	}
3663 	case XPT_EN_LUN:		/* Enable LUN as a target */
3664 	{
3665 		int result;
3666 
3667 		CAMLOCK_2_MPTLOCK(mpt);
3668 		if (ccb->cel.enable)
3669 			result = mpt_enable_lun(mpt,
3670 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3671 		else
3672 			result = mpt_disable_lun(mpt,
3673 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3674 		MPTLOCK_2_CAMLOCK(mpt);
3675 		if (result == 0) {
3676 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3677 		} else {
3678 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3679 		}
3680 		break;
3681 	}
3682 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3683 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3684 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3685 	{
3686 		tgt_resource_t *trtp;
3687 		lun_id_t lun = ccb->ccb_h.target_lun;
3688 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3689 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3690 		ccb->ccb_h.flags = 0;
3691 
3692 		if (lun == CAM_LUN_WILDCARD) {
3693 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3694 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3695 				break;
3696 			}
3697 			trtp = &mpt->trt_wildcard;
3698 		} else if (lun >= MPT_MAX_LUNS) {
3699 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3700 			break;
3701 		} else {
3702 			trtp = &mpt->trt[lun];
3703 		}
3704 		CAMLOCK_2_MPTLOCK(mpt);
3705 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3706 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3707 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3708 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3709 			    sim_links.stqe);
3710 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3711 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3712 			    "Put FREE INOT lun %d\n", lun);
3713 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3714 			    sim_links.stqe);
3715 		} else {
3716 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3717 		}
3718 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3719 		MPTLOCK_2_CAMLOCK(mpt);
3720 		return;
3721 	}
3722 	case XPT_CONT_TARGET_IO:
3723 		CAMLOCK_2_MPTLOCK(mpt);
3724 		mpt_target_start_io(mpt, ccb);
3725 		MPTLOCK_2_CAMLOCK(mpt);
3726 		return;
3727 
3728 	default:
3729 		ccb->ccb_h.status = CAM_REQ_INVALID;
3730 		break;
3731 	}
3732 	xpt_done(ccb);
3733 }
3734 
3735 static int
3736 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3737 {
3738 #ifdef	CAM_NEW_TRAN_CODE
3739 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3740 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3741 #endif
3742 	target_id_t tgt;
3743 	uint32_t dval, pval, oval;
3744 	int rv;
3745 
3746 	if (IS_CURRENT_SETTINGS(cts) == 0) {
3747 		tgt = cts->ccb_h.target_id;
3748 	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3749 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3750 			return (-1);
3751 		}
3752 	} else {
3753 		tgt = cts->ccb_h.target_id;
3754 	}
3755 
3756 	/*
3757 	 * We aren't looking at Port Page 2 BIOS settings here-
3758 	 * sometimes these have been known to be bogus XXX.
3759 	 *
3760 	 * For user settings, we pick the max from port page 0
3761 	 *
3762 	 * For current settings we read the current settings out from
3763 	 * device page 0 for that target.
3764 	 */
3765 	if (IS_CURRENT_SETTINGS(cts)) {
3766 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3767 		dval = 0;
3768 
3769 		CAMLOCK_2_MPTLOCK(mpt);
3770 		tmp = mpt->mpt_dev_page0[tgt];
3771 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3772 		    sizeof(tmp), FALSE, 5000);
3773 		if (rv) {
3774 			MPTLOCK_2_CAMLOCK(mpt);
3775 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3776 			return (rv);
3777 		}
3778 		MPTLOCK_2_CAMLOCK(mpt);
3779 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3780 		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3781 		    tmp.NegotiatedParameters, tmp.Information);
3782 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3783 		    DP_WIDE : DP_NARROW;
3784 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3785 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3786 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3787 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3788 		oval = tmp.NegotiatedParameters;
3789 		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3790 		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3791 		pval = tmp.NegotiatedParameters;
3792 		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3793 		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3794 		mpt->mpt_dev_page0[tgt] = tmp;
3795 	} else {
3796 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3797 		oval = mpt->mpt_port_page0.Capabilities;
3798 		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3799 		pval = mpt->mpt_port_page0.Capabilities;
3800 		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3801 	}
3802 
3803 #ifndef	CAM_NEW_TRAN_CODE
3804 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3805 	cts->valid = 0;
3806 	cts->sync_period = pval;
3807 	cts->sync_offset = oval;
3808 	cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3809 	cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3810 	cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3811 	if (dval & DP_WIDE) {
3812 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3813 	} else {
3814 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3815 	}
3816 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3817 		cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3818 		if (dval & DP_DISC_ENABLE) {
3819 			cts->flags |= CCB_TRANS_DISC_ENB;
3820 		}
3821 		if (dval & DP_TQING_ENABLE) {
3822 			cts->flags |= CCB_TRANS_TAG_ENB;
3823 		}
3824 	}
3825 #else
3826 	spi->valid = 0;
3827 	scsi->valid = 0;
3828 	spi->flags = 0;
3829 	scsi->flags = 0;
3830 	spi->sync_offset = oval;
3831 	spi->sync_period = pval;
3832 	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3833 	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3834 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3835 	if (dval & DP_WIDE) {
3836 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3837 	} else {
3838 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3839 	}
3840 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3841 		scsi->valid = CTS_SCSI_VALID_TQ;
3842 		if (dval & DP_TQING_ENABLE) {
3843 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3844 		}
3845 		spi->valid |= CTS_SPI_VALID_DISC;
3846 		if (dval & DP_DISC_ENABLE) {
3847 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3848 		}
3849 	}
3850 #endif
3851 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3852 	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3853 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3854 	return (0);
3855 }
3856 
3857 static void
3858 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3859 {
3860 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3861 
3862 	ptr = &mpt->mpt_dev_page1[tgt];
3863 	if (onoff) {
3864 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3865 	} else {
3866 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3867 	}
3868 }
3869 
3870 static void
3871 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3872 {
3873 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3874 
3875 	ptr = &mpt->mpt_dev_page1[tgt];
3876 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3877 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3878 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3879 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3880 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3881 	if (period == 0) {
3882 		return;
3883 	}
3884 	ptr->RequestedParameters |=
3885 	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3886 	ptr->RequestedParameters |=
3887 	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3888 	if (period < 0xa) {
3889 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3890 	}
3891 	if (period < 0x9) {
3892 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3893 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3894 	}
3895 }
3896 
3897 static int
3898 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3899 {
3900 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3901 	int rv;
3902 
3903 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3904 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3905 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3906 	tmp = mpt->mpt_dev_page1[tgt];
3907 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3908 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3909 	if (rv) {
3910 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3911 		return (-1);
3912 	}
3913 	return (0);
3914 }
3915 
3916 static void
3917 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3918 {
3919 #if __FreeBSD_version >= 500000
3920 	cam_calc_geometry(ccg, extended);
3921 #else
3922 	uint32_t size_mb;
3923 	uint32_t secs_per_cylinder;
3924 
3925 	if (ccg->block_size == 0) {
3926 		ccg->ccb_h.status = CAM_REQ_INVALID;
3927 		return;
3928 	}
3929 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3930 	if (size_mb > 1024 && extended) {
3931 		ccg->heads = 255;
3932 		ccg->secs_per_track = 63;
3933 	} else {
3934 		ccg->heads = 64;
3935 		ccg->secs_per_track = 32;
3936 	}
3937 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3938 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3939 	ccg->ccb_h.status = CAM_REQ_CMP;
3940 #endif
3941 }
3942 
3943 /****************************** Timeout Recovery ******************************/
3944 static int
3945 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3946 {
3947 	int error;
3948 
3949 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3950 	    &mpt->recovery_thread, /*flags*/0,
3951 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3952 	return (error);
3953 }
3954 
3955 static void
3956 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3957 {
3958 	if (mpt->recovery_thread == NULL) {
3959 		return;
3960 	}
3961 	mpt->shutdwn_recovery = 1;
3962 	wakeup(mpt);
3963 	/*
3964 	 * Sleep on a slightly different location
3965 	 * for this interlock just for added safety.
3966 	 */
3967 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3968 }
3969 
3970 static void
3971 mpt_recovery_thread(void *arg)
3972 {
3973 	struct mpt_softc *mpt;
3974 
3975 	mpt = (struct mpt_softc *)arg;
3976 	MPT_LOCK(mpt);
3977 	for (;;) {
3978 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3979 			if (mpt->shutdwn_recovery == 0) {
3980 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3981 			}
3982 		}
3983 		if (mpt->shutdwn_recovery != 0) {
3984 			break;
3985 		}
3986 		mpt_recover_commands(mpt);
3987 	}
3988 	mpt->recovery_thread = NULL;
3989 	wakeup(&mpt->recovery_thread);
3990 	MPT_UNLOCK(mpt);
3991 	kproc_exit(0);
3992 }
3993 
3994 static int
3995 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3996     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3997 {
3998 	MSG_SCSI_TASK_MGMT *tmf_req;
3999 	int		    error;
4000 
4001 	/*
4002 	 * Wait for any current TMF request to complete.
4003 	 * We're only allowed to issue one TMF at a time.
4004 	 */
4005 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
4006 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
4007 	if (error != 0) {
4008 		mpt_reset(mpt, TRUE);
4009 		return (ETIMEDOUT);
4010 	}
4011 
4012 	mpt_assign_serno(mpt, mpt->tmf_req);
4013 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
4014 
4015 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
4016 	memset(tmf_req, 0, sizeof(*tmf_req));
4017 	tmf_req->TargetID = target;
4018 	tmf_req->Bus = channel;
4019 	tmf_req->ChainOffset = 0;
4020 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4021 	tmf_req->Reserved = 0;
4022 	tmf_req->TaskType = type;
4023 	tmf_req->Reserved1 = 0;
4024 	tmf_req->MsgFlags = flags;
4025 	tmf_req->MsgContext =
4026 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
4027 	memset(&tmf_req->LUN, 0,
4028 	    sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
4029 	if (lun > 256) {
4030 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4031 		tmf_req->LUN[1] = lun & 0xff;
4032 	} else {
4033 		tmf_req->LUN[1] = lun;
4034 	}
4035 	tmf_req->TaskMsgContext = abort_ctx;
4036 
4037 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4038 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
4039 	    mpt->tmf_req->serno, tmf_req->MsgContext);
4040 	if (mpt->verbose > MPT_PRT_DEBUG) {
4041 		mpt_print_request(tmf_req);
4042 	}
4043 
4044 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4045 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4046 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4047 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4048 	if (error != MPT_OK) {
4049 		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4050 		mpt->tmf_req->state = REQ_STATE_FREE;
4051 		mpt_reset(mpt, TRUE);
4052 	}
4053 	return (error);
4054 }
4055 
4056 /*
4057  * When a command times out, it is placed on the requeust_timeout_list
4058  * and we wake our recovery thread.  The MPT-Fusion architecture supports
4059  * only a single TMF operation at a time, so we serially abort/bdr, etc,
4060  * the timedout transactions.  The next TMF is issued either by the
4061  * completion handler of the current TMF waking our recovery thread,
4062  * or the TMF timeout handler causing a hard reset sequence.
4063  */
4064 static void
4065 mpt_recover_commands(struct mpt_softc *mpt)
4066 {
4067 	request_t	   *req;
4068 	union ccb	   *ccb;
4069 	int		    error;
4070 
4071 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4072 		/*
4073 		 * No work to do- leave.
4074 		 */
4075 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4076 		return;
4077 	}
4078 
4079 	/*
4080 	 * Flush any commands whose completion coincides with their timeout.
4081 	 */
4082 	mpt_intr(mpt);
4083 
4084 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4085 		/*
4086 		 * The timedout commands have already
4087 		 * completed.  This typically means
4088 		 * that either the timeout value was on
4089 		 * the hairy edge of what the device
4090 		 * requires or - more likely - interrupts
4091 		 * are not happening.
4092 		 */
4093 		mpt_prt(mpt, "Timedout requests already complete. "
4094 		    "Interrupts may not be functioning.\n");
4095 		mpt_enable_ints(mpt);
4096 		return;
4097 	}
4098 
4099 	/*
4100 	 * We have no visibility into the current state of the
4101 	 * controller, so attempt to abort the commands in the
4102 	 * order they timed-out. For initiator commands, we
4103 	 * depend on the reply handler pulling requests off
4104 	 * the timeout list.
4105 	 */
4106 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4107 		uint16_t status;
4108 		uint8_t response;
4109 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4110 
4111 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4112 		    req, req->serno, hdrp->Function);
4113 		ccb = req->ccb;
4114 		if (ccb == NULL) {
4115 			mpt_prt(mpt, "null ccb in timed out request. "
4116 			    "Resetting Controller.\n");
4117 			mpt_reset(mpt, TRUE);
4118 			continue;
4119 		}
4120 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4121 
4122 		/*
4123 		 * Check to see if this is not an initiator command and
4124 		 * deal with it differently if it is.
4125 		 */
4126 		switch (hdrp->Function) {
4127 		case MPI_FUNCTION_SCSI_IO_REQUEST:
4128 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4129 			break;
4130 		default:
4131 			/*
4132 			 * XXX: FIX ME: need to abort target assists...
4133 			 */
4134 			mpt_prt(mpt, "just putting it back on the pend q\n");
4135 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4136 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4137 			    links);
4138 			continue;
4139 		}
4140 
4141 		error = mpt_scsi_send_tmf(mpt,
4142 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4143 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4144 		    htole32(req->index | scsi_io_handler_id), TRUE);
4145 
4146 		if (error != 0) {
4147 			/*
4148 			 * mpt_scsi_send_tmf hard resets on failure, so no
4149 			 * need to do so here.  Our queue should be emptied
4150 			 * by the hard reset.
4151 			 */
4152 			continue;
4153 		}
4154 
4155 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4156 		    REQ_STATE_DONE, TRUE, 500);
4157 
4158 		status = mpt->tmf_req->IOCStatus;
4159 		response = mpt->tmf_req->ResponseCode;
4160 		mpt->tmf_req->state = REQ_STATE_FREE;
4161 
4162 		if (error != 0) {
4163 			/*
4164 			 * If we've errored out,, reset the controller.
4165 			 */
4166 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4167 			    "Resetting controller\n");
4168 			mpt_reset(mpt, TRUE);
4169 			continue;
4170 		}
4171 
4172 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4173 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4174 			    "Resetting controller.\n", status);
4175 			mpt_reset(mpt, TRUE);
4176 			continue;
4177 		}
4178 
4179 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4180 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4181 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4182 			    "Resetting controller.\n", response);
4183 			mpt_reset(mpt, TRUE);
4184 			continue;
4185 		}
4186 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4187 	}
4188 }
4189 
4190 /************************ Target Mode Support ****************************/
4191 static void
4192 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4193 {
4194 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4195 	PTR_SGE_TRANSACTION32 tep;
4196 	PTR_SGE_SIMPLE32 se;
4197 	bus_addr_t paddr;
4198 	uint32_t fl;
4199 
4200 	paddr = req->req_pbuf;
4201 	paddr += MPT_RQSL(mpt);
4202 
4203 	fc = req->req_vbuf;
4204 	memset(fc, 0, MPT_REQUEST_AREA);
4205 	fc->BufferCount = 1;
4206 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4207 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
4208 
4209 	/*
4210 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
4211 	 * consist of a TE SGL element (with details length of zero)
4212 	 * followe by a SIMPLE SGL element which holds the address
4213 	 * of the buffer.
4214 	 */
4215 
4216 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4217 
4218 	tep->ContextSize = 4;
4219 	tep->Flags = 0;
4220 	tep->TransactionContext[0] = htole32(ioindex);
4221 
4222 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4223 	fl =
4224 		MPI_SGE_FLAGS_HOST_TO_IOC	|
4225 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4226 		MPI_SGE_FLAGS_LAST_ELEMENT	|
4227 		MPI_SGE_FLAGS_END_OF_LIST	|
4228 		MPI_SGE_FLAGS_END_OF_BUFFER;
4229 	fl <<= MPI_SGE_FLAGS_SHIFT;
4230 	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4231 	se->FlagsLength = htole32(fl);
4232 	se->Address = htole32((uint32_t) paddr);
4233 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4234 	    "add ELS index %d ioindex %d for %p:%u\n",
4235 	    req->index, ioindex, req, req->serno);
4236 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4237 	    ("mpt_fc_post_els: request not locked"));
4238 	mpt_send_cmd(mpt, req);
4239 }
4240 
4241 static void
4242 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4243 {
4244 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4245 	PTR_CMD_BUFFER_DESCRIPTOR cb;
4246 	bus_addr_t paddr;
4247 
4248 	paddr = req->req_pbuf;
4249 	paddr += MPT_RQSL(mpt);
4250 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4251 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4252 
4253 	fc = req->req_vbuf;
4254 	fc->BufferCount = 1;
4255 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4256 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4257 
4258 	cb = &fc->Buffer[0];
4259 	cb->IoIndex = htole16(ioindex);
4260 	cb->u.PhysicalAddress32 = htole32((U32) paddr);
4261 
4262 	mpt_check_doorbell(mpt);
4263 	mpt_send_cmd(mpt, req);
4264 }
4265 
4266 static int
4267 mpt_add_els_buffers(struct mpt_softc *mpt)
4268 {
4269 	int i;
4270 
4271 	if (mpt->is_fc == 0) {
4272 		return (TRUE);
4273 	}
4274 
4275 	if (mpt->els_cmds_allocated) {
4276 		return (TRUE);
4277 	}
4278 
4279 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4280 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4281 
4282 	if (mpt->els_cmd_ptrs == NULL) {
4283 		return (FALSE);
4284 	}
4285 
4286 	/*
4287 	 * Feed the chip some ELS buffer resources
4288 	 */
4289 	for (i = 0; i < MPT_MAX_ELS; i++) {
4290 		request_t *req = mpt_get_request(mpt, FALSE);
4291 		if (req == NULL) {
4292 			break;
4293 		}
4294 		req->state |= REQ_STATE_LOCKED;
4295 		mpt->els_cmd_ptrs[i] = req;
4296 		mpt_fc_post_els(mpt, req, i);
4297 	}
4298 
4299 	if (i == 0) {
4300 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
4301 		free(mpt->els_cmd_ptrs, M_DEVBUF);
4302 		mpt->els_cmd_ptrs = NULL;
4303 		return (FALSE);
4304 	}
4305 	if (i != MPT_MAX_ELS) {
4306 		mpt_lprt(mpt, MPT_PRT_INFO,
4307 		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
4308 	}
4309 	mpt->els_cmds_allocated = i;
4310 	return(TRUE);
4311 }
4312 
4313 static int
4314 mpt_add_target_commands(struct mpt_softc *mpt)
4315 {
4316 	int i, max;
4317 
4318 	if (mpt->tgt_cmd_ptrs) {
4319 		return (TRUE);
4320 	}
4321 
4322 	max = MPT_MAX_REQUESTS(mpt) >> 1;
4323 	if (max > mpt->mpt_max_tgtcmds) {
4324 		max = mpt->mpt_max_tgtcmds;
4325 	}
4326 	mpt->tgt_cmd_ptrs =
4327 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4328 	if (mpt->tgt_cmd_ptrs == NULL) {
4329 		mpt_prt(mpt,
4330 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
4331 		return (FALSE);
4332 	}
4333 
4334 	for (i = 0; i < max; i++) {
4335 		request_t *req;
4336 
4337 		req = mpt_get_request(mpt, FALSE);
4338 		if (req == NULL) {
4339 			break;
4340 		}
4341 		req->state |= REQ_STATE_LOCKED;
4342 		mpt->tgt_cmd_ptrs[i] = req;
4343 		mpt_post_target_command(mpt, req, i);
4344 	}
4345 
4346 
4347 	if (i == 0) {
4348 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4349 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4350 		mpt->tgt_cmd_ptrs = NULL;
4351 		return (FALSE);
4352 	}
4353 
4354 	mpt->tgt_cmds_allocated = i;
4355 
4356 	if (i < max) {
4357 		mpt_lprt(mpt, MPT_PRT_INFO,
4358 		    "added %d of %d target bufs\n", i, max);
4359 	}
4360 	return (i);
4361 }
4362 
4363 static int
4364 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4365 {
4366 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4367 		mpt->twildcard = 1;
4368 	} else if (lun >= MPT_MAX_LUNS) {
4369 		return (EINVAL);
4370 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4371 		return (EINVAL);
4372 	}
4373 	if (mpt->tenabled == 0) {
4374 		if (mpt->is_fc) {
4375 			(void) mpt_fc_reset_link(mpt, 0);
4376 		}
4377 		mpt->tenabled = 1;
4378 	}
4379 	if (lun == CAM_LUN_WILDCARD) {
4380 		mpt->trt_wildcard.enabled = 1;
4381 	} else {
4382 		mpt->trt[lun].enabled = 1;
4383 	}
4384 	return (0);
4385 }
4386 
4387 static int
4388 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4389 {
4390 	int i;
4391 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4392 		mpt->twildcard = 0;
4393 	} else if (lun >= MPT_MAX_LUNS) {
4394 		return (EINVAL);
4395 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4396 		return (EINVAL);
4397 	}
4398 	if (lun == CAM_LUN_WILDCARD) {
4399 		mpt->trt_wildcard.enabled = 0;
4400 	} else {
4401 		mpt->trt[lun].enabled = 0;
4402 	}
4403 	for (i = 0; i < MPT_MAX_LUNS; i++) {
4404 		if (mpt->trt[lun].enabled) {
4405 			break;
4406 		}
4407 	}
4408 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4409 		if (mpt->is_fc) {
4410 			(void) mpt_fc_reset_link(mpt, 0);
4411 		}
4412 		mpt->tenabled = 0;
4413 	}
4414 	return (0);
4415 }
4416 
4417 /*
4418  * Called with MPT lock held
4419  */
4420 static void
4421 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4422 {
4423 	struct ccb_scsiio *csio = &ccb->csio;
4424 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4425 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4426 
4427 	switch (tgt->state) {
4428 	case TGT_STATE_IN_CAM:
4429 		break;
4430 	case TGT_STATE_MOVING_DATA:
4431 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4432 		xpt_freeze_simq(mpt->sim, 1);
4433 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4434 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4435 		MPTLOCK_2_CAMLOCK(mpt);
4436 		xpt_done(ccb);
4437 		CAMLOCK_2_MPTLOCK(mpt);
4438 		return;
4439 	default:
4440 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4441 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4442 		mpt_tgt_dump_req_state(mpt, cmd_req);
4443 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4444 		MPTLOCK_2_CAMLOCK(mpt);
4445 		xpt_done(ccb);
4446 		CAMLOCK_2_MPTLOCK(mpt);
4447 		return;
4448 	}
4449 
4450 	if (csio->dxfer_len) {
4451 		bus_dmamap_callback_t *cb;
4452 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4453 		request_t *req;
4454 
4455 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4456 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
4457 
4458 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4459 			if (mpt->outofbeer == 0) {
4460 				mpt->outofbeer = 1;
4461 				xpt_freeze_simq(mpt->sim, 1);
4462 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4463 			}
4464 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4465 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4466 			MPTLOCK_2_CAMLOCK(mpt);
4467 			xpt_done(ccb);
4468 			CAMLOCK_2_MPTLOCK(mpt);
4469 			return;
4470 		}
4471 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4472 		if (sizeof (bus_addr_t) > 4) {
4473 			cb = mpt_execute_req_a64;
4474 		} else {
4475 			cb = mpt_execute_req;
4476 		}
4477 
4478 		req->ccb = ccb;
4479 		ccb->ccb_h.ccb_req_ptr = req;
4480 
4481 		/*
4482 		 * Record the currently active ccb and the
4483 		 * request for it in our target state area.
4484 		 */
4485 		tgt->ccb = ccb;
4486 		tgt->req = req;
4487 
4488 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4489 		ta = req->req_vbuf;
4490 
4491 		if (mpt->is_sas) {
4492 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4493 			     cmd_req->req_vbuf;
4494 			ta->QueueTag = ssp->InitiatorTag;
4495 		} else if (mpt->is_spi) {
4496 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4497 			     cmd_req->req_vbuf;
4498 			ta->QueueTag = sp->Tag;
4499 		}
4500 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4501 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4502 		ta->ReplyWord = htole32(tgt->reply_desc);
4503 		if (csio->ccb_h.target_lun > 256) {
4504 			ta->LUN[0] =
4505 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4506 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4507 		} else {
4508 			ta->LUN[1] = csio->ccb_h.target_lun;
4509 		}
4510 
4511 		ta->RelativeOffset = tgt->bytes_xfered;
4512 		ta->DataLength = ccb->csio.dxfer_len;
4513 		if (ta->DataLength > tgt->resid) {
4514 			ta->DataLength = tgt->resid;
4515 		}
4516 
4517 		/*
4518 		 * XXX Should be done after data transfer completes?
4519 		 */
4520 		tgt->resid -= csio->dxfer_len;
4521 		tgt->bytes_xfered += csio->dxfer_len;
4522 
4523 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4524 			ta->TargetAssistFlags |=
4525 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4526 		}
4527 
4528 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4529 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4530 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4531 			ta->TargetAssistFlags |=
4532 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4533 		}
4534 #endif
4535 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4536 
4537 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4538 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4539 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4540 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4541 
4542 		MPTLOCK_2_CAMLOCK(mpt);
4543 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4544 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4545 				int error;
4546 				int s = splsoftvm();
4547 				error = bus_dmamap_load(mpt->buffer_dmat,
4548 				    req->dmap, csio->data_ptr, csio->dxfer_len,
4549 				    cb, req, 0);
4550 				splx(s);
4551 				if (error == EINPROGRESS) {
4552 					xpt_freeze_simq(mpt->sim, 1);
4553 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4554 				}
4555 			} else {
4556 				/*
4557 				 * We have been given a pointer to single
4558 				 * physical buffer.
4559 				 */
4560 				struct bus_dma_segment seg;
4561 				seg.ds_addr = (bus_addr_t)
4562 				    (vm_offset_t)csio->data_ptr;
4563 				seg.ds_len = csio->dxfer_len;
4564 				(*cb)(req, &seg, 1, 0);
4565 			}
4566 		} else {
4567 			/*
4568 			 * We have been given a list of addresses.
4569 			 * This case could be easily supported but they are not
4570 			 * currently generated by the CAM subsystem so there
4571 			 * is no point in wasting the time right now.
4572 			 */
4573 			struct bus_dma_segment *sgs;
4574 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4575 				(*cb)(req, NULL, 0, EFAULT);
4576 			} else {
4577 				/* Just use the segments provided */
4578 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4579 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4580 			}
4581 		}
4582 		CAMLOCK_2_MPTLOCK(mpt);
4583 	} else {
4584 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4585 
4586 		/*
4587 		 * XXX: I don't know why this seems to happen, but
4588 		 * XXX: completing the CCB seems to make things happy.
4589 		 * XXX: This seems to happen if the initiator requests
4590 		 * XXX: enough data that we have to do multiple CTIOs.
4591 		 */
4592 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4593 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4594 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4595 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4596 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4597 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4598 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4599 			MPTLOCK_2_CAMLOCK(mpt);
4600 			xpt_done(ccb);
4601 			CAMLOCK_2_MPTLOCK(mpt);
4602 			return;
4603 		}
4604 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4605 			sp = sense;
4606 			memcpy(sp, &csio->sense_data,
4607 			   min(csio->sense_len, MPT_SENSE_SIZE));
4608 		}
4609 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4610 	}
4611 }
4612 
4613 static void
4614 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4615     uint32_t lun, int send, uint8_t *data, size_t length)
4616 {
4617 	mpt_tgt_state_t *tgt;
4618 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4619 	SGE_SIMPLE32 *se;
4620 	uint32_t flags;
4621 	uint8_t *dptr;
4622 	bus_addr_t pptr;
4623 	request_t *req;
4624 
4625 	/*
4626 	 * We enter with resid set to the data load for the command.
4627 	 */
4628 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4629 	if (length == 0 || tgt->resid == 0) {
4630 		tgt->resid = 0;
4631 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4632 		return;
4633 	}
4634 
4635 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4636 		mpt_prt(mpt, "out of resources- dropping local response\n");
4637 		return;
4638 	}
4639 	tgt->is_local = 1;
4640 
4641 
4642 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4643 	ta = req->req_vbuf;
4644 
4645 	if (mpt->is_sas) {
4646 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4647 		ta->QueueTag = ssp->InitiatorTag;
4648 	} else if (mpt->is_spi) {
4649 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4650 		ta->QueueTag = sp->Tag;
4651 	}
4652 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4653 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4654 	ta->ReplyWord = htole32(tgt->reply_desc);
4655 	if (lun > 256) {
4656 		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4657 		ta->LUN[1] = lun & 0xff;
4658 	} else {
4659 		ta->LUN[1] = lun;
4660 	}
4661 	ta->RelativeOffset = 0;
4662 	ta->DataLength = length;
4663 
4664 	dptr = req->req_vbuf;
4665 	dptr += MPT_RQSL(mpt);
4666 	pptr = req->req_pbuf;
4667 	pptr += MPT_RQSL(mpt);
4668 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4669 
4670 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4671 	memset(se, 0,sizeof (*se));
4672 
4673 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4674 	if (send) {
4675 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4676 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4677 	}
4678 	se->Address = pptr;
4679 	MPI_pSGE_SET_LENGTH(se, length);
4680 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4681 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4682 	MPI_pSGE_SET_FLAGS(se, flags);
4683 
4684 	tgt->ccb = NULL;
4685 	tgt->req = req;
4686 	tgt->resid -= length;
4687 	tgt->bytes_xfered = length;
4688 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4689 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4690 #else
4691 	tgt->state = TGT_STATE_MOVING_DATA;
4692 #endif
4693 	mpt_send_cmd(mpt, req);
4694 }
4695 
4696 /*
4697  * Abort queued up CCBs
4698  */
4699 static cam_status
4700 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4701 {
4702 	struct mpt_hdr_stailq *lp;
4703 	struct ccb_hdr *srch;
4704 	int found = 0;
4705 	union ccb *accb = ccb->cab.abort_ccb;
4706 	tgt_resource_t *trtp;
4707 
4708 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4709 
4710 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4711 		trtp = &mpt->trt_wildcard;
4712 	} else {
4713 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4714 	}
4715 
4716 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4717 		lp = &trtp->atios;
4718 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4719 		lp = &trtp->inots;
4720 	} else {
4721 		return (CAM_REQ_INVALID);
4722 	}
4723 
4724 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4725 		if (srch == &accb->ccb_h) {
4726 			found = 1;
4727 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4728 			break;
4729 		}
4730 	}
4731 	if (found) {
4732 		accb->ccb_h.status = CAM_REQ_ABORTED;
4733 		xpt_done(accb);
4734 		return (CAM_REQ_CMP);
4735 	}
4736 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
4737 	return (CAM_PATH_INVALID);
4738 }
4739 
4740 /*
4741  * Ask the MPT to abort the current target command
4742  */
4743 static int
4744 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4745 {
4746 	int error;
4747 	request_t *req;
4748 	PTR_MSG_TARGET_MODE_ABORT abtp;
4749 
4750 	req = mpt_get_request(mpt, FALSE);
4751 	if (req == NULL) {
4752 		return (-1);
4753 	}
4754 	abtp = req->req_vbuf;
4755 	memset(abtp, 0, sizeof (*abtp));
4756 
4757 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4758 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4759 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4760 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4761 	error = 0;
4762 	if (mpt->is_fc || mpt->is_sas) {
4763 		mpt_send_cmd(mpt, req);
4764 	} else {
4765 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4766 	}
4767 	return (error);
4768 }
4769 
4770 /*
4771  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4772  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4773  * FC929 to set bogus FC_RSP fields (nonzero residuals
4774  * but w/o RESID fields set). This causes QLogic initiators
4775  * to think maybe that a frame was lost.
4776  *
4777  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4778  * we use allocated requests to do TARGET_ASSIST and we
4779  * need to know when to release them.
4780  */
4781 
4782 static void
4783 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4784     uint8_t status, uint8_t const *sense_data)
4785 {
4786 	uint8_t *cmd_vbuf;
4787 	mpt_tgt_state_t *tgt;
4788 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4789 	request_t *req;
4790 	bus_addr_t paddr;
4791 	int resplen = 0;
4792 	uint32_t fl;
4793 
4794 	cmd_vbuf = cmd_req->req_vbuf;
4795 	cmd_vbuf += MPT_RQSL(mpt);
4796 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4797 
4798 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4799 		if (mpt->outofbeer == 0) {
4800 			mpt->outofbeer = 1;
4801 			xpt_freeze_simq(mpt->sim, 1);
4802 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4803 		}
4804 		if (ccb) {
4805 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4806 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4807 			MPTLOCK_2_CAMLOCK(mpt);
4808 			xpt_done(ccb);
4809 			CAMLOCK_2_MPTLOCK(mpt);
4810 		} else {
4811 			mpt_prt(mpt,
4812 			    "could not allocate status request- dropping\n");
4813 		}
4814 		return;
4815 	}
4816 	req->ccb = ccb;
4817 	if (ccb) {
4818 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4819 		ccb->ccb_h.ccb_req_ptr = req;
4820 	}
4821 
4822 	/*
4823 	 * Record the currently active ccb, if any, and the
4824 	 * request for it in our target state area.
4825 	 */
4826 	tgt->ccb = ccb;
4827 	tgt->req = req;
4828 	tgt->state = TGT_STATE_SENDING_STATUS;
4829 
4830 	tp = req->req_vbuf;
4831 	paddr = req->req_pbuf;
4832 	paddr += MPT_RQSL(mpt);
4833 
4834 	memset(tp, 0, sizeof (*tp));
4835 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4836 	if (mpt->is_fc) {
4837 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4838 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4839 		uint8_t *sts_vbuf;
4840 		uint32_t *rsp;
4841 
4842 		sts_vbuf = req->req_vbuf;
4843 		sts_vbuf += MPT_RQSL(mpt);
4844 		rsp = (uint32_t *) sts_vbuf;
4845 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4846 
4847 		/*
4848 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4849 		 * It has to be big-endian in memory and is organized
4850 		 * in 32 bit words, which are much easier to deal with
4851 		 * as words which are swizzled as needed.
4852 		 *
4853 		 * All we're filling here is the FC_RSP payload.
4854 		 * We may just have the chip synthesize it if
4855 		 * we have no residual and an OK status.
4856 		 *
4857 		 */
4858 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4859 
4860 		rsp[2] = status;
4861 		if (tgt->resid) {
4862 			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
4863 			rsp[3] = htobe32(tgt->resid);
4864 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4865 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4866 #endif
4867 		}
4868 		if (status == SCSI_STATUS_CHECK_COND) {
4869 			int i;
4870 
4871 			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
4872 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4873 			if (sense_data) {
4874 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4875 			} else {
4876 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4877 				    "TION but no sense data?\n");
4878 				memset(&rsp, 0, MPT_SENSE_SIZE);
4879 			}
4880 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4881 				rsp[i] = htobe32(rsp[i]);
4882 			}
4883 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4884 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4885 #endif
4886 		}
4887 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4888 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4889 #endif
4890 		rsp[2] = htobe32(rsp[2]);
4891 	} else if (mpt->is_sas) {
4892 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4893 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4894 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4895 	} else {
4896 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4897 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4898 		tp->StatusCode = status;
4899 		tp->QueueTag = htole16(sp->Tag);
4900 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4901 	}
4902 
4903 	tp->ReplyWord = htole32(tgt->reply_desc);
4904 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4905 
4906 #ifdef	WE_CAN_USE_AUTO_REPOST
4907 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4908 #endif
4909 	if (status == SCSI_STATUS_OK && resplen == 0) {
4910 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4911 	} else {
4912 		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4913 		fl =
4914 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4915 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4916 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4917 			MPI_SGE_FLAGS_END_OF_LIST	|
4918 			MPI_SGE_FLAGS_END_OF_BUFFER;
4919 		fl <<= MPI_SGE_FLAGS_SHIFT;
4920 		fl |= resplen;
4921 		tp->StatusDataSGE.FlagsLength = htole32(fl);
4922 	}
4923 
4924 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4925 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4926 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4927 	    req->serno, tgt->resid);
4928 	if (ccb) {
4929 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4930 		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
4931 	}
4932 	mpt_send_cmd(mpt, req);
4933 }
4934 
4935 static void
4936 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4937     tgt_resource_t *trtp, int init_id)
4938 {
4939 	struct ccb_immed_notify *inot;
4940 	mpt_tgt_state_t *tgt;
4941 
4942 	tgt = MPT_TGT_STATE(mpt, req);
4943 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4944 	if (inot == NULL) {
4945 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4946 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4947 		return;
4948 	}
4949 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4950 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4951 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4952 
4953 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4954 	inot->sense_len = 0;
4955 	memset(inot->message_args, 0, sizeof (inot->message_args));
4956 	inot->initiator_id = init_id;	/* XXX */
4957 
4958 	/*
4959 	 * This is a somewhat grotesque attempt to map from task management
4960 	 * to old style SCSI messages. God help us all.
4961 	 */
4962 	switch (fc) {
4963 	case MPT_ABORT_TASK_SET:
4964 		inot->message_args[0] = MSG_ABORT_TAG;
4965 		break;
4966 	case MPT_CLEAR_TASK_SET:
4967 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4968 		break;
4969 	case MPT_TARGET_RESET:
4970 		inot->message_args[0] = MSG_TARGET_RESET;
4971 		break;
4972 	case MPT_CLEAR_ACA:
4973 		inot->message_args[0] = MSG_CLEAR_ACA;
4974 		break;
4975 	case MPT_TERMINATE_TASK:
4976 		inot->message_args[0] = MSG_ABORT_TAG;
4977 		break;
4978 	default:
4979 		inot->message_args[0] = MSG_NOOP;
4980 		break;
4981 	}
4982 	tgt->ccb = (union ccb *) inot;
4983 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4984 	MPTLOCK_2_CAMLOCK(mpt);
4985 	xpt_done((union ccb *)inot);
4986 	CAMLOCK_2_MPTLOCK(mpt);
4987 }
4988 
4989 static void
4990 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4991 {
4992 	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
4993 	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
4994 	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
4995 	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
4996 	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
4997 	     '0',  '0',  '0',  '1'
4998 	};
4999 	struct ccb_accept_tio *atiop;
5000 	lun_id_t lun;
5001 	int tag_action = 0;
5002 	mpt_tgt_state_t *tgt;
5003 	tgt_resource_t *trtp = NULL;
5004 	U8 *lunptr;
5005 	U8 *vbuf;
5006 	U16 itag;
5007 	U16 ioindex;
5008 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
5009 	uint8_t *cdbp;
5010 
5011 	/*
5012 	 * First, DMA sync the received command-
5013 	 * which is in the *request* * phys area.
5014 	 *
5015 	 * XXX: We could optimize this for a range
5016 	 */
5017 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
5018 	    BUS_DMASYNC_POSTREAD);
5019 
5020 	/*
5021 	 * Stash info for the current command where we can get at it later.
5022 	 */
5023 	vbuf = req->req_vbuf;
5024 	vbuf += MPT_RQSL(mpt);
5025 
5026 	/*
5027 	 * Get our state pointer set up.
5028 	 */
5029 	tgt = MPT_TGT_STATE(mpt, req);
5030 	if (tgt->state != TGT_STATE_LOADED) {
5031 		mpt_tgt_dump_req_state(mpt, req);
5032 		panic("bad target state in mpt_scsi_tgt_atio");
5033 	}
5034 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
5035 	tgt->state = TGT_STATE_IN_CAM;
5036 	tgt->reply_desc = reply_desc;
5037 	ioindex = GET_IO_INDEX(reply_desc);
5038 	if (mpt->verbose >= MPT_PRT_DEBUG) {
5039 		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
5040 		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
5041 		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
5042 		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
5043 	}
5044 	if (mpt->is_fc) {
5045 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
5046 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
5047 		if (fc->FcpCntl[2]) {
5048 			/*
5049 			 * Task Management Request
5050 			 */
5051 			switch (fc->FcpCntl[2]) {
5052 			case 0x2:
5053 				fct = MPT_ABORT_TASK_SET;
5054 				break;
5055 			case 0x4:
5056 				fct = MPT_CLEAR_TASK_SET;
5057 				break;
5058 			case 0x20:
5059 				fct = MPT_TARGET_RESET;
5060 				break;
5061 			case 0x40:
5062 				fct = MPT_CLEAR_ACA;
5063 				break;
5064 			case 0x80:
5065 				fct = MPT_TERMINATE_TASK;
5066 				break;
5067 			default:
5068 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
5069 				    fc->FcpCntl[2]);
5070 				mpt_scsi_tgt_status(mpt, 0, req,
5071 				    SCSI_STATUS_OK, 0);
5072 				return;
5073 			}
5074 		} else {
5075 			switch (fc->FcpCntl[1]) {
5076 			case 0:
5077 				tag_action = MSG_SIMPLE_Q_TAG;
5078 				break;
5079 			case 1:
5080 				tag_action = MSG_HEAD_OF_Q_TAG;
5081 				break;
5082 			case 2:
5083 				tag_action = MSG_ORDERED_Q_TAG;
5084 				break;
5085 			default:
5086 				/*
5087 				 * Bah. Ignore Untagged Queing and ACA
5088 				 */
5089 				tag_action = MSG_SIMPLE_Q_TAG;
5090 				break;
5091 			}
5092 		}
5093 		tgt->resid = be32toh(fc->FcpDl);
5094 		cdbp = fc->FcpCdb;
5095 		lunptr = fc->FcpLun;
5096 		itag = be16toh(fc->OptionalOxid);
5097 	} else if (mpt->is_sas) {
5098 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
5099 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
5100 		cdbp = ssp->CDB;
5101 		lunptr = ssp->LogicalUnitNumber;
5102 		itag = ssp->InitiatorTag;
5103 	} else {
5104 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
5105 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
5106 		cdbp = sp->CDB;
5107 		lunptr = sp->LogicalUnitNumber;
5108 		itag = sp->Tag;
5109 	}
5110 
5111 	/*
5112 	 * Generate a simple lun
5113 	 */
5114 	switch (lunptr[0] & 0xc0) {
5115 	case 0x40:
5116 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
5117 		break;
5118 	case 0:
5119 		lun = lunptr[1];
5120 		break;
5121 	default:
5122 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
5123 		lun = 0xffff;
5124 		break;
5125 	}
5126 
5127 	/*
5128 	 * Deal with non-enabled or bad luns here.
5129 	 */
5130 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
5131 	    mpt->trt[lun].enabled == 0) {
5132 		if (mpt->twildcard) {
5133 			trtp = &mpt->trt_wildcard;
5134 		} else if (fct == MPT_NIL_TMT_VALUE) {
5135 			/*
5136 			 * In this case, we haven't got an upstream listener
5137 			 * for either a specific lun or wildcard luns. We
5138 			 * have to make some sensible response. For regular
5139 			 * inquiry, just return some NOT HERE inquiry data.
5140 			 * For VPD inquiry, report illegal field in cdb.
5141 			 * For REQUEST SENSE, just return NO SENSE data.
5142 			 * REPORT LUNS gets illegal command.
5143 			 * All other commands get 'no such device'.
5144 			 */
5145 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
5146 			size_t len;
5147 
5148 			memset(buf, 0, MPT_SENSE_SIZE);
5149 			cond = SCSI_STATUS_CHECK_COND;
5150 			buf[0] = 0xf0;
5151 			buf[2] = 0x5;
5152 			buf[7] = 0x8;
5153 			sp = buf;
5154 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5155 
5156 			switch (cdbp[0]) {
5157 			case INQUIRY:
5158 			{
5159 				if (cdbp[1] != 0) {
5160 					buf[12] = 0x26;
5161 					buf[13] = 0x01;
5162 					break;
5163 				}
5164 				len = min(tgt->resid, cdbp[4]);
5165 				len = min(len, sizeof (null_iqd));
5166 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5167 				    "local inquiry %ld bytes\n", (long) len);
5168 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5169 				    null_iqd, len);
5170 				return;
5171 			}
5172 			case REQUEST_SENSE:
5173 			{
5174 				buf[2] = 0x0;
5175 				len = min(tgt->resid, cdbp[4]);
5176 				len = min(len, sizeof (buf));
5177 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5178 				    "local reqsense %ld bytes\n", (long) len);
5179 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5180 				    buf, len);
5181 				return;
5182 			}
5183 			case REPORT_LUNS:
5184 				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
5185 				buf[12] = 0x26;
5186 				return;
5187 			default:
5188 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5189 				    "CMD 0x%x to unmanaged lun %u\n",
5190 				    cdbp[0], lun);
5191 				buf[12] = 0x25;
5192 				break;
5193 			}
5194 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
5195 			return;
5196 		}
5197 		/* otherwise, leave trtp NULL */
5198 	} else {
5199 		trtp = &mpt->trt[lun];
5200 	}
5201 
5202 	/*
5203 	 * Deal with any task management
5204 	 */
5205 	if (fct != MPT_NIL_TMT_VALUE) {
5206 		if (trtp == NULL) {
5207 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
5208 			    fct);
5209 			mpt_scsi_tgt_status(mpt, 0, req,
5210 			    SCSI_STATUS_OK, 0);
5211 		} else {
5212 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5213 			    GET_INITIATOR_INDEX(reply_desc));
5214 		}
5215 		return;
5216 	}
5217 
5218 
5219 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5220 	if (atiop == NULL) {
5221 		mpt_lprt(mpt, MPT_PRT_WARN,
5222 		    "no ATIOs for lun %u- sending back %s\n", lun,
5223 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
5224 		mpt_scsi_tgt_status(mpt, NULL, req,
5225 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5226 		    NULL);
5227 		return;
5228 	}
5229 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5230 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
5231 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
5232 	atiop->ccb_h.ccb_mpt_ptr = mpt;
5233 	atiop->ccb_h.status = CAM_CDB_RECVD;
5234 	atiop->ccb_h.target_lun = lun;
5235 	atiop->sense_len = 0;
5236 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
5237 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
5238 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
5239 
5240 	/*
5241 	 * The tag we construct here allows us to find the
5242 	 * original request that the command came in with.
5243 	 *
5244 	 * This way we don't have to depend on anything but the
5245 	 * tag to find things when CCBs show back up from CAM.
5246 	 */
5247 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5248 	tgt->tag_id = atiop->tag_id;
5249 	if (tag_action) {
5250 		atiop->tag_action = tag_action;
5251 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
5252 	}
5253 	if (mpt->verbose >= MPT_PRT_DEBUG) {
5254 		int i;
5255 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
5256 		    atiop->ccb_h.target_lun);
5257 		for (i = 0; i < atiop->cdb_len; i++) {
5258 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
5259 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
5260 		}
5261 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
5262 	    	    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
5263 	}
5264 
5265 	MPTLOCK_2_CAMLOCK(mpt);
5266 	xpt_done((union ccb *)atiop);
5267 	CAMLOCK_2_MPTLOCK(mpt);
5268 }
5269 
5270 static void
5271 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
5272 {
5273 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5274 
5275 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
5276 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
5277 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
5278 	    tgt->tag_id, tgt->state);
5279 }
5280 
5281 static void
5282 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
5283 {
5284 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
5285 	    req->index, req->index, req->state);
5286 	mpt_tgt_dump_tgt_state(mpt, req);
5287 }
5288 
5289 static int
5290 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
5291     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
5292 {
5293 	int dbg;
5294 	union ccb *ccb;
5295 	U16 status;
5296 
5297 	if (reply_frame == NULL) {
5298 		/*
5299 		 * Figure out what the state of the command is.
5300 		 */
5301 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5302 
5303 #ifdef	INVARIANTS
5304 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
5305 		if (tgt->req) {
5306 			mpt_req_not_spcl(mpt, tgt->req,
5307 			    "turbo scsi_tgt_reply associated req", __LINE__);
5308 		}
5309 #endif
5310 		switch(tgt->state) {
5311 		case TGT_STATE_LOADED:
5312 			/*
5313 			 * This is a new command starting.
5314 			 */
5315 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
5316 			break;
5317 		case TGT_STATE_MOVING_DATA:
5318 		{
5319 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
5320 
5321 			ccb = tgt->ccb;
5322 			if (tgt->req == NULL) {
5323 				panic("mpt: turbo target reply with null "
5324 				    "associated request moving data");
5325 				/* NOTREACHED */
5326 			}
5327 			if (ccb == NULL) {
5328 				if (tgt->is_local == 0) {
5329 					panic("mpt: turbo target reply with "
5330 					    "null associated ccb moving data");
5331 					/* NOTREACHED */
5332 				}
5333 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5334 				    "TARGET_ASSIST local done\n");
5335 				TAILQ_REMOVE(&mpt->request_pending_list,
5336 				    tgt->req, links);
5337 				mpt_free_request(mpt, tgt->req);
5338 				tgt->req = NULL;
5339 				mpt_scsi_tgt_status(mpt, NULL, req,
5340 				    0, NULL);
5341 				return (TRUE);
5342 			}
5343 			tgt->ccb = NULL;
5344 			tgt->nxfers++;
5345 			mpt_req_untimeout(req, mpt_timeout, ccb);
5346 			mpt_lprt(mpt, MPT_PRT_DEBUG,
5347 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5348 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5349 			/*
5350 			 * Free the Target Assist Request
5351 			 */
5352 			KASSERT(tgt->req->ccb == ccb,
5353 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
5354 			    tgt->req->serno, tgt->req->ccb));
5355 			TAILQ_REMOVE(&mpt->request_pending_list,
5356 			    tgt->req, links);
5357 			mpt_free_request(mpt, tgt->req);
5358 			tgt->req = NULL;
5359 
5360 			/*
5361 			 * Do we need to send status now? That is, are
5362 			 * we done with all our data transfers?
5363 			 */
5364 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
5365 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5366 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5367 				KASSERT(ccb->ccb_h.status,
5368 				    ("zero ccb sts at %d\n", __LINE__));
5369 				tgt->state = TGT_STATE_IN_CAM;
5370 				if (mpt->outofbeer) {
5371 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5372 					mpt->outofbeer = 0;
5373 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5374 				}
5375 				MPTLOCK_2_CAMLOCK(mpt);
5376 				xpt_done(ccb);
5377 				CAMLOCK_2_MPTLOCK(mpt);
5378 				break;
5379 			}
5380 			/*
5381 			 * Otherwise, send status (and sense)
5382 			 */
5383 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5384 				sp = sense;
5385 				memcpy(sp, &ccb->csio.sense_data,
5386 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
5387 			}
5388 			mpt_scsi_tgt_status(mpt, ccb, req,
5389 			    ccb->csio.scsi_status, sp);
5390 			break;
5391 		}
5392 		case TGT_STATE_SENDING_STATUS:
5393 		case TGT_STATE_MOVING_DATA_AND_STATUS:
5394 		{
5395 			int ioindex;
5396 			ccb = tgt->ccb;
5397 
5398 			if (tgt->req == NULL) {
5399 				panic("mpt: turbo target reply with null "
5400 				    "associated request sending status");
5401 				/* NOTREACHED */
5402 			}
5403 
5404 			if (ccb) {
5405 				tgt->ccb = NULL;
5406 				if (tgt->state ==
5407 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
5408 					tgt->nxfers++;
5409 				}
5410 				mpt_req_untimeout(req, mpt_timeout, ccb);
5411 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5412 					ccb->ccb_h.status |= CAM_SENT_SENSE;
5413 				}
5414 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5415 				    "TARGET_STATUS tag %x sts %x flgs %x req "
5416 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5417 				    ccb->ccb_h.flags, tgt->req);
5418 				/*
5419 				 * Free the Target Send Status Request
5420 				 */
5421 				KASSERT(tgt->req->ccb == ccb,
5422 				    ("tgt->req %p:%u tgt->req->ccb %p",
5423 				    tgt->req, tgt->req->serno, tgt->req->ccb));
5424 				/*
5425 				 * Notify CAM that we're done
5426 				 */
5427 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5428 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5429 				KASSERT(ccb->ccb_h.status,
5430 				    ("ZERO ccb sts at %d\n", __LINE__));
5431 				tgt->ccb = NULL;
5432 			} else {
5433 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5434 				    "TARGET_STATUS non-CAM for  req %p:%u\n",
5435 				    tgt->req, tgt->req->serno);
5436 			}
5437 			TAILQ_REMOVE(&mpt->request_pending_list,
5438 			    tgt->req, links);
5439 			mpt_free_request(mpt, tgt->req);
5440 			tgt->req = NULL;
5441 
5442 			/*
5443 			 * And re-post the Command Buffer.
5444 			 * This will reset the state.
5445 			 */
5446 			ioindex = GET_IO_INDEX(reply_desc);
5447 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5448 			tgt->is_local = 0;
5449 			mpt_post_target_command(mpt, req, ioindex);
5450 
5451 			/*
5452 			 * And post a done for anyone who cares
5453 			 */
5454 			if (ccb) {
5455 				if (mpt->outofbeer) {
5456 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5457 					mpt->outofbeer = 0;
5458 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5459 				}
5460 				MPTLOCK_2_CAMLOCK(mpt);
5461 				xpt_done(ccb);
5462 				CAMLOCK_2_MPTLOCK(mpt);
5463 			}
5464 			break;
5465 		}
5466 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
5467 			tgt->state = TGT_STATE_LOADED;
5468 			break;
5469 		default:
5470 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5471 			    "Reply Function\n", tgt->state);
5472 		}
5473 		return (TRUE);
5474 	}
5475 
5476 	status = le16toh(reply_frame->IOCStatus);
5477 	if (status != MPI_IOCSTATUS_SUCCESS) {
5478 		dbg = MPT_PRT_ERROR;
5479 	} else {
5480 		dbg = MPT_PRT_DEBUG1;
5481 	}
5482 
5483 	mpt_lprt(mpt, dbg,
5484 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5485 	     req, req->serno, reply_frame, reply_frame->Function, status);
5486 
5487 	switch (reply_frame->Function) {
5488 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5489 	{
5490 		mpt_tgt_state_t *tgt;
5491 #ifdef	INVARIANTS
5492 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5493 #endif
5494 		if (status != MPI_IOCSTATUS_SUCCESS) {
5495 			/*
5496 			 * XXX What to do?
5497 			 */
5498 			break;
5499 		}
5500 		tgt = MPT_TGT_STATE(mpt, req);
5501 		KASSERT(tgt->state == TGT_STATE_LOADING,
5502 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
5503 		mpt_assign_serno(mpt, req);
5504 		tgt->state = TGT_STATE_LOADED;
5505 		break;
5506 	}
5507 	case MPI_FUNCTION_TARGET_ASSIST:
5508 #ifdef	INVARIANTS
5509 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5510 #endif
5511 		mpt_prt(mpt, "target assist completion\n");
5512 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5513 		mpt_free_request(mpt, req);
5514 		break;
5515 	case MPI_FUNCTION_TARGET_STATUS_SEND:
5516 #ifdef	INVARIANTS
5517 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5518 #endif
5519 		mpt_prt(mpt, "status send completion\n");
5520 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5521 		mpt_free_request(mpt, req);
5522 		break;
5523 	case MPI_FUNCTION_TARGET_MODE_ABORT:
5524 	{
5525 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5526 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5527 		PTR_MSG_TARGET_MODE_ABORT abtp =
5528 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5529 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5530 #ifdef	INVARIANTS
5531 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5532 #endif
5533 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5534 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5535 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5536 		mpt_free_request(mpt, req);
5537 		break;
5538 	}
5539 	default:
5540 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5541 		    "0x%x\n", reply_frame->Function);
5542 		break;
5543 	}
5544 	return (TRUE);
5545 }
5546