/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#if __FreeBSD_version >= 500000
#include <sys/sysctl.h>
#endif
#include <sys/callout.h>
#include <sys/kthread.h>

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
					MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t	mpt_cam_probe;
static mpt_attach_handler_t	mpt_cam_attach;
static mpt_enable_handler_t	mpt_cam_enable;
static mpt_ready_handler_t	mpt_cam_ready;
static mpt_event_handler_t	mpt_cam_event;
static mpt_reset_handler_t	mpt_cam_ioc_reset;
static mpt_detach_handler_t	mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name		= "mpt_cam",
	.probe		= mpt_cam_probe,
	.attach		= mpt_cam_attach,
	.enable		= mpt_cam_enable,
	.ready		= mpt_cam_ready,
	.event		= mpt_cam_event,
	.reset		= mpt_cam_ioc_reset,
	.detach		= mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);

int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t	 handler;
	int		 maxq;
	int		 error;

	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
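	/*
	 * maxq is the smaller of the IOC's advertised credit count and the
	 * driver's own request pool. Illustrative example (the numbers are
	 * made up, not from any particular IOC): with 512 global credits
	 * and MPT_MAX_REQUESTS(mpt) == 256, maxq starts at 256; the ELS and
	 * TMF reservations below then subtract from it before it sizes the
	 * CAM SIM queue via cam_simq_alloc(maxq).
	 */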

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_io_handler_id);
	if (error != 0) {
		goto cleanup0;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_tmf_handler_id);
	if (error != 0) {
		goto cleanup0;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			goto cleanup0;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			goto cleanup0;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			goto cleanup0;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		goto cleanup0;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup0;
	}

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock
	 */
	MPTLOCK_2_CAMLOCK(mpt);

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
	    mpt->unit, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		CAMLOCK_2_MPTLOCK(mpt);
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
	    mpt->unit, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		goto cleanup;
	}
	CAMLOCK_2_MPTLOCK(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	CAMLOCK_2_MPTLOCK(mpt);
cleanup0:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information.
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
		 mpt->mpt_fcport_page0.Header.PageVersion,
		 mpt->mpt_fcport_page0.Header.PageLength,
		 mpt->mpt_fcport_page0.Header.PageNumber,
		 mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);
#if __FreeBSD_version >= 500000
	{
		struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
		struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

		snprintf(mpt->scinfo.fc.wwnn,
		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWNN.High,
		    mpt->mpt_fcport_page0.WWNN.Low);

		snprintf(mpt->scinfo.fc.wwpn,
		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWPN.High,
		    mpt->mpt_fcport_page0.WWPN.Low);

		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
		       "World Wide Node Name");

		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
		       "World Wide Port Name");
	}
#endif
	return (0);
}
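
/*
 * Once attached, the WWNN/WWPN strings published above can be read from
 * userland under the device's sysctl tree. A sketch of what that looks
 * like (assuming the adapter probed as unit 0; the values shown are
 * made-up examples, not real hardware):
 *
 *   # sysctl dev.mpt.0.wwnn dev.mpt.0.wwpn
 *   dev.mpt.0.wwnn: 0x200000a0b8123456
 *   dev.mpt.0.wwpn: 0x210000a0b8123456
 */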

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = le32toh(fc.Flags);

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = htole32(fl);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

/*
 * Read SAS configuration information. Nothing to do yet.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	return (0);
}

/*
 * Set SAS configuration information. Nothing to do yet.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	return (0);
}

/*
 * Read SCSI configuration information.
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
	int error;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

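	/*
	 * SPI Port Page 1's Configuration word carries the port's SCSI ID
	 * in the low 16 bits and a one-hot bitmask of that ID in the high
	 * 16 bits. Worked example (illustrative): with mpt_ini_id == 7,
	 * pp1val = ((1 << 7) << 16) | 7 = 0x00800007.
	 */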
	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x) - should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets and does not require us to
	 * do Domain Validation.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	j = mpt->mpt_port_page2.PortFlags &
	    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* &&
	    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

int
mpt_cam_enable(struct mpt_softc *mpt)
{
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			return (EIO);
		}
		if (mpt_set_initial_config_fc(mpt)) {
			return (EIO);
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			return (EIO);
		}
		if (mpt_set_initial_config_sas(mpt)) {
			return (EIO);
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			return (EIO);
		}
		if (mpt_set_initial_config_spi(mpt)) {
			return (EIO);
		}
	}
	return (0);
}

void
mpt_cam_ready(struct mpt_softc *mpt)
{
	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		MPT_LOCK(mpt);
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
		MPT_UNLOCK(mpt);
	}
}

void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       mpt->scsi_tgt_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}

	if (mpt->sim != NULL) {
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim, TRUE);
		mpt->sim = NULL;
		CAMLOCK_2_MPTLOCK(mpt);
	}

	if (mpt->phydisk_sim != NULL) {
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim, TRUE);
		mpt->phydisk_sim = NULL;
		CAMLOCK_2_MPTLOCK(mpt);
	}
}

/*
 * This routine is used after a system crash to dump core onto the swap
 * device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	MPT_LOCK(mpt);
	mpt_intr(mpt);
	MPT_UNLOCK(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb	 *ccb;
	struct mpt_softc *mpt;
	request_t	 *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	MPT_LOCK(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
	MPT_UNLOCK(mpt);
}

/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called
 * directly.
 *
 * Takes a list of physical segments and builds the SGL for the SCSI IO
 * command and forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */
	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address.Low = dm_segs->ds_addr;
		if (sizeof(bus_addr_t) > 4) {
			se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
		}
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
				MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
	}
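	/*
	 * Illustrative walk-through (not from a real trace): for a
	 * 3-segment read that fits entirely in the command frame,
	 * first_lim == nseg == 3, so segments 0 and 1 carry only the base
	 * 'flags'; segment 2 additionally gets LAST_ELEMENT (last in this
	 * list) plus END_OF_LIST | END_OF_BUFFER (last overall).
	 */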

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
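	/*
	 * ChainOffset is expressed in 32-bit words, hence the >> 2.
	 * Worked example (the offset is made up for illustration): if the
	 * chain descriptor starts 96 bytes into the request frame,
	 * ChainOffset is 96 / 4 = 24.
	 */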
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    (uint32_t) ((uint64_t)chain_list_addr >> 32);
		}
		ce->Address.Low = (uint32_t) chain_list_addr;
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the size in bytes of
			 * the number of segments it holds, plus the next
			 * chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}
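		/*
		 * Worked example of the arithmetic above (sizes per the MPI
		 * headers: SGE_SIMPLE64 and SGE_CHAIN64 are 12 bytes each;
		 * the segment count is made up for illustration). With 10
		 * simple elements in this chain list followed by another
		 * chain descriptor: Length = 10 * 12 = 120 bytes,
		 * NextChainOffset = 120 / 4 = 30 words, and Length is then
		 * bumped to 120 + 12 = 132 to cover the trailing chain
		 * element as well.
		 */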

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address.Low = dm_segs->ds_addr;
			if (sizeof (bus_addr_t) > 4) {
				se->Address.High =
				    ((uint64_t)dm_segs->ds_addr) >> 32;
			}
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
					MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se++;
			seg++;
			dm_segs++;
		}

    next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			CAMLOCK_2_MPTLOCK(mpt);
			nrq = mpt_get_request(mpt, FALSE);
			MPTLOCK_2_CAMLOCK(mpt);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * One last check whether this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		ccb->ccb_h.timeout_ch =
			timeout(mpt_timeout, (caddr_t)ccb,
				(ccb->ccb_h.timeout * hz) / 1000);
	} else {
		callout_handle_init(&ccb->ccb_h.timeout_ch);
	}
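	/*
	 * The CCB timeout is in milliseconds and timeout(9) wants ticks,
	 * hence the (timeout * hz) / 1000 conversion. Example with made-up
	 * numbers: a 60000 ms (60 s) CCB timeout on a kernel running at
	 * hz = 1000 schedules mpt_timeout 60000 ticks from now.
	 */
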
1304 	if (mpt->verbose > MPT_PRT_DEBUG) {
1305 		int nc = 0;
1306 		mpt_print_request(req->req_vbuf);
1307 		for (trq = req->chain; trq; trq = trq->chain) {
1308 			printf("  Additional Chain Area %d\n", nc++);
1309 			mpt_dump_sgl(trq->req_vbuf, 0);
1310 		}
1311 	}
1312 
1313 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1314 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1315 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1316 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1317 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1318 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1319 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1320 		} else {
1321 			tgt->state = TGT_STATE_MOVING_DATA;
1322 		}
1323 #else
1324 		tgt->state = TGT_STATE_MOVING_DATA;
1325 #endif
1326 	}
1327 	CAMLOCK_2_MPTLOCK(mpt);
1328 	mpt_send_cmd(mpt, req);
1329 	MPTLOCK_2_CAMLOCK(mpt);
1330 }
1331 
1332 static void
1333 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1334 {
1335 	request_t *req, *trq;
1336 	char *mpt_off;
1337 	union ccb *ccb;
1338 	struct mpt_softc *mpt;
1339 	int seg, first_lim;
1340 	uint32_t flags, nxt_off;
1341 	void *sglp = NULL;
1342 	MSG_REQUEST_HEADER *hdrp;
1343 	SGE_SIMPLE32 *se;
1344 	SGE_CHAIN32 *ce;
1345 	int istgt = 0;
1346 
1347 	req = (request_t *)arg;
1348 	ccb = req->ccb;
1349 
1350 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1351 	req = ccb->ccb_h.ccb_req_ptr;
1352 
1353 	hdrp = req->req_vbuf;
1354 	mpt_off = req->req_vbuf;
1355 
1356 
1357 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1358 		error = EFBIG;
1359 	}
1360 
1361 	if (error == 0) {
1362 		switch (hdrp->Function) {
1363 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1364 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1365 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1366 			break;
1367 		case MPI_FUNCTION_TARGET_ASSIST:
1368 			istgt = 1;
1369 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1370 			break;
1371 		default:
1372 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1373 			    hdrp->Function);
1374 			error = EINVAL;
1375 			break;
1376 		}
1377 	}
1378 
1379 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1380 		error = EFBIG;
1381 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1382 		    nseg, mpt->max_seg_cnt);
1383 	}
1384 
1385 bad:
1386 	if (error != 0) {
1387 		if (error != EFBIG && error != ENOMEM) {
1388 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1389 		}
1390 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1391 			cam_status status;
1392 			mpt_freeze_ccb(ccb);
1393 			if (error == EFBIG) {
1394 				status = CAM_REQ_TOO_BIG;
1395 			} else if (error == ENOMEM) {
1396 				if (mpt->outofbeer == 0) {
1397 					mpt->outofbeer = 1;
1398 					xpt_freeze_simq(mpt->sim, 1);
1399 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1400 					    "FREEZEQ\n");
1401 				}
1402 				status = CAM_REQUEUE_REQ;
1403 			} else {
1404 				status = CAM_REQ_CMP_ERR;
1405 			}
1406 			mpt_set_ccb_status(ccb, status);
1407 		}
1408 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1409 			request_t *cmd_req =
1410 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1411 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1412 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1413 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1414 		}
1415 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1416 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1417 		xpt_done(ccb);
1418 		CAMLOCK_2_MPTLOCK(mpt);
1419 		mpt_free_request(mpt, req);
1420 		MPTLOCK_2_CAMLOCK(mpt);
1421 		return;
1422 	}
1423 
1424 	/*
1425 	 * No data to transfer?
1426 	 * Just make a single simple SGL with zero length.
1427 	 */
1428 
1429 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1430 		int tidx = ((char *)sglp) - mpt_off;
1431 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1432 	}
1433 
1434 	if (nseg == 0) {
1435 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1436 		MPI_pSGE_SET_FLAGS(se1,
1437 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1438 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1439 		goto out;
1440 	}
1441 
1442 
1443 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1444 	if (istgt == 0) {
1445 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1446 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1447 		}
1448 	} else {
1449 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1450 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1451 		}
1452 	}
1453 
1454 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1455 		bus_dmasync_op_t op;
1456 		if (istgt) {
1457 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1458 				op = BUS_DMASYNC_PREREAD;
1459 			} else {
1460 				op = BUS_DMASYNC_PREWRITE;
1461 			}
1462 		} else {
1463 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1464 				op = BUS_DMASYNC_PREWRITE;
1465 			} else {
1466 				op = BUS_DMASYNC_PREREAD;
1467 			}
1468 		}
1469 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1470 	}
1471 
1472 	/*
1473 	 * Okay, fill in what we can at the end of the command frame.
1474 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1475 	 * the command frame.
1476 	 *
1477 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1478 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1479 	 * that.
1480 	 */
1481 
1482 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1483 		first_lim = nseg;
1484 	} else {
1485 		/*
1486 		 * Leave room for CHAIN element
1487 		 */
1488 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1489 	}
1490 
1491 	se = (SGE_SIMPLE32 *) sglp;
1492 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1493 		uint32_t tf;
1494 
1495 		memset(se, 0,sizeof (*se));
1496 		se->Address = dm_segs->ds_addr;
1497 
1498 
1499 
1500 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1501 		tf = flags;
1502 		if (seg == first_lim - 1) {
1503 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1504 		}
1505 		if (seg == nseg - 1) {
1506 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1507 				MPI_SGE_FLAGS_END_OF_BUFFER;
1508 		}
1509 		MPI_pSGE_SET_FLAGS(se, tf);
1510 	}
1511 
1512 	if (seg == nseg) {
1513 		goto out;
1514 	}
1515 
1516 	/*
1517 	 * Tell the IOC where to find the first chain element.
1518 	 */
1519 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1520 	nxt_off = MPT_RQSL(mpt);
1521 	trq = req;
1522 
1523 	/*
1524 	 * Make up the rest of the data segments out of a chain element
1525 	 * (contiained in the current request frame) which points to
1526 	 * SIMPLE32 elements in the next request frame, possibly ending
1527 	 * with *another* chain element (if there's more).
1528 	 */
1529 	while (seg < nseg) {
1530 		int this_seg_lim;
1531 		uint32_t tf, cur_off;
1532 		bus_addr_t chain_list_addr;
1533 
1534 		/*
1535 		 * Point to the chain descriptor. Note that the chain
1536 		 * descriptor is at the end of the *previous* list (whether
1537 		 * chain or simple).
1538 		 */
1539 		ce = (SGE_CHAIN32 *) se;
1540 
1541 		/*
1542 		 * Before we change our current pointer, make  sure we won't
1543 		 * overflow the request area with this frame. Note that we
1544 		 * test against 'greater than' here as it's okay in this case
1545 		 * to have next offset be just outside the request area.
1546 		 */
1547 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1548 			nxt_off = MPT_REQUEST_AREA;
1549 			goto next_chain;
1550 		}
1551 
1552 		/*
1553 		 * Set our SGE element pointer to the beginning of the chain
1554 		 * list and update our next chain list offset.
1555 		 */
1556 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1557 		cur_off = nxt_off;
1558 		nxt_off += MPT_RQSL(mpt);
1559 
1560 		/*
1561 		 * Now initialized the chain descriptor.
1562 		 */
1563 		memset(ce, 0, sizeof (*ce));
1564 
1565 		/*
1566 		 * Get the physical address of the chain list.
1567 		 */
1568 		chain_list_addr = trq->req_pbuf;
1569 		chain_list_addr += cur_off;
1570 
1571 
1572 
1573 		ce->Address = chain_list_addr;
1574 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1575 
1576 
1577 		/*
1578 		 * If we have more than a frame's worth of segments left,
1579 		 * set up the chain list to have the last element be another
1580 		 * chain descriptor.
1581 		 */
1582 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1583 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1584 			/*
1585 			 * The length of the chain is the length in bytes of the
1586 			 * number of segments plus the next chain element.
1587 			 *
1588 			 * The next chain descriptor offset is the length,
1589 			 * in words, of the number of segments.
1590 			 */
1591 			ce->Length = (this_seg_lim - seg) *
1592 			    sizeof (SGE_SIMPLE32);
1593 			ce->NextChainOffset = ce->Length >> 2;
1594 			ce->Length += sizeof (SGE_CHAIN32);
1595 		} else {
1596 			this_seg_lim = nseg;
1597 			ce->Length = (this_seg_lim - seg) *
1598 			    sizeof (SGE_SIMPLE32);
1599 		}
1600 
1601 		/*
1602 		 * Fill in the chain list SGE elements with our segment data.
1603 		 *
1604 		 * If we're the last element in this chain list, set the last
1605 		 * element flag. If we're the completely last element period,
1606 		 * set the end of list and end of buffer flags.
1607 		 */
1608 		while (seg < this_seg_lim) {
1609 			memset(se, 0, sizeof (*se));
1610 			se->Address = dm_segs->ds_addr;
1611 
1612 
1613 
1614 
1615 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1616 			tf = flags;
1617 			if (seg ==  this_seg_lim - 1) {
1618 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1619 			}
1620 			if (seg == nseg - 1) {
1621 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1622 					MPI_SGE_FLAGS_END_OF_BUFFER;
1623 			}
1624 			MPI_pSGE_SET_FLAGS(se, tf);
1625 			se++;
1626 			seg++;
1627 			dm_segs++;
1628 		}
1629 
1630     next_chain:
1631 		/*
1632 		 * If we have more segments to do and we've used up all of
1633 		 * the space in a request area, go allocate another one
1634 		 * and chain to that.
1635 		 */
1636 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1637 			request_t *nrq;
1638 
1639 			CAMLOCK_2_MPTLOCK(mpt);
1640 			nrq = mpt_get_request(mpt, FALSE);
1641 			MPTLOCK_2_CAMLOCK(mpt);
1642 
1643 			if (nrq == NULL) {
1644 				error = ENOMEM;
1645 				goto bad;
1646 			}
1647 
1648 			/*
1649 			 * Append the new request area on the tail of our list.
1650 			 */
1651 			if ((trq = req->chain) == NULL) {
1652 				req->chain = nrq;
1653 			} else {
1654 				while (trq->chain != NULL) {
1655 					trq = trq->chain;
1656 				}
1657 				trq->chain = nrq;
1658 			}
1659 			trq = nrq;
1660 			mpt_off = trq->req_vbuf;
1661 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1662 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1663 			}
1664 			nxt_off = 0;
1665 		}
1666 	}
1667 out:
1668 
1669 	/*
1670 	 * Last time we need to check if this CCB needs to be aborted.
1671 	 */
1672 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1673 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1674 			request_t *cmd_req =
1675 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1676 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1677 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1678 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1679 		}
1680 		mpt_prt(mpt,
1681 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1682 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1683 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1684 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1685 		}
1686 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1687 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1688 		xpt_done(ccb);
1689 		CAMLOCK_2_MPTLOCK(mpt);
1690 		mpt_free_request(mpt, req);
1691 		MPTLOCK_2_CAMLOCK(mpt);
1692 		return;
1693 	}
1694 
1695 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1696 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1697 		ccb->ccb_h.timeout_ch =
1698 			timeout(mpt_timeout, (caddr_t)ccb,
1699 				(ccb->ccb_h.timeout * hz) / 1000);
1700 	} else {
1701 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1702 	}
1703 	if (mpt->verbose > MPT_PRT_DEBUG) {
1704 		int nc = 0;
1705 		mpt_print_request(req->req_vbuf);
1706 		for (trq = req->chain; trq; trq = trq->chain) {
1707 			printf("  Additional Chain Area %d\n", nc++);
1708 			mpt_dump_sgl(trq->req_vbuf, 0);
1709 		}
1710 	}
1711 
1712 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1713 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1714 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1715 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1716 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1717 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1718 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1719 		} else {
1720 			tgt->state = TGT_STATE_MOVING_DATA;
1721 		}
1722 #else
1723 		tgt->state = TGT_STATE_MOVING_DATA;
1724 #endif
1725 	}
1726 	CAMLOCK_2_MPTLOCK(mpt);
1727 	mpt_send_cmd(mpt, req);
1728 	MPTLOCK_2_CAMLOCK(mpt);
1729 }
1730 
1731 static void
1732 mpt_start(struct cam_sim *sim, union ccb *ccb)
1733 {
1734 	request_t *req;
1735 	struct mpt_softc *mpt;
1736 	MSG_SCSI_IO_REQUEST *mpt_req;
1737 	struct ccb_scsiio *csio = &ccb->csio;
1738 	struct ccb_hdr *ccbh = &ccb->ccb_h;
1739 	bus_dmamap_callback_t *cb;
1740 	target_id_t tgt;
1741 	int raid_passthru;
1742 
1743 	/* Get the pointer for the physical addapter */
1744 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1745 	raid_passthru = (sim == mpt->phydisk_sim);
1746 
1747 	CAMLOCK_2_MPTLOCK(mpt);
1748 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
1749 		if (mpt->outofbeer == 0) {
1750 			mpt->outofbeer = 1;
1751 			xpt_freeze_simq(mpt->sim, 1);
1752 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
1753 		}
1754 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1755 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
1756 		MPTLOCK_2_CAMLOCK(mpt);
1757 		xpt_done(ccb);
1758 		return;
1759 	}
1760 #ifdef	INVARIANTS
1761 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
1762 #endif
1763 	MPTLOCK_2_CAMLOCK(mpt);
1764 
1765 	if (sizeof (bus_addr_t) > 4) {
1766 		cb = mpt_execute_req_a64;
1767 	} else {
1768 		cb = mpt_execute_req;
1769 	}
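	/*
	 * (On platforms where bus addresses can exceed 32 bits, the
	 * a64 callback builds 64-bit SGEs so the IOC can be handed
	 * segments above 4GB; otherwise 32-bit SGEs suffice.)
	 */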
1770 
1771 	/*
1772 	 * Link the ccb and the request structure so we can find
1773 	 * the other knowing either the request or the ccb
1774 	 */
1775 	req->ccb = ccb;
1776 	ccb->ccb_h.ccb_req_ptr = req;
1777 
1778 	/* Now we build the command for the IOC */
1779 	mpt_req = req->req_vbuf;
1780 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
1781 
1782 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1783 	if (raid_passthru) {
1784 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
1785 		CAMLOCK_2_MPTLOCK(mpt);
1786 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
1787 			MPTLOCK_2_CAMLOCK(mpt);
1788 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1789 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
1790 			xpt_done(ccb);
1791 			return;
1792 		}
1793 		MPTLOCK_2_CAMLOCK(mpt);
1794 		mpt_req->Bus = 0;	/* we never set bus here */
1795 	} else {
1796 		tgt = ccb->ccb_h.target_id;
1797 		mpt_req->Bus = 0;	/* XXX */
1798 
1799 	}
1800 	mpt_req->SenseBufferLength =
1801 		(csio->sense_len < MPT_SENSE_SIZE) ?
1802 		 csio->sense_len : MPT_SENSE_SIZE;
1803 
1804 	/*
1805 	 * We use the message context to find the request structure when we
1806 	 * get the command completion interrupt from the IOC.
1807 	 */
1808 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
1809 
1810 	/* Which physical device to do the I/O on */
1811 	mpt_req->TargetID = tgt;
1812 
1813 	/* We assume a single level LUN type */
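	/*
	 * (For LUNs of 256 and above, the SAM "flat space" addressing
	 * method is used: LUN byte 0 carries 0x40, the address method,
	 * ORed with the top 6 bits of the LUN, and byte 1 carries the
	 * low 8 bits.  For example, illustratively, LUN 0x123 encodes
	 * as 0x41 0x23.)
	 */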
1814 	if (ccb->ccb_h.target_lun >= 256) {
1815 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
1816 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
1817 	} else {
1818 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
1819 	}
1820 
1821 	/* Set the direction of the transfer */
1822 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1823 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
1824 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1825 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
1826 	} else {
1827 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
1828 	}
1829 
1830 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1831 		switch(ccb->csio.tag_action) {
1832 		case MSG_HEAD_OF_Q_TAG:
1833 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
1834 			break;
1835 		case MSG_ACA_TASK:
1836 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
1837 			break;
1838 		case MSG_ORDERED_Q_TAG:
1839 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
1840 			break;
1841 		case MSG_SIMPLE_Q_TAG:
1842 		default:
1843 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1844 			break;
1845 		}
1846 	} else {
1847 		if (mpt->is_fc || mpt->is_sas) {
1848 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1849 		} else {
1850 			/* XXX No such thing for a target doing packetized. */
1851 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
1852 		}
1853 	}
1854 
1855 	if (mpt->is_spi) {
1856 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1857 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
1858 		}
1859 	}
1860 
1861 	/* Copy the scsi command block into place */
1862 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1863 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
1864 	} else {
1865 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
1866 	}
1867 
1868 	mpt_req->CDBLength = csio->cdb_len;
1869 	mpt_req->DataLength = csio->dxfer_len;
1870 	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
1871 
1872 	/*
1873 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
1874 	 */
1875 	if (mpt->verbose == MPT_PRT_DEBUG) {
1876 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
1877 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
1878 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
1879 		if (mpt_req->Control != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
1880 			mpt_prtc(mpt, "(%s %u byte%s ",
1881 			    (mpt_req->Control == MPI_SCSIIO_CONTROL_READ)?
1882 			    "read" : "write",  csio->dxfer_len,
1883 			    (csio->dxfer_len == 1)? ")" : "s)");
1884 		}
1885 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
1886 		    ccb->ccb_h.target_lun, req, req->serno);
1887 	}
1888 
1889 	/*
1890 	 * If we have any data to send with this command, map it into bus space.
1891 	 */
1892 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1893 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
1894 			/*
1895 			 * We've been given a pointer to a single buffer.
1896 			 */
1897 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
1898 				/*
1899 				 * Virtual address that needs to be translated into
1900 				 * one or more physical address ranges.
1901 				 */
1902 				int error;
1903 				int s = splsoftvm();
1904 				error = bus_dmamap_load(mpt->buffer_dmat,
1905 				    req->dmap, csio->data_ptr, csio->dxfer_len,
1906 				    cb, req, 0);
1907 				splx(s);
1908 				if (error == EINPROGRESS) {
1909 					/*
1910 					 * So as to maintain ordering,
1911 					 * freeze the controller queue
1912 					 * until our mapping is
1913 					 * returned.
1914 					 */
1915 					xpt_freeze_simq(mpt->sim, 1);
1916 					ccbh->status |= CAM_RELEASE_SIMQ;
1917 				}
1918 			} else {
1919 				/*
1920 				 * We have been given a pointer to a single
1921 				 * physical buffer.
1922 				 */
1923 				struct bus_dma_segment seg;
1924 				seg.ds_addr =
1925 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1926 				seg.ds_len = csio->dxfer_len;
1927 				(*cb)(req, &seg, 1, 0);
1928 			}
1929 		} else {
1930 			/*
1931 			 * We have been given a list of addresses.
1932 			 * This case could easily be supported, but such lists
1933 			 * are not currently generated by the CAM subsystem, so
1934 			 * there is no point in supporting them right now.
1935 			 */
1936 			struct bus_dma_segment *segs;
1937 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
1938 				(*cb)(req, NULL, 0, EFAULT);
1939 			} else {
1940 				/* Just use the segments provided */
1941 				segs = (struct bus_dma_segment *)csio->data_ptr;
1942 				(*cb)(req, segs, csio->sglist_cnt, 0);
1943 			}
1944 		}
1945 	} else {
1946 		(*cb)(req, NULL, 0, 0);
1947 	}
1948 }
1949 
1950 static int
1951 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
1952     int sleep_ok)
1953 {
1954 	int   error;
1955 	uint16_t status;
1956 	uint8_t response;
1957 
1958 	error = mpt_scsi_send_tmf(mpt,
1959 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
1960 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
1961 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1962 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
1963 	    0,	/* XXX How do I get the channel ID? */
1964 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
1965 	    lun != CAM_LUN_WILDCARD ? lun : 0,
1966 	    0, sleep_ok);
1967 
1968 	if (error != 0) {
1969 		/*
1970 		 * mpt_scsi_send_tmf hard resets on failure, so no
1971 		 * need to do so here.
1972 		 */
1973 		mpt_prt(mpt,
1974 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
1975 		return (EIO);
1976 	}
1977 
1978 	/* Wait for bus reset to be processed by the IOC. */
1979 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
1980 	    REQ_STATE_DONE, sleep_ok, 5000);
1981 
1982 	status = mpt->tmf_req->IOCStatus;
1983 	response = mpt->tmf_req->ResponseCode;
1984 	mpt->tmf_req->state = REQ_STATE_FREE;
1985 
1986 	if (error) {
1987 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
1988 		    "Resetting controller.\n");
1989 		mpt_reset(mpt, TRUE);
1990 		return (ETIMEDOUT);
1991 	}
1992 
1993 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1994 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
1995 		    "Resetting controller.\n", status);
1996 		mpt_reset(mpt, TRUE);
1997 		return (EIO);
1998 	}
1999 
2000 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2001 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2002 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2003 		    "Resetting controller.\n", response);
2004 		mpt_reset(mpt, TRUE);
2005 		return (EIO);
2006 	}
2007 	return (0);
2008 }
2009 
2010 static int
2011 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2012 {
2013 	int r = 0;
2014 	request_t *req;
2015 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2016 
2017 	req = mpt_get_request(mpt, FALSE);
2018 	if (req == NULL) {
2019 		return (ENOMEM);
2020 	}
2021 	fc = req->req_vbuf;
2022 	memset(fc, 0, sizeof(*fc));
2023 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2024 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2025 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2026 	mpt_send_cmd(mpt, req);
2027 	if (dowait) {
2028 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2029 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2030 		if (r == 0) {
2031 			mpt_free_request(mpt, req);
2032 		}
2033 	}
2034 	return (r);
2035 }
2036 
2037 static int
2038 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2039 	      MSG_EVENT_NOTIFY_REPLY *msg)
2040 {
2041 
2042 	switch(msg->Event & 0xFF) {
2043 	case MPI_EVENT_UNIT_ATTENTION:
2044 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2045 		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
2046 		break;
2047 
2048 	case MPI_EVENT_IOC_BUS_RESET:
2049 		/* We generated a bus reset */
2050 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2051 		    (msg->Data[0] >> 8) & 0xff);
2052 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2053 		break;
2054 
2055 	case MPI_EVENT_EXT_BUS_RESET:
2056 		/* Someone else generated a bus reset */
2057 		mpt_prt(mpt, "External Bus Reset Detected\n");
2058 		/*
2059 		 * These replies don't return EventData like the MPI
2060 		 * spec says they do.
2061 		 */
2062 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2063 		break;
2064 
2065 	case MPI_EVENT_RESCAN:
2066 		/*
2067 		 * In general this means a device has been added to the loop.
2068 		 */
2069 		mpt_prt(mpt, "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
2070 /*		xpt_async(AC_FOUND_DEVICE, path, NULL);  */
2071 		break;
2072 
2073 	case MPI_EVENT_LINK_STATUS_CHANGE:
2074 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2075 		    (msg->Data[1] >> 8) & 0xff,
2076 		    ((msg->Data[0] & 0xff) == 0)?  "Failed" : "Active");
2077 		break;
2078 
2079 	case MPI_EVENT_LOOP_STATE_CHANGE:
2080 		switch ((msg->Data[0] >> 16) & 0xff) {
2081 		case 0x01:
2082 			mpt_prt(mpt,
2083 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2084 			    "(Loop Initialization)\n",
2085 			    (msg->Data[1] >> 8) & 0xff,
2086 			    (msg->Data[0] >> 8) & 0xff,
2087 			    (msg->Data[0]     ) & 0xff);
2088 			switch ((msg->Data[0] >> 8) & 0xff) {
2089 			case 0xF7:
2090 				if ((msg->Data[0] & 0xff) == 0xF7) {
2091 					mpt_prt(mpt, "Device needs AL_PA\n");
2092 				} else {
2093 					mpt_prt(mpt, "Device %02x doesn't like "
2094 					    "FC performance\n",
2095 					    msg->Data[0] & 0xFF);
2096 				}
2097 				break;
2098 			case 0xF8:
2099 				if ((msg->Data[0] & 0xff) == 0xF7) {
2100 					mpt_prt(mpt, "Device had loop failure "
2101 					    "at its receiver prior to acquiring"
2102 					    " AL_PA\n");
2103 				} else {
2104 					mpt_prt(mpt, "Device %02x detected loop"
2105 					    " failure at its receiver\n",
2106 					    msg->Data[0] & 0xFF);
2107 				}
2108 				break;
2109 			default:
2110 				mpt_prt(mpt, "Device %02x requests that device "
2111 				    "%02x reset itself\n",
2112 				    msg->Data[0] & 0xFF,
2113 				    (msg->Data[0] >> 8) & 0xFF);
2114 				break;
2115 			}
2116 			break;
2117 		case 0x02:
2118 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2119 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2120 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2121 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2122 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2123 			break;
2124 		case 0x03:
2125 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2126 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2127 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2128 			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
2129 			    (msg->Data[0]     ) & 0xff  /* Character 4 */);
2130 			break;
2131 		default:
2132 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2133 			    "FC event (%02x %02x %02x)\n",
2134 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2135 			    (msg->Data[0] >> 16) & 0xff, /* Event */
2136 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2137 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2138 		}
2139 		break;
2140 
2141 	case MPI_EVENT_LOGOUT:
2142 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2143 		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
2144 		break;
2145 	case MPI_EVENT_EVENT_CHANGE:
2146 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2147 		    "mpt_cam_event: MPI_EVENT_EVENT_CHANGE\n");
2148 		break;
2149 	case MPI_EVENT_QUEUE_FULL:
2150 	{
2151 		struct cam_sim *sim;
2152 		struct cam_path *tmppath;
2153 		struct ccb_relsim crs;
2154 		PTR_EVENT_DATA_QUEUE_FULL pqf =
2155 		    (PTR_EVENT_DATA_QUEUE_FULL) msg->Data;
2156 		lun_id_t lun_id;
2157 
2158 		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2159 		    "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2160 		if (mpt->phydisk_sim) {
2161 			sim = mpt->phydisk_sim;
2162 		} else {
2163 			sim = mpt->sim;
2164 		}
2165 		MPTLOCK_2_CAMLOCK(mpt);
2166 		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2167 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2168 			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2169 				mpt_prt(mpt, "unable to create a path to send "
2170 				    "XPT_REL_SIMQ\n");
2171 				CAMLOCK_2_MPTLOCK(mpt);
2172 				break;
2173 			}
2174 			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2175 			crs.ccb_h.func_code = XPT_REL_SIMQ;
2176 			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2177 			crs.openings = pqf->CurrentDepth - 1;
2178 			xpt_action((union ccb *)&crs);
2179 			if (crs.ccb_h.status != CAM_REQ_CMP) {
2180 				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2181 			}
2182 			xpt_free_path(tmppath);
2183 		}
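		/*
		 * (RELSIM_ADJUST_OPENINGS asks CAM to clamp the number
		 * of commands it keeps outstanding to the LUN to one
		 * less than the depth at which the IOC reported the
		 * queue-full condition.)
		 */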
2184 		CAMLOCK_2_MPTLOCK(mpt);
2185 		break;
2186 	}
2187 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2188 	{
2189 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2190 		    "mpt_cam_event: SAS_DEVICE_STATUS_CHANGE\n");
2191 		break;
2192 	}
2193 	case MPI_EVENT_SAS_SES:
2194 	{
2195 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2196 		    "mpt_cam_event: MPI_EVENT_SAS_SES\n");
2197 		break;
2198 	}
2199 	default:
2200 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2201 		    msg->Event & 0xFF);
2202 		return (0);
2203 	}
2204 	return (1);
2205 }
2206 
2207 /*
2208  * Reply path for all SCSI I/O requests, called from our
2209  * interrupt handler by extracting our handler index from
2210  * the MsgContext field of the reply from the IOC.
2211  *
2212  * This routine is optimized for the common case of a
2213  * completion without error.  All exception handling is
2214  * offloaded to non-inlined helper routines to minimize
2215  * cache footprint.
2216  */
2217 static int
2218 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2219     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2220 {
2221 	MSG_SCSI_IO_REQUEST *scsi_req;
2222 	union ccb *ccb;
2223 	target_id_t tgt;
2224 
2225 	if (req->state == REQ_STATE_FREE) {
2226 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2227 		return (TRUE);
2228 	}
2229 
2230 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2231 	ccb = req->ccb;
2232 	if (ccb == NULL) {
2233 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2234 		    req, req->serno);
2235 		return (TRUE);
2236 	}
2237 
2238 	tgt = scsi_req->TargetID;
2239 	untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
2240 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2241 
2242 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2243 		bus_dmasync_op_t op;
2244 
2245 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2246 			op = BUS_DMASYNC_POSTREAD;
2247 		else
2248 			op = BUS_DMASYNC_POSTWRITE;
2249 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2250 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2251 	}
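	/*
	 * (POSTREAD makes DMA'd-in data visible to the CPU, e.g. by
	 * copying out of any bounce pages; POSTWRITE releases any
	 * bounce buffering used for the outbound copy.)
	 */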
2252 
2253 	if (reply_frame == NULL) {
2254 		/*
2255 		 * Context only reply, completion without error status.
2256 		 */
2257 		ccb->csio.resid = 0;
2258 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2259 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2260 	} else {
2261 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2262 	}
2263 
2264 	if (mpt->outofbeer) {
2265 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2266 		mpt->outofbeer = 0;
2267 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2268 	}
2269 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2270 		struct scsi_inquiry_data *iq =
2271 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2272 		if (scsi_req->Function ==
2273 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2274 			/*
2275 			 * Fake out the device type so that only the
2276 			 * pass-thru device will attach.
2277 			 */
2278 			iq->device &= ~0x1F;
2279 			iq->device |= T_NODEVICE;
2280 		}
2281 	}
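	/*
	 * (T_NODEVICE puts 0x1f into the peripheral device type field,
	 * the low five bits of the first inquiry byte, so peripheral
	 * drivers other than pass(4) will not attach to the LUN.)
	 */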
2282 	if (mpt->verbose == MPT_PRT_DEBUG) {
2283 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2284 		    req, req->serno);
2285 	}
2286 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2287 	MPTLOCK_2_CAMLOCK(mpt);
2288 	xpt_done(ccb);
2289 	CAMLOCK_2_MPTLOCK(mpt);
2290 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2291 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2292 	} else {
2293 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2294 		    req, req->serno);
2295 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2296 	}
2297 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2298 	    ("CCB req needed wakeup"));
2299 #ifdef	INVARIANTS
2300 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2301 #endif
2302 	mpt_free_request(mpt, req);
2303 	return (TRUE);
2304 }
2305 
2306 static int
2307 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2308     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2309 {
2310 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2311 
2312 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2313 #ifdef	INVARIANTS
2314 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2315 #endif
2316 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2317 	/* Record IOC Status and Response Code of TMF for any waiters. */
2318 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2319 	req->ResponseCode = tmf_reply->ResponseCode;
2320 
2321 	mpt_lprt(mpt, MPT_PRT_INFO, "TMF complete: req %p:%u status 0x%x\n",
2322 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2323 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2324 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2325 		req->state |= REQ_STATE_DONE;
2326 		wakeup(req);
2327 	} else {
2328 		mpt->tmf_req->state = REQ_STATE_FREE;
2329 	}
2330 	return (TRUE);
2331 }
2332 
2333 /*
2334  * XXX: Move to definitions file
2335  */
2336 #define	ELS	0x22
2337 #define	FC4LS	0x32
2338 #define	ABTS	0x81
2339 #define	BA_ACC	0x84
2340 
2341 #define	LS_RJT	0x01
2342 #define	LS_ACC	0x02
2343 #define	PLOGI	0x03
2344 #define	LOGO	0x05
2345 #define SRR	0x14
2346 #define PRLI	0x20
2347 #define PRLO	0x21
2348 #define ADISC	0x52
2349 #define RSCN	0x61
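/*
 * ELS, FC4LS, ABTS and BA_ACC are FC frame R_CTL routing values; the
 * remainder are link service command codes carried in the first
 * payload byte.  For example (illustrative), an inbound PRLI arrives
 * with R_CTL 0x22 and a first payload byte of 0x20.
 */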
2350 
2351 static void
2352 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2353     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2354 {
2355 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2356 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2357 
2358 	/*
2359 	 * We are going to reuse the ELS request to send this response back.
2360 	 */
2361 	rsp = &tmp;
2362 	memset(rsp, 0, sizeof(*rsp));
2363 
2364 #ifdef	USE_IMMEDIATE_LINK_DATA
2365 	/*
2366 	 * The IMMEDIATE option does not appear to work.
2367 	 */
2368 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2369 #endif
2370 	rsp->RspLength = length;
2371 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2372 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2373 
2374 	/*
2375 	 * Copy over information from the original reply frame to
2376 	 * its correct place in the response.
2377 	 */
2378 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2379 
2380 	/*
2381 	 * And now copy back the temporary area to the original frame.
2382 	 */
2383 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2384 	rsp = req->req_vbuf;
2385 
2386 #ifdef	USE_IMMEDIATE_LINK_DATA
2387 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2388 #else
2389 {
2390 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2391 	bus_addr_t paddr = req->req_pbuf;
2392 	paddr += MPT_RQSL(mpt);
2393 
2394 	se->FlagsLength =
2395 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2396 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2397 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2398 		MPI_SGE_FLAGS_END_OF_LIST	|
2399 		MPI_SGE_FLAGS_END_OF_BUFFER;
2400 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
2401 	se->FlagsLength |= (length);
2402 	se->Address = (uint32_t) paddr;
2403 }
2404 #endif
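	/*
	 * (A simple 32-bit SGE packs its flags into the top byte of
	 * FlagsLength -- hence the shift by MPI_SGE_FLAGS_SHIFT -- with
	 * the transfer length in the low 24 bits, followed by the
	 * 32-bit physical address of the payload.)
	 */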
2405 
2406 	/*
2407 	 * Send it on...
2408 	 */
2409 	mpt_send_cmd(mpt, req);
2410 }
2411 
2412 static int
2413 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2414     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2415 {
2416 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2417 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2418 	U8 rctl;
2419 	U8 type;
2420 	U8 cmd;
2421 	U16 status = le16toh(reply_frame->IOCStatus);
2422 	U32 *elsbuf;
2423 	int ioindex;
2424 	int do_refresh = TRUE;
2425 
2426 #ifdef	INVARIANTS
2427 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2428 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2429 	    req, req->serno, rp->Function));
2430 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2431 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2432 	} else {
2433 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2434 	}
2435 #endif
2436 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2437 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2438 	    req, req->serno, reply_frame, reply_frame->Function);
2439 
2440 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2441 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2442 		    status, reply_frame->Function);
2443 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2444 			/*
2445 			 * XXX: to get around shutdown issue
2446 			 */
2447 			mpt->disabled = 1;
2448 			return (TRUE);
2449 		}
2450 		return (TRUE);
2451 	}
2452 
2453 	/*
2454 	 * If the function is that of a link service response, we recycle
2455 	 * the request as a refresh for a new link service buffer post.
2456 	 *
2457 	 * The request pointer is bogus in this case and we have to fetch
2458 	 * it based upon the TransactionContext.
2459 	 */
2460 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2461 		/* Freddie Uncle Charlie Katie */
2462 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2463 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2464 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2465 				break;
2466 			}
2467 
2468 		KASSERT(ioindex < mpt->els_cmds_allocated,
2469 		    ("can't find my mommie!"));
2470 
2471 		/* remove from active list as we're going to re-post it */
2472 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2473 		req->state &= ~REQ_STATE_QUEUED;
2474 		req->state |= REQ_STATE_DONE;
2475 		mpt_fc_post_els(mpt, req, ioindex);
2476 		return (TRUE);
2477 	}
2478 
2479 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2480 		/* remove from active list as we're done */
2481 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2482 		req->state &= ~REQ_STATE_QUEUED;
2483 		req->state |= REQ_STATE_DONE;
2484 		if (req->state & REQ_STATE_TIMEDOUT) {
2485 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2486 			    "Sync Primitive Send Completed After Timeout\n");
2487 			mpt_free_request(mpt, req);
2488 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2489 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2490 			    "Async Primitive Send Complete\n");
2491 			mpt_free_request(mpt, req);
2492 		} else {
2493 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2494 			    "Sync Primitive Send Complete- Waking Waiter\n");
2495 			wakeup(req);
2496 		}
2497 		return (TRUE);
2498 	}
2499 
2500 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2501 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2502 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2503 		    rp->MsgLength, rp->MsgFlags);
2504 		return (TRUE);
2505 	}
2506 
2507 	if (rp->MsgLength <= 5) {
2508 		/*
2509 		 * This is just an ack of an original ELS buffer post
2510 		 */
2511 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2512 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2513 		return (TRUE);
2514 	}
2515 
2516 
2517 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2518 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2519 
2520 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2521 	cmd = be32toh(elsbuf[0]) >> 24;
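	/*
	 * (The inbound ELS payload lives one MPT_RQSL chunk into the
	 * request buffer; the command code is the first payload byte,
	 * i.e. bits 31:24 of the first big-endian word.)
	 */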
2522 
2523 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2524 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2525 		return (TRUE);
2526 	}
2527 
2528 	ioindex = le32toh(rp->TransactionContext);
2529 	req = mpt->els_cmd_ptrs[ioindex];
2530 
2531 	if (rctl == ELS && type == 1) {
2532 		switch (cmd) {
2533 		case PRLI:
2534 			/*
2535 			 * Send back a PRLI ACC
2536 			 */
2537 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2538 			    le32toh(rp->Wwn.PortNameHigh),
2539 			    le32toh(rp->Wwn.PortNameLow));
2540 			elsbuf[0] = htobe32(0x02100014);
2541 			elsbuf[1] |= htobe32(0x00000100);
2542 			elsbuf[4] = htobe32(0x00000002);
2543 			if (mpt->role & MPT_ROLE_TARGET)
2544 				elsbuf[4] |= htobe32(0x00000010);
2545 			if (mpt->role & MPT_ROLE_INITIATOR)
2546 				elsbuf[4] |= htobe32(0x00000020);
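			/*
			 * (The words above form the PRLI LS_ACC payload:
			 * 0x02 is the LS_ACC command code, 0x10 the page
			 * length and 0x0014 the total payload length; the
			 * 0x10 and 0x20 bits in the service parameter word
			 * advertise target and initiator FCP function
			 * support, respectively.)
			 */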
2547 			/* remove from active list as we're done */
2548 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2549 			req->state &= ~REQ_STATE_QUEUED;
2550 			req->state |= REQ_STATE_DONE;
2551 			mpt_fc_els_send_response(mpt, req, rp, 20);
2552 			do_refresh = FALSE;
2553 			break;
2554 		case PRLO:
2555 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2556 			elsbuf[0] = htobe32(0x02100014);
2557 			elsbuf[1] = htobe32(0x08000100);
2558 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2559 			    le32toh(rp->Wwn.PortNameHigh),
2560 			    le32toh(rp->Wwn.PortNameLow));
2561 			/* remove from active list as we're done */
2562 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2563 			req->state &= ~REQ_STATE_QUEUED;
2564 			req->state |= REQ_STATE_DONE;
2565 			mpt_fc_els_send_response(mpt, req, rp, 20);
2566 			do_refresh = FALSE;
2567 			break;
2568 		default:
2569 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2570 			break;
2571 		}
2572 	} else if (rctl == ABTS && type == 0) {
2573 		uint16_t rx_id = le16toh(rp->Rxid);
2574 		uint16_t ox_id = le16toh(rp->Oxid);
2575 		request_t *tgt_req = NULL;
2576 
2577 		mpt_prt(mpt,
2578 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2579 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2580 		    le32toh(rp->Wwn.PortNameLow));
2581 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2582 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2583 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2584 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2585 		} else {
2586 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2587 		}
2588 		if (tgt_req) {
2589 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2590 			uint8_t *vbuf;
2591 			union ccb *ccb = tgt->ccb;
2592 			uint32_t ct_id;
2593 
2594 			vbuf = tgt_req->req_vbuf;
2595 			vbuf += MPT_RQSL(mpt);
2596 
2597 			/*
2598 			 * Check to make sure we have the correct command:
2599 			 * the reply descriptor in the target state should
2600 			 * contain an IoIndex that matches the
2601 			 * RX_ID.
2602 			 *
2603 			 * It'd be nice to have OX_ID to crosscheck with
2604 			 * as well.
2605 			 */
2606 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2607 
2608 			if (ct_id != rx_id) {
2609 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2610 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2611 				    rx_id, ct_id);
2612 				goto skip;
2613 			}
2614 
2615 			ccb = tgt->ccb;
2616 			if (ccb) {
2617 				mpt_prt(mpt,
2618 				    "CCB (%p): lun %u flags %x status %x\n",
2619 				    ccb, ccb->ccb_h.target_lun,
2620 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2621 			}
2622 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2623 			    "%x nxfers %x\n", tgt->state,
2624 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2625 			    tgt->nxfers);
2626   skip:
2627 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2628 				mpt_prt(mpt, "unable to start TargetAbort\n");
2629 			}
2630 		} else {
2631 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2632 		}
2633 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2634 		elsbuf[0] = htobe32(0);
2635 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2636 		elsbuf[2] = htobe32(0x0000ffff);
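		/*
		 * (This builds a BA_ACC payload: word 1 carries the OX_ID
		 * and RX_ID of the aborted exchange and word 2 a low/high
		 * SEQ_CNT range of 0x0000..0xffff.)
		 */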
2637 		/*
2638 		 * Dork with the reply frame so that the response to it
2639 		 * will be correct.
2640 		 */
2641 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2642 		/* remove from active list as we're done */
2643 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2644 		req->state &= ~REQ_STATE_QUEUED;
2645 		req->state |= REQ_STATE_DONE;
2646 		mpt_fc_els_send_response(mpt, req, rp, 12);
2647 		do_refresh = FALSE;
2648 	} else {
2649 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2650 	}
2651 	if (do_refresh == TRUE) {
2652 		/* remove from active list as we're done */
2653 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2654 		req->state &= ~REQ_STATE_QUEUED;
2655 		req->state |= REQ_STATE_DONE;
2656 		mpt_fc_post_els(mpt, req, ioindex);
2657 	}
2658 	return (TRUE);
2659 }
2660 
2661 /*
2662  * Clean up all SCSI Initiator personality state in response
2663  * to a controller reset.
2664  */
2665 static void
2666 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2667 {
2668 	/*
2669 	 * The pending list is already run down by
2670 	 * the generic handler.  Perform the same
2671 	 * operation on the timed out request list.
2672 	 */
2673 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
2674 				   MPI_IOCSTATUS_INVALID_STATE);
2675 
2676 	/*
2677 	 * XXX: Do we need to repost ELS and Target Command Buffers?
2678 	 */
2679 
2680 	/*
2681 	 * Inform the XPT that a bus reset has occurred.
2682 	 */
2683 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
2684 }
2685 
2686 /*
2687  * Parse additional completion information in the reply
2688  * frame for SCSI I/O requests.
2689  */
2690 static int
2691 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
2692 			     MSG_DEFAULT_REPLY *reply_frame)
2693 {
2694 	union ccb *ccb;
2695 	MSG_SCSI_IO_REPLY *scsi_io_reply;
2696 	u_int ioc_status;
2697 	u_int sstate;
2698 	u_int loginfo;
2699 
2700 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
2701 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
2702 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
2703 		("MPT SCSI I/O Handler called with incorrect reply type"));
2704 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
2705 		("MPT SCSI I/O Handler called with continuation reply"));
2706 
2707 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
2708 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
2709 	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
2710 	ioc_status &= MPI_IOCSTATUS_MASK;
2711 	sstate = scsi_io_reply->SCSIState;
2712 
2713 	ccb = req->ccb;
2714 	ccb->csio.resid =
2715 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
2716 
2717 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
2718 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
2719 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2720 		ccb->csio.sense_resid =
2721 		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
2722 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
2723 		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
2724 	}
2725 
2726 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
2727 		/*
2728 		 * Tag messages rejected, but non-tagged retry
2729 		 * was successful.
2730 XXXX
2731 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
2732 		 */
2733 	}
2734 
2735 	switch(ioc_status) {
2736 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2737 		/*
2738 		 * XXX
2739 		 * Linux driver indicates that a zero
2740 		 * transfer length with this error code
2741 		 * indicates a CRC error.
2742 		 *
2743 		 * No need to swap the bytes for checking
2744 		 * against zero.
2745 		 */
2746 		if (scsi_io_reply->TransferCount == 0) {
2747 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2748 			break;
2749 		}
2750 		/* FALLTHROUGH */
2751 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
2752 	case MPI_IOCSTATUS_SUCCESS:
2753 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
2754 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
2755 			/*
2756 			 * Status was never returned for this transaction.
2757 			 */
2758 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
2759 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
2760 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
2761 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
2762 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
2763 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
2764 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
2765 
2766 			/* XXX Handle SPI-Packet and FCP-2 response info. */
2767 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2768 		} else
2769 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2770 		break;
2771 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
2772 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
2773 		break;
2774 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
2775 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2776 		break;
2777 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2778 		/*
2779 		 * Since selection timeouts and "device really not
2780 		 * there" are grouped into this error code, report
2781 		 * selection timeout.  Selection timeouts are
2782 		 * typically retried before giving up on the device
2783 		 * whereas "device not there" errors are considered
2784 		 * unretryable.
2785 		 */
2786 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
2787 		break;
2788 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2789 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
2790 		break;
2791 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
2792 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
2793 		break;
2794 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
2795 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
2796 		break;
2797 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2798 		ccb->ccb_h.status = CAM_UA_TERMIO;
2799 		break;
2800 	case MPI_IOCSTATUS_INVALID_STATE:
2801 		/*
2802 		 * The IOC has been reset.  Emulate a bus reset.
2803 		 */
2804 		/* FALLTHROUGH */
2805 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
2806 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2807 		break;
2808 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
2809 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
2810 		/*
2811 		 * Don't clobber any timeout status that has
2812 		 * already been set for this transaction.  We
2813 		 * want the SCSI layer to be able to differentiate
2814 		 * between the command we aborted due to timeout
2815 		 * and any innocent bystanders.
2816 		 */
2817 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
2818 			break;
2819 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
2820 		break;
2821 
2822 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2823 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
2824 		break;
2825 	case MPI_IOCSTATUS_BUSY:
2826 		mpt_set_ccb_status(ccb, CAM_BUSY);
2827 		break;
2828 	case MPI_IOCSTATUS_INVALID_FUNCTION:
2829 	case MPI_IOCSTATUS_INVALID_SGL:
2830 	case MPI_IOCSTATUS_INTERNAL_ERROR:
2831 	case MPI_IOCSTATUS_INVALID_FIELD:
2832 	default:
2833 		/* XXX
2834 		 * Some of the above may need to kick
2835 		 * of a recovery action!!!!
2836 		 * off a recovery action!!!!
2837 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2838 		break;
2839 	}
2840 
2841 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2842 		mpt_freeze_ccb(ccb);
2843 	}
2844 
2845 	return (TRUE);
2846 }
2847 
2848 static void
2849 mpt_action(struct cam_sim *sim, union ccb *ccb)
2850 {
2851 	struct mpt_softc *mpt;
2852 	struct ccb_trans_settings *cts;
2853 	target_id_t tgt;
2854 	lun_id_t lun;
2855 	int raid_passthru;
2856 
2857 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
2858 
2859 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
2860 	KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action"));
2861 	raid_passthru = (sim == mpt->phydisk_sim);
2862 
2863 	tgt = ccb->ccb_h.target_id;
2864 	lun = ccb->ccb_h.target_lun;
2865 	if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ &&
2866 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
2867 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
2868 		CAMLOCK_2_MPTLOCK(mpt);
2869 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2870 			MPTLOCK_2_CAMLOCK(mpt);
2871 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2872 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2873 			xpt_done(ccb);
2874 			return;
2875 		}
2876 		MPTLOCK_2_CAMLOCK(mpt);
2877 	}
2878 	ccb->ccb_h.ccb_mpt_ptr = mpt;
2879 
2880 	switch (ccb->ccb_h.func_code) {
2881 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2882 		/*
2883 		 * Do a couple of preliminary checks...
2884 		 */
2885 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2886 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2887 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2888 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2889 				break;
2890 			}
2891 		}
2892 		/* Max supported CDB length is 16 bytes */
2893 		/* XXX Unless we implement the new 32-byte message type */
2894 		if (ccb->csio.cdb_len >
2895 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
2896 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2897 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2898 			break;
2899 		}
2900 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2901 		mpt_start(sim, ccb);
2902 		return;
2903 
2904 	case XPT_RESET_BUS:
2905 	case XPT_RESET_DEV:
2906 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2907 			ccb->ccb_h.func_code == XPT_RESET_BUS ?
2908 			"XPT_RESET_BUS\n" : "XPT_RESET_DEV\n");
2909 
2910 		CAMLOCK_2_MPTLOCK(mpt);
2911 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
2912 		MPTLOCK_2_CAMLOCK(mpt);
2913 
2914 		/*
2915 		 * mpt_bus_reset is always successful in that it
2916 		 * will fall back to a hard reset should a bus
2917 		 * reset attempt fail.
2918 		 */
2919 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2920 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2921 		break;
2922 
2923 	case XPT_ABORT:
2924 	{
2925 		union ccb *accb = ccb->cab.abort_ccb;
2926 		CAMLOCK_2_MPTLOCK(mpt);
2927 		switch (accb->ccb_h.func_code) {
2928 		case XPT_ACCEPT_TARGET_IO:
2929 		case XPT_IMMED_NOTIFY:
2930 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
2931 			break;
2932 		case XPT_CONT_TARGET_IO:
2933 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
2934 			ccb->ccb_h.status = CAM_UA_ABORT;
2935 			break;
2936 		case XPT_SCSI_IO:
2937 			ccb->ccb_h.status = CAM_UA_ABORT;
2938 			break;
2939 		default:
2940 			ccb->ccb_h.status = CAM_REQ_INVALID;
2941 			break;
2942 		}
2943 		MPTLOCK_2_CAMLOCK(mpt);
2944 		break;
2945 	}
2946 
2947 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
2948 #define	DP_DISC_ENABLE	0x1
2949 #define	DP_DISC_DISABL	0x2
2950 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
2951 
2952 #define	DP_TQING_ENABLE	0x4
2953 #define	DP_TQING_DISABL	0x8
2954 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
2955 
2956 #define	DP_WIDE		0x10
2957 #define	DP_NARROW	0x20
2958 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
2959 
2960 #define	DP_SYNC		0x40
2961 
2962 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2963 	{
2964 		struct ccb_trans_settings_scsi *scsi;
2965 		struct ccb_trans_settings_spi *spi;
2966 		uint8_t dval;
2967 		u_int period;
2968 		u_int offset;
2969 		int i, j;
2970 
2971 		cts = &ccb->cts;
2972 
2973 		if (mpt->is_fc || mpt->is_sas) {
2974 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2975 			break;
2976 		}
2977 
2978 		/*
2979 		 * Skip attempting settings on RAID volume disks.
2980 		 * Other devices on the bus get the normal treatment.
2981 		 */
2982 		if (mpt->phydisk_sim && raid_passthru == 0 &&
2983 		    mpt_is_raid_volume(mpt, tgt) != 0) {
2984 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
2985 			    "skipping transfer settings for RAID volumes\n");
2986 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2987 			break;
2988 		}
2989 
2990 		i = mpt->mpt_port_page2.PortSettings &
2991 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
2992 		j = mpt->mpt_port_page2.PortFlags &
2993 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
2994 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
2995 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
2996 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
2997 			    "honoring BIOS transfer negotiations\n");
2998 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2999 			break;
3000 		}
3001 
3002 		dval = 0;
3003 		period = 0;
3004 		offset = 0;
3005 
3006 		scsi = &cts->proto_specific.scsi;
3007 		spi = &cts->xport_specific.spi;
3008 
3009 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3010 			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3011 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3012 		}
3013 
3014 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3015 			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3016 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3017 		}
3018 
3019 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3020 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3021 			    DP_WIDE : DP_NARROW;
3022 		}
3023 
3024 		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3025 			dval |= DP_SYNC;
3026 			offset = spi->sync_offset;
3027 		} else {
3028 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3029 			    &mpt->mpt_dev_page1[tgt];
3030 			offset = ptr->RequestedParameters;
3031 			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3032 	    		offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3033 		}
3034 		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3035 			dval |= DP_SYNC;
3036 			period = spi->sync_period;
3037 		} else {
3038 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3039 			    &mpt->mpt_dev_page1[tgt];
3040 			period = ptr->RequestedParameters;
3041 			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3042 	    		period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3043 		}
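		/*
		 * (When CAM does not supply a value, the current goal in
		 * SCSI Device Page 1 is kept: RequestedParameters carries
		 * the max sync offset in bits 16..23 and the min sync
		 * period factor in bits 8..15.)
		 */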
3044 		CAMLOCK_2_MPTLOCK(mpt);
3045 		if (dval & DP_DISC_ENABLE) {
3046 			mpt->mpt_disc_enable |= (1 << tgt);
3047 		} else if (dval & DP_DISC_DISABL) {
3048 			mpt->mpt_disc_enable &= ~(1 << tgt);
3049 		}
3050 		if (dval & DP_TQING_ENABLE) {
3051 			mpt->mpt_tag_enable |= (1 << tgt);
3052 		} else if (dval & DP_TQING_DISABL) {
3053 			mpt->mpt_tag_enable &= ~(1 << tgt);
3054 		}
3055 		if (dval & DP_WIDTH) {
3056 			mpt_setwidth(mpt, tgt, 1);
3057 		}
3058 		if (dval & DP_SYNC) {
3059 			mpt_setsync(mpt, tgt, period, offset);
3060 		}
3061 		if (dval == 0) {
3062 			MPTLOCK_2_CAMLOCK(mpt);
3063 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3064 			break;
3065 		}
3066 
3067 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3068 		    "Set Settings[%d]: 0x%x period 0x%x offset %d\n", tgt,
3069 		    dval, period , offset);
3070 		if (mpt_update_spi_config(mpt, tgt)) {
3071 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3072 		} else {
3073 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3074 		}
3075 		MPTLOCK_2_CAMLOCK(mpt);
3076 		break;
3077 	}
3078 	case XPT_GET_TRAN_SETTINGS:
3079 		cts = &ccb->cts;
3080 		if (mpt->is_fc) {
3081 			struct ccb_trans_settings_fc *fc =
3082 			    &cts->xport_specific.fc;
3083 
3084 			cts->protocol = PROTO_SCSI;
3085 			cts->protocol_version = SCSI_REV_SPC;
3086 			cts->transport = XPORT_FC;
3087 			cts->transport_version = 0;
3088 
3089 			fc->valid = CTS_FC_VALID_SPEED;
3090 			fc->bitrate = 100000;	/* XXX: need to report 2Gb/s when applicable */
3091 			/* XXX: need a port database for each target */
3092 		} else if (mpt->is_sas) {
3093 			struct ccb_trans_settings_sas *sas =
3094 			    &cts->xport_specific.sas;
3095 
3096 			cts->protocol = PROTO_SCSI;
3097 			cts->protocol_version = SCSI_REV_SPC2;
3098 			cts->transport = XPORT_SAS;
3099 			cts->transport_version = 0;
3100 
3101 			sas->valid = CTS_SAS_VALID_SPEED;
3102 			sas->bitrate = 300000;	/* XXX: Default 3Gbps */
3103 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
3104 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3105 			break;
3106 		}
3107 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3108 		break;
3109 
3110 	case XPT_CALC_GEOMETRY:
3111 	{
3112 		struct ccb_calc_geometry *ccg;
3113 
3114 		ccg = &ccb->ccg;
3115 		if (ccg->block_size == 0) {
3116 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3117 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3118 			break;
3119 		}
3120 		mpt_calc_geometry(ccg, /*extended*/1);
3121 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3122 		break;
3123 	}
3124 	case XPT_PATH_INQ:		/* Path routing inquiry */
3125 	{
3126 		struct ccb_pathinq *cpi = &ccb->cpi;
3127 
3128 		cpi->version_num = 1;
3129 		cpi->target_sprt = 0;
3130 		cpi->hba_eng_cnt = 0;
3131 		cpi->max_target = mpt->mpt_max_devices - 1;
3132 		/*
3133 		 * XXX: FC cards report MAX_DEVICES of 512, but we
3134 		 * XXX: seem to hang when going higher than 255.
3135 		 */
3136 		if (cpi->max_target > 255)
3137 			cpi->max_target = 255;
3138 		/*
3139 		 * XXX: VMware ESX reports > 16 devices and then dies
3140 		 * XXX: when we probe.
3141 		 */
3142 		if (mpt->is_spi && cpi->max_target > 15)
3143 			cpi->max_target = 15;
3144 		cpi->max_lun = 7;
3145 		cpi->initiator_id = mpt->mpt_ini_id;
3146 
3147 		cpi->bus_id = cam_sim_bus(sim);
3148 		/*
3149 		 * Actual speed for each device varies.
3150 		 *
3151 		 * The base speed is the speed of the underlying connection.
3152 		 * This is strictly determined for SPI (async, narrow). If
3153 		 * link is up for Fibre Channel, then speed can be gotten
3154 		 * the link is up for Fibre Channel, the speed can be taken
3155 		 */
3156 		if (mpt->is_fc) {
3157 			cpi->hba_misc = PIM_NOBUSRESET;
3158 			cpi->base_transfer_speed =
3159 			    mpt->mpt_fcport_speed * 100000;
3160 			cpi->hba_inquiry = PI_TAG_ABLE;
3161                         cpi->transport = XPORT_FC;
3162                         cpi->transport_version = 0;
3163 		} else if (mpt->is_sas) {
3164 			cpi->hba_misc = PIM_NOBUSRESET;
3165 			cpi->base_transfer_speed = 300000;
3166 			cpi->hba_inquiry = PI_TAG_ABLE;
3167                         cpi->transport = XPORT_SAS;
3168                         cpi->transport_version = 0;
3169 		} else {
3170 			cpi->hba_misc = PIM_SEQSCAN;
3171 			cpi->base_transfer_speed = 3300;
3172 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3173                         cpi->transport = XPORT_SPI;
3174                         cpi->transport_version = 2;
3175 		}
3176 
3177                 cpi->protocol = PROTO_SCSI;
3178                 cpi->protocol_version = SCSI_REV_2;
3179 		/*
3180 		 * We give our fake RAID passthru bus a width that is MaxPhysDisks
3181 		 * wide, restrict it to one LUN and have it *not* be a bus
3182 		 * that can have a SCSI bus reset.
3183 		 */
3184 		if (raid_passthru) {
3185 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3186 			cpi->initiator_id = cpi->max_target + 1;
3187 			cpi->max_lun = 0;
3188 			cpi->hba_misc |= PIM_NOBUSRESET;
3189 		}
3190 
3191 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3192 			cpi->hba_misc |= PIM_NOINITIATOR;
3193 		}
3194 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3195 			cpi->target_sprt =
3196 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3197 		} else {
3198 			cpi->target_sprt = 0;
3199 		}
3200 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3201 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3202 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3203 		cpi->unit_number = cam_sim_unit(sim);
3204 		cpi->ccb_h.status = CAM_REQ_CMP;
3205 		break;
3206 	}
3207 	case XPT_EN_LUN:		/* Enable LUN as a target */
3208 	{
3209 		int result;
3210 
3211 		CAMLOCK_2_MPTLOCK(mpt);
3212 		if (ccb->cel.enable)
3213 			result = mpt_enable_lun(mpt,
3214 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3215 		else
3216 			result = mpt_disable_lun(mpt,
3217 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3218 		MPTLOCK_2_CAMLOCK(mpt);
3219 		if (result == 0) {
3220 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3221 		} else {
3222 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3223 		}
3224 		break;
3225 	}
3226 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3227 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3228 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3229 	{
3230 		tgt_resource_t *trtp;
3231 		lun_id_t lun = ccb->ccb_h.target_lun;
3232 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3233 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3234 		ccb->ccb_h.flags = 0;
3235 
3236 		if (lun == CAM_LUN_WILDCARD) {
3237 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3238 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3239 				break;
3240 			}
3241 			trtp = &mpt->trt_wildcard;
3242 		} else if (lun >= MPT_MAX_LUNS) {
3243 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3244 			break;
3245 		} else {
3246 			trtp = &mpt->trt[lun];
3247 		}
3248 		CAMLOCK_2_MPTLOCK(mpt);
3249 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3250 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3251 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3252 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3253 			    sim_links.stqe);
3254 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3255 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3256 			    "Put FREE INOT lun %d\n", lun);
3257 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3258 			    sim_links.stqe);
3259 		} else {
3260 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3261 		}
3262 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3263 		MPTLOCK_2_CAMLOCK(mpt);
3264 		return;
3265 	}
3266 	case XPT_CONT_TARGET_IO:
3267 		CAMLOCK_2_MPTLOCK(mpt);
3268 		mpt_target_start_io(mpt, ccb);
3269 		MPTLOCK_2_CAMLOCK(mpt);
3270 		return;
3271 
3272 	default:
3273 		ccb->ccb_h.status = CAM_REQ_INVALID;
3274 		break;
3275 	}
3276 	xpt_done(ccb);
3277 }
3278 
3279 static int
3280 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3281 {
3282 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3283 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3284 	target_id_t tgt;
3285 	uint8_t dval, pval, oval;
3286 	int rv;
3287 
3288 	if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3289 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3290 			return (-1);
3291 		}
3292 	} else {
3293 		tgt = cts->ccb_h.target_id;
3294 	}
3295 
3296 	/*
3297 	 * XXX: We aren't looking at Port Page 2 BIOS settings here.
3298 	 * XXX: For goal settings, we pick the max from port page 0.
3299 	 *
3300 	 * For current settings we read the current settings out from
3301 	 * device page 0 for that target.
3302 	 */
3303 	if (IS_CURRENT_SETTINGS(cts)) {
3304 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3305 		dval = 0;
3306 
3307 		CAMLOCK_2_MPTLOCK(mpt);
3308 		tmp = mpt->mpt_dev_page0[tgt];
3309 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3310 		    sizeof(tmp), FALSE, 5000);
3311 		if (rv) {
3312 			MPTLOCK_2_CAMLOCK(mpt);
3313 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3314 			return (rv);
3315 		}
3316 		MPTLOCK_2_CAMLOCK(mpt);
3317 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3318 		    DP_WIDE : DP_NARROW;
3319 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3320 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3321 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3322 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3323 		oval = (tmp.NegotiatedParameters >> 16) & 0xff;
3324 		pval = (tmp.NegotiatedParameters >>  8) & 0xff;
3325 		mpt->mpt_dev_page0[tgt] = tmp;
3326 	} else {
3327 		/*
3328 		 * XXX: Just report the theoretical maximum.
3329 		 */
3330 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE;
3331 		oval = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
3332 		pval = (mpt->mpt_port_page0.Capabilities >>  8) & 0xff;
3333 	}
3334 	cts->protocol = PROTO_SCSI;
3335 	cts->protocol_version = SCSI_REV_2;
3336 	cts->transport = XPORT_SPI;
3337 	cts->transport_version = 2;
3338 
3339 	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
3340 	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
3341 	if (dval & DP_DISC_ENABLE) {
3342 		spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3343 	}
3344 	if (dval & DP_TQING_ENABLE) {
3345 		scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3346 	}
3347 	if (oval && pval) {
3348 		spi->sync_offset = oval;
3349 		spi->sync_period = pval;
3350 		spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3351 		spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3352 	}
3353 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3354 	if (dval & DP_WIDE) {
3355 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3356 	} else {
3357 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3358 	}
3359 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3360 		scsi->valid = CTS_SCSI_VALID_TQ;
3361 		spi->valid |= CTS_SPI_VALID_DISC;
3362 	} else {
3363 		scsi->valid = 0;
3364 	}
3365 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3366 	    "mpt_get_spi_settings[%d]: %s 0x%x period 0x%x offset %d\n", tgt,
3367 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3368 	return (0);
3369 }
3370 
3371 static void
3372 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3373 {
3374 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3375 
3376 	ptr = &mpt->mpt_dev_page1[tgt];
3377 	if (onoff) {
3378 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3379 	} else {
3380 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3381 	}
3382 }
3383 
3384 static void
3385 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3386 {
3387 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3388 
3389 	ptr = &mpt->mpt_dev_page1[tgt];
3390 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3391 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3392 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3393 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3394 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3395 	if (period == 0) {
3396 		return;
3397 	}
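	/*
	 * (Sync period factors below 0x0a correspond to Ultra160 or
	 * faster and require DT clocking; a factor below 0x09 implies
	 * Ultra320, which additionally needs QAS and information units.)
	 */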
3398 	ptr->RequestedParameters |=
3399 	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3400 	ptr->RequestedParameters |=
3401 	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3402 	if (period < 0xa) {
3403 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3404 	}
3405 	if (period < 0x9) {
3406 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3407 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3408 	}
3409 }
3410 
3411 static int
3412 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3413 {
3414 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3415 	int rv;
3416 
3417 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3418 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3419 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3420 	tmp = mpt->mpt_dev_page1[tgt];
3421 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3422 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3423 	if (rv) {
3424 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3425 		return (-1);
3426 	}
3427 	return (0);
3428 }
3429 
3430 static void
3431 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3432 {
3433 #if __FreeBSD_version >= 500000
3434 	cam_calc_geometry(ccg, extended);
3435 #else
3436 	uint32_t size_mb;
3437 	uint32_t secs_per_cylinder;
3438 
3439 	if (ccg->block_size == 0) {
3440 		ccg->ccb_h.status = CAM_REQ_INVALID;
3441 		return;
3442 	}
3443 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3444 	if (size_mb > 1024 && extended) {
3445 		ccg->heads = 255;
3446 		ccg->secs_per_track = 63;
3447 	} else {
3448 		ccg->heads = 64;
3449 		ccg->secs_per_track = 32;
3450 	}
3451 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3452 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3453 	ccg->ccb_h.status = CAM_REQ_CMP;
3454 #endif
3455 }
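/*
 * For example (illustrative), in the pre-5.x fallback a volume of
 * 8388608 512-byte blocks yields size_mb = 4096, so the extended
 * 255 head / 63 sector translation is chosen and cylinders =
 * 8388608 / (255 * 63) = 522.
 */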
3456 
3457 /****************************** Timeout Recovery ******************************/
3458 static int
3459 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3460 {
3461 	int error;
3462 
3463 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3464 	    &mpt->recovery_thread, /*flags*/0,
3465 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3466 	return (error);
3467 }
3468 
3469 static void
3470 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3471 {
3472 	if (mpt->recovery_thread == NULL) {
3473 		return;
3474 	}
3475 	mpt->shutdwn_recovery = 1;
3476 	wakeup(mpt);
3477 	/*
3478 	 * Sleep on a slightly different location
3479 	 * for this interlock just for added safety.
3480 	 */
3481 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3482 }
3483 
3484 static void
3485 mpt_recovery_thread(void *arg)
3486 {
3487 	struct mpt_softc *mpt;
3488 
3489 #if __FreeBSD_version >= 500000
3490 	mtx_lock(&Giant);
3491 #endif
3492 	mpt = (struct mpt_softc *)arg;
3493 	MPT_LOCK(mpt);
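	/*
	 * Sleep until timed-out requests appear (or shutdown is
	 * requested) and then try to recover them.
	 */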
3494 	for (;;) {
3495 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3496 			if (mpt->shutdwn_recovery == 0) {
3497 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3498 			}
3499 		}
3500 		if (mpt->shutdwn_recovery != 0) {
3501 			break;
3502 		}
3503 		mpt_recover_commands(mpt);
3504 	}
3505 	mpt->recovery_thread = NULL;
3506 	wakeup(&mpt->recovery_thread);
3507 	MPT_UNLOCK(mpt);
3508 #if __FreeBSD_version >= 500000
3509 	mtx_unlock(&Giant);
3510 #endif
3511 	kthread_exit(0);
3512 }
3513 
3514 static int
3515 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3516     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3517 {
3518 	MSG_SCSI_TASK_MGMT *tmf_req;
3519 	int		    error;
3520 
3521 	/*
3522 	 * Wait for any current TMF request to complete.
3523 	 * We're only allowed to issue one TMF at a time.
3524 	 */
3525 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3526 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3527 	if (error != 0) {
3528 		mpt_reset(mpt, TRUE);
3529 		return (ETIMEDOUT);
3530 	}
3531 
3532 	mpt_assign_serno(mpt, mpt->tmf_req);
3533 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3534 
3535 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3536 	memset(tmf_req, 0, sizeof(*tmf_req));
3537 	tmf_req->TargetID = target;
3538 	tmf_req->Bus = channel;
3539 	tmf_req->ChainOffset = 0;
3540 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3541 	tmf_req->Reserved = 0;
3542 	tmf_req->TaskType = type;
3543 	tmf_req->Reserved1 = 0;
3544 	tmf_req->MsgFlags = flags;
3545 	tmf_req->MsgContext =
3546 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3547 	memset(&tmf_req->LUN, 0,
3548 	    sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
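	/*
	 * LUNs above 255 require SAM flat-space addressing (0x40 plus
	 * the high six bits in byte 0, low byte in byte 1); smaller
	 * LUNs use single-level addressing with the LUN in byte 1.
	 */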
3549 	if (lun > 255) {
3550 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3551 		tmf_req->LUN[1] = lun & 0xff;
3552 	} else {
3553 		tmf_req->LUN[1] = lun;
3554 	}
3555 	tmf_req->TaskMsgContext = abort_ctx;
3556 
3557 	mpt_lprt(mpt, MPT_PRT_INFO,
3558 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3559 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3560 	if (mpt->verbose > MPT_PRT_DEBUG) {
3561 		mpt_print_request(tmf_req);
3562 	}
3563 
3564 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3565 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3566 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3567 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3568 	if (error != MPT_OK) {
3569 		mpt_reset(mpt, TRUE);
3570 	}
3571 	return (error);
3572 }
3573 
3574 /*
3575  * When a command times out, it is placed on the request_timeout_list
3576  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3577  * only a single TMF operation at a time, so we serially abort/BDR, etc.,
3578  * the timed-out transactions.  The next TMF is issued either by the
3579  * completion handler of the current TMF waking our recovery thread,
3580  * or the TMF timeout handler causing a hard reset sequence.
3581  */
3582 static void
3583 mpt_recover_commands(struct mpt_softc *mpt)
3584 {
3585 	request_t	   *req;
3586 	union ccb	   *ccb;
3587 	int		    error;
3588 
3589 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3590 		/*
3591 		 * No work to do- leave.
3592 		 */
3593 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3594 		return;
3595 	}
3596 
3597 	/*
3598 	 * Flush any commands whose completion coincides with their timeout.
3599 	 */
3600 	mpt_intr(mpt);
3601 
3602 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3603 		/*
3604 		 * The timedout commands have already
3605 		 * completed.  This typically means
3606 		 * that either the timeout value was on
3607 		 * the hairy edge of what the device
3608 		 * requires or - more likely - interrupts
3609 		 * are not happening.
3610 		 */
3611 		mpt_prt(mpt, "Timed-out requests already complete. "
3612 		    "Interrupts may not be functioning.\n");
3613 		mpt_enable_ints(mpt);
3614 		return;
3615 	}
3616 
3617 	/*
3618 	 * We have no visibility into the current state of the
3619 	 * controller, so attempt to abort the commands in the
3620 	 * order they timed-out. For initiator commands, we
3621 	 * depend on the reply handler pulling requests off
3622 	 * the timeout list.
3623 	 */
3624 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3625 		uint16_t status;
3626 		uint8_t response;
3627 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3628 
3629 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
3630 		    req, req->serno, hdrp->Function);
3631 		ccb = req->ccb;
3632 		if (ccb == NULL) {
3633 			mpt_prt(mpt, "null ccb in timed out request. "
3634 			    "Resetting Controller.\n");
3635 			mpt_reset(mpt, TRUE);
3636 			continue;
3637 		}
3638 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
3639 
3640 		/*
3641 		 * Check to see if this is not an initiator command and
3642 		 * deal with it differently if it is.
3643 		 */
3644 		switch (hdrp->Function) {
3645 		case MPI_FUNCTION_SCSI_IO_REQUEST:
3646 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
3647 			break;
3648 		default:
3649 			/*
3650 			 * XXX: FIX ME: need to abort target assists...
3651 			 */
3652 			mpt_prt(mpt, "just putting it back on the pend q\n");
3653 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
3654 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
3655 			    links);
3656 			continue;
3657 		}
3658 
3659 		error = mpt_scsi_send_tmf(mpt,
3660 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3661 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
3662 		    htole32(req->index | scsi_io_handler_id), TRUE);
3663 
3664 		if (error != 0) {
3665 			/*
3666 			 * mpt_scsi_send_tmf hard resets on failure, so no
3667 			 * need to do so here.  Our queue should be emptied
3668 			 * by the hard reset.
3669 			 */
3670 			continue;
3671 		}
3672 
3673 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
3674 		    REQ_STATE_DONE, TRUE, 500);
3675 
3676 		status = mpt->tmf_req->IOCStatus;
3677 		response = mpt->tmf_req->ResponseCode;
3678 		mpt->tmf_req->state = REQ_STATE_FREE;
3679 
3680 		if (error != 0) {
3681 			/*
3682 			 * If we've errored out, reset the controller.
3683 			 */
3684 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
3685 			    "Resetting controller\n");
3686 			mpt_reset(mpt, TRUE);
3687 			continue;
3688 		}
3689 
3690 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
3691 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
3692 			    "Resetting controller.\n", status);
3693 			mpt_reset(mpt, TRUE);
3694 			continue;
3695 		}
3696 
3697 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
3698 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
3699 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
3700 			    "Resetting controller.\n", response);
3701 			mpt_reset(mpt, TRUE);
3702 			continue;
3703 		}
3704 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
3705 	}
3706 }
3707 
3708 /************************ Target Mode Support ****************************/
3709 static void
3710 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
3711 {
3712 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
3713 	PTR_SGE_TRANSACTION32 tep;
3714 	PTR_SGE_SIMPLE32 se;
3715 	bus_addr_t paddr;
3716 
3717 	paddr = req->req_pbuf;
3718 	paddr += MPT_RQSL(mpt);
3719 
3720 	fc = req->req_vbuf;
3721 	memset(fc, 0, MPT_REQUEST_AREA);
3722 	fc->BufferCount = 1;
3723 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
3724 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
3725 
3726 	/*
3727 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
3728 	 * consist of a TE SGL element (with details length of zero)
3729 	 * followed by a SIMPLE SGL element which holds the address
3730 	 * of the buffer.
3731 	 */
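	/*
	 * That is, the SGL laid down below looks like:
	 *
	 *	[ SGE_TRANSACTION32: context = ioindex ]
	 *	[ SGE_SIMPLE32: flags/length + ELS buffer address ]
	 */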
3732 
3733 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
3734 
3735 	tep->ContextSize = 4;
3736 	tep->Flags = 0;
3737 	tep->TransactionContext[0] = htole32(ioindex);
3738 
3739 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
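	/*
	 * The ELS buffer itself is the tail of this request's area
	 * (it starts MPT_RQSL(mpt) bytes in), which is why its length
	 * is MPT_NRFM(mpt) - MPT_RQSL(mpt).
	 */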
3740 	se->FlagsLength =
3741 		MPI_SGE_FLAGS_HOST_TO_IOC	|
3742 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
3743 		MPI_SGE_FLAGS_LAST_ELEMENT	|
3744 		MPI_SGE_FLAGS_END_OF_LIST	|
3745 		MPI_SGE_FLAGS_END_OF_BUFFER;
3746 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
3747 	se->FlagsLength |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
3748 	se->Address = (uint32_t) paddr;
3749 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3750 	    "add ELS index %d ioindex %d for %p:%u\n",
3751 	    req->index, ioindex, req, req->serno);
3752 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
3753 	    ("mpt_fc_post_els: request not locked"));
3754 	mpt_send_cmd(mpt, req);
3755 }
3756 
3757 static void
3758 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
3759 {
3760 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
3761 	PTR_CMD_BUFFER_DESCRIPTOR cb;
3762 	bus_addr_t paddr;
3763 
3764 	paddr = req->req_pbuf;
3765 	paddr += MPT_RQSL(mpt);
3766 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
3767 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
3768 
3769 	fc = req->req_vbuf;
3770 	fc->BufferCount = 1;
3771 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
3772 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3773 
3774 	cb = &fc->Buffer[0];
3775 	cb->IoIndex = htole16(ioindex);
3776 	cb->u.PhysicalAddress32 = (U32) paddr;
3777 
3778 	mpt_check_doorbell(mpt);
3779 	mpt_send_cmd(mpt, req);
3780 }
3781 
3782 static int
3783 mpt_add_els_buffers(struct mpt_softc *mpt)
3784 {
3785 	int i;
3786 
3787 	if (mpt->is_fc == 0) {
3788 		return (TRUE);
3789 	}
3790 
3791 	if (mpt->els_cmds_allocated) {
3792 		return (TRUE);
3793 	}
3794 
3795 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
3796 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3797 
3798 	if (mpt->els_cmd_ptrs == NULL) {
3799 		return (FALSE);
3800 	}
3801 
3802 	/*
3803 	 * Feed the chip some ELS buffer resources
3804 	 */
3805 	for (i = 0; i < MPT_MAX_ELS; i++) {
3806 		request_t *req = mpt_get_request(mpt, FALSE);
3807 		if (req == NULL) {
3808 			break;
3809 		}
3810 		req->state |= REQ_STATE_LOCKED;
3811 		mpt->els_cmd_ptrs[i] = req;
3812 		mpt_fc_post_els(mpt, req, i);
3813 	}
3814 
3815 	if (i == 0) {
3816 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
3817 		free(mpt->els_cmd_ptrs, M_DEVBUF);
3818 		mpt->els_cmd_ptrs = NULL;
3819 		return (FALSE);
3820 	}
3821 	if (i != MPT_MAX_ELS) {
3822 		mpt_lprt(mpt, MPT_PRT_INFO,
3823 		    "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
3824 	}
3825 	mpt->els_cmds_allocated = i;
3826 	return(TRUE);
3827 }
3828 
3829 static int
3830 mpt_add_target_commands(struct mpt_softc *mpt)
3831 {
3832 	int i, max;
3833 
3834 	if (mpt->tgt_cmd_ptrs) {
3835 		return (TRUE);
3836 	}
3837 
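	/*
	 * Devote at most half of the request pool to target command
	 * buffers, capped by mpt_max_tgtcmds.
	 */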
3838 	max = MPT_MAX_REQUESTS(mpt) >> 1;
3839 	if (max > mpt->mpt_max_tgtcmds) {
3840 		max = mpt->mpt_max_tgtcmds;
3841 	}
3842 	mpt->tgt_cmd_ptrs =
3843 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
3844 	if (mpt->tgt_cmd_ptrs == NULL) {
3845 		mpt_prt(mpt,
3846 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
3847 		return (FALSE);
3848 	}
3849 
3850 	for (i = 0; i < max; i++) {
3851 		request_t *req;
3852 
3853 		req = mpt_get_request(mpt, FALSE);
3854 		if (req == NULL) {
3855 			break;
3856 		}
3857 		req->state |= REQ_STATE_LOCKED;
3858 		mpt->tgt_cmd_ptrs[i] = req;
3859 		mpt_post_target_command(mpt, req, i);
3860 	}
3861 
3863 	if (i == 0) {
3864 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
3865 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
3866 		mpt->tgt_cmd_ptrs = NULL;
3867 		return (FALSE);
3868 	}
3869 
3870 	mpt->tgt_cmds_allocated = i;
3871 
3872 	if (i < max) {
3873 		mpt_lprt(mpt, MPT_PRT_INFO,
3874 		    "added %d of %d target bufs\n", i, max);
3875 	}
3876 	return (i);
3877 }
3878 
3879 static int
3880 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3881 {
3882 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3883 		mpt->twildcard = 1;
3884 	} else if (lun >= MPT_MAX_LUNS) {
3885 		return (EINVAL);
3886 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3887 		return (EINVAL);
3888 	}
3889 	if (mpt->tenabled == 0) {
3890 		if (mpt->is_fc) {
3891 			(void) mpt_fc_reset_link(mpt, 0);
3892 		}
3893 		mpt->tenabled = 1;
3894 	}
3895 	if (lun == CAM_LUN_WILDCARD) {
3896 		mpt->trt_wildcard.enabled = 1;
3897 	} else {
3898 		mpt->trt[lun].enabled = 1;
3899 	}
3900 	return (0);
3901 }
3902 
3903 static int
3904 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3905 {
3906 	int i;
3907 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3908 		mpt->twildcard = 0;
3909 	} else if (lun >= MPT_MAX_LUNS) {
3910 		return (EINVAL);
3911 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3912 		return (EINVAL);
3913 	}
3914 	if (lun == CAM_LUN_WILDCARD) {
3915 		mpt->trt_wildcard.enabled = 0;
3916 	} else {
3917 		mpt->trt[lun].enabled = 0;
3918 	}
3919 	for (i = 0; i < MPT_MAX_LUNS; i++) {
3920 		if (mpt->trt[i].enabled) {
3921 			break;
3922 		}
3923 	}
3924 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
3925 		if (mpt->is_fc) {
3926 			(void) mpt_fc_reset_link(mpt, 0);
3927 		}
3928 		mpt->tenabled = 0;
3929 	}
3930 	return (0);
3931 }
3932 
3933 /*
3934  * Called with MPT lock held
3935  */
3936 static void
3937 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
3938 {
3939 	struct ccb_scsiio *csio = &ccb->csio;
3940 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
3941 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
3942 
3943 	switch (tgt->state) {
3944 	case TGT_STATE_IN_CAM:
3945 		break;
3946 	case TGT_STATE_MOVING_DATA:
3947 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3948 		xpt_freeze_simq(mpt->sim, 1);
3949 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3950 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3951 		MPTLOCK_2_CAMLOCK(mpt);
3952 		xpt_done(ccb);
3953 		CAMLOCK_2_MPTLOCK(mpt);
3954 		return;
3955 	default:
3956 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
3957 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
3958 		mpt_tgt_dump_req_state(mpt, cmd_req);
3959 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3960 		MPTLOCK_2_CAMLOCK(mpt);
3961 		xpt_done(ccb);
3962 		CAMLOCK_2_MPTLOCK(mpt);
3963 		return;
3964 	}
3965 
3966 	if (csio->dxfer_len) {
3967 		bus_dmamap_callback_t *cb;
3968 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
3969 		request_t *req;
3970 
3971 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
3972 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
3973 
3974 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
3975 			if (mpt->outofbeer == 0) {
3976 				mpt->outofbeer = 1;
3977 				xpt_freeze_simq(mpt->sim, 1);
3978 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
3979 			}
3980 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3981 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3982 			MPTLOCK_2_CAMLOCK(mpt);
3983 			xpt_done(ccb);
3984 			CAMLOCK_2_MPTLOCK(mpt);
3985 			return;
3986 		}
3987 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
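		/*
		 * Pick the 64-bit SGE builder when bus addresses are
		 * wider than 32 bits.
		 */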
3988 		if (sizeof (bus_addr_t) > 4) {
3989 			cb = mpt_execute_req_a64;
3990 		} else {
3991 			cb = mpt_execute_req;
3992 		}
3993 
3994 		req->ccb = ccb;
3995 		ccb->ccb_h.ccb_req_ptr = req;
3996 
3997 		/*
3998 		 * Record the currently active ccb and the
3999 		 * request for it in our target state area.
4000 		 */
4001 		tgt->ccb = ccb;
4002 		tgt->req = req;
4003 
4004 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4005 		ta = req->req_vbuf;
4006 
4007 		if (mpt->is_sas) {
4008 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4009 			     cmd_req->req_vbuf;
4010 			ta->QueueTag = ssp->InitiatorTag;
4011 		} else if (mpt->is_spi) {
4012 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4013 			     cmd_req->req_vbuf;
4014 			ta->QueueTag = sp->Tag;
4015 		}
4016 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4017 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4018 		ta->ReplyWord = htole32(tgt->reply_desc);
4019 		if (csio->ccb_h.target_lun > 255) {
4020 			ta->LUN[0] =
4021 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4022 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4023 		} else {
4024 			ta->LUN[1] = csio->ccb_h.target_lun;
4025 		}
4026 
4027 		ta->RelativeOffset = tgt->bytes_xfered;
4028 		ta->DataLength = ccb->csio.dxfer_len;
4029 		if (ta->DataLength > tgt->resid) {
4030 			ta->DataLength = tgt->resid;
4031 		}
4032 
4033 		/*
4034 		 * XXX Should be done after data transfer completes?
4035 		 */
4036 		tgt->resid -= csio->dxfer_len;
4037 		tgt->bytes_xfered += csio->dxfer_len;
4038 
4039 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4040 			ta->TargetAssistFlags |=
4041 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4042 		}
4043 
4044 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4045 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4046 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4047 			ta->TargetAssistFlags |=
4048 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4049 		}
4050 #endif
4051 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4052 
4053 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4054 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4055 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4056 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4057 
4058 		MPTLOCK_2_CAMLOCK(mpt);
4059 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4060 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4061 				int error;
4062 				int s = splsoftvm();
4063 				error = bus_dmamap_load(mpt->buffer_dmat,
4064 				    req->dmap, csio->data_ptr, csio->dxfer_len,
4065 				    cb, req, 0);
4066 				splx(s);
4067 				if (error == EINPROGRESS) {
4068 					xpt_freeze_simq(mpt->sim, 1);
4069 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4070 				}
4071 			} else {
4072 				/*
4073 				 * We have been given a pointer to single
4074 				 * physical buffer.
4075 				 */
4076 				struct bus_dma_segment seg;
4077 				seg.ds_addr = (bus_addr_t)
4078 				    (vm_offset_t)csio->data_ptr;
4079 				seg.ds_len = csio->dxfer_len;
4080 				(*cb)(req, &seg, 1, 0);
4081 			}
4082 		} else {
4083 			/*
4084 			 * We have been given a list of addresses.
4085 			 * This case could be easily supported but they are not
4086 			 * currently generated by the CAM subsystem so there
4087 			 * is no point in wasting the time right now.
4088 			 */
4089 			struct bus_dma_segment *sgs;
4090 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4091 				(*cb)(req, NULL, 0, EFAULT);
4092 			} else {
4093 				/* Just use the segments provided */
4094 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4095 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4096 			}
4097 		}
4098 		CAMLOCK_2_MPTLOCK(mpt);
4099 	} else {
4100 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4101 
4102 		/*
4103 		 * XXX: I don't know why this seems to happen, but
4104 		 * XXX: completing the CCB seems to make things happy.
4105 		 * XXX: This seems to happen if the initiator requests
4106 		 * XXX: enough data that we have to do multiple CTIOs.
4107 		 */
4108 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4109 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4110 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4111 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4112 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4113 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4114 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4115 			MPTLOCK_2_CAMLOCK(mpt);
4116 			xpt_done(ccb);
4117 			CAMLOCK_2_MPTLOCK(mpt);
4118 			return;
4119 		}
4120 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4121 			sp = sense;
4122 			memcpy(sp, &csio->sense_data,
4123 			   min(csio->sense_len, MPT_SENSE_SIZE));
4124 		}
4125 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4126 	}
4127 }
4128 
4129 static void
4130 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4131     uint32_t lun, int send, uint8_t *data, size_t length)
4132 {
4133 	mpt_tgt_state_t *tgt;
4134 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4135 	SGE_SIMPLE32 *se;
4136 	uint32_t flags;
4137 	uint8_t *dptr;
4138 	bus_addr_t pptr;
4139 	request_t *req;
4140 
4141 	if (length == 0) {
4142 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4143 		return;
4144 	}
4145 
4146 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4147 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4148 		mpt_prt(mpt, "out of resources- dropping local response\n");
4149 		return;
4150 	}
4151 	tgt->is_local = 1;
4152 
4154 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4155 	ta = req->req_vbuf;
4156 
4157 	if (mpt->is_sas) {
4158 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4159 		ta->QueueTag = ssp->InitiatorTag;
4160 	} else if (mpt->is_spi) {
4161 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4162 		ta->QueueTag = sp->Tag;
4163 	}
4164 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4165 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4166 	ta->ReplyWord = htole32(tgt->reply_desc);
4167 	if (lun > 255) {
4168 		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4169 		ta->LUN[1] = lun & 0xff;
4170 	} else {
4171 		ta->LUN[1] = lun;
4172 	}
4173 	ta->RelativeOffset = 0;
4174 	ta->DataLength = length;
4175 
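	/*
	 * The response payload lives in the second half of this
	 * request's buffer, so the single 32-bit simple SGE built
	 * below can describe it.
	 */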
4176 	dptr = req->req_vbuf;
4177 	dptr += MPT_RQSL(mpt);
4178 	pptr = req->req_pbuf;
4179 	pptr += MPT_RQSL(mpt);
4180 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4181 
4182 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4183 	memset(se, 0, sizeof (*se));
4184 
4185 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4186 	if (send) {
4187 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4188 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4189 	}
4190 	se->Address = pptr;
4191 	MPI_pSGE_SET_LENGTH(se, length);
4192 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4193 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4194 	MPI_pSGE_SET_FLAGS(se, flags);
4195 
4196 	tgt->ccb = NULL;
4197 	tgt->req = req;
4198 	tgt->resid = 0;
4199 	tgt->bytes_xfered = length;
4200 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4201 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4202 #else
4203 	tgt->state = TGT_STATE_MOVING_DATA;
4204 #endif
4205 	mpt_send_cmd(mpt, req);
4206 }
4207 
4208 /*
4209  * Abort queued up CCBs
4210  */
4211 static cam_status
4212 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4213 {
4214 	struct mpt_hdr_stailq *lp;
4215 	struct ccb_hdr *srch;
4216 	int found = 0;
4217 	union ccb *accb = ccb->cab.abort_ccb;
4218 	tgt_resource_t *trtp;
4219 
4220 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4221 
4222 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4223 		trtp = &mpt->trt_wildcard;
4224 	} else {
4225 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4226 	}
4227 
4228 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4229 		lp = &trtp->atios;
4230 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4231 		lp = &trtp->inots;
4232 	} else {
4233 		return (CAM_REQ_INVALID);
4234 	}
4235 
4236 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4237 		if (srch == &accb->ccb_h) {
4238 			found = 1;
4239 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4240 			break;
4241 		}
4242 	}
4243 	if (found) {
4244 		accb->ccb_h.status = CAM_REQ_ABORTED;
4245 		xpt_done(accb);
4246 		return (CAM_REQ_CMP);
4247 	}
4248 	mpt_prt(mpt, "mpt_abort_target_ccb: CCB %p not found\n", accb);
4249 	return (CAM_PATH_INVALID);
4250 }
4251 
4252 /*
4253  * Ask the MPT to abort the current target command
4254  */
4255 static int
4256 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4257 {
4258 	int error;
4259 	request_t *req;
4260 	PTR_MSG_TARGET_MODE_ABORT abtp;
4261 
4262 	req = mpt_get_request(mpt, FALSE);
4263 	if (req == NULL) {
4264 		return (-1);
4265 	}
4266 	abtp = req->req_vbuf;
4267 	memset(abtp, 0, sizeof (*abtp));
4268 
4269 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4270 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4271 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4272 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4273 	error = 0;
4274 	if (mpt->is_fc || mpt->is_sas) {
4275 		mpt_send_cmd(mpt, req);
4276 	} else {
4277 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4278 	}
4279 	return (error);
4280 }
4281 
4282 /*
4283  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4284  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4285  * FC929 to set bogus FC_RSP fields (nonzero residuals
4286  * but w/o RESID fields set). This causes QLogic initiators
4287  * to think maybe that a frame was lost.
4288  *
4289  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4290  * we use allocated requests to do TARGET_ASSIST and we
4291  * need to know when to release them.
4292  */
4293 
4294 static void
4295 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4296     uint8_t status, uint8_t const *sense_data)
4297 {
4298 	uint8_t *cmd_vbuf;
4299 	mpt_tgt_state_t *tgt;
4300 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4301 	request_t *req;
4302 	bus_addr_t paddr;
4303 	int resplen = 0;
4304 
4305 	cmd_vbuf = cmd_req->req_vbuf;
4306 	cmd_vbuf += MPT_RQSL(mpt);
4307 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4308 
4309 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4310 		if (mpt->outofbeer == 0) {
4311 			mpt->outofbeer = 1;
4312 			xpt_freeze_simq(mpt->sim, 1);
4313 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4314 		}
4315 		if (ccb) {
4316 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4317 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4318 			MPTLOCK_2_CAMLOCK(mpt);
4319 			xpt_done(ccb);
4320 			CAMLOCK_2_MPTLOCK(mpt);
4321 		} else {
4322 			mpt_prt(mpt,
4323 			    "could not allocate status request- dropping\n");
4324 		}
4325 		return;
4326 	}
4327 	req->ccb = ccb;
4328 	if (ccb) {
4329 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4330 		ccb->ccb_h.ccb_req_ptr = req;
4331 	}
4332 
4333 	/*
4334 	 * Record the currently active ccb, if any, and the
4335 	 * request for it in our target state area.
4336 	 */
4337 	tgt->ccb = ccb;
4338 	tgt->req = req;
4339 	tgt->state = TGT_STATE_SENDING_STATUS;
4340 
4341 	tp = req->req_vbuf;
4342 	paddr = req->req_pbuf;
4343 	paddr += MPT_RQSL(mpt);
4344 
4345 	memset(tp, 0, sizeof (*tp));
4346 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4347 	if (mpt->is_fc) {
4348 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4349 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4350 		uint8_t *sts_vbuf;
4351 		uint32_t *rsp;
4352 
4353 		sts_vbuf = req->req_vbuf;
4354 		sts_vbuf += MPT_RQSL(mpt);
4355 		rsp = (uint32_t *) sts_vbuf;
4356 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4357 
4358 		/*
4359 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4360 		 * It has to be big-endian in memory and is organized
4361 		 * in 32 bit words, which are much easier to deal with
4362 		 * as words which are swizzled as needed.
4363 		 *
4364 		 * All we're filling here is the FC_RSP payload.
4365 		 * We may just have the chip synthesize it if
4366 		 * we have no residual and an OK status.
4367 		 *
4368 		 */
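		/*
		 * rsp[2] is the (big-endian) word that carries the FCP
		 * flags byte in bits 8..15 and the SCSI status in bits
		 * 0..7; rsp[3] carries the residual and rsp[4] the
		 * sense length.
		 */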
4369 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4370 
4371 		rsp[2] = status;
4372 		if (tgt->resid) {
4373 			rsp[2] |= 0x800;	/* FCP_RESID_UNDER */
4374 			rsp[3] = htobe32(tgt->resid);
4375 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4376 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4377 #endif
4378 		}
4379 		if (status == SCSI_STATUS_CHECK_COND) {
4380 			int i;
4381 
4382 			rsp[2] |= 0x200;	/* FCP_SNS_LEN_VALID */
4383 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4384 			if (sense_data) {
4385 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4386 			} else {
4387 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4388 				    "TION but no sense data?\n");
4389 				memset(&rsp[8], 0, MPT_SENSE_SIZE);
4390 			}
4391 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4392 				rsp[i] = htobe32(rsp[i]);
4393 			}
4394 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4395 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4396 #endif
4397 		}
4398 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4399 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4400 #endif
4401 		rsp[2] = htobe32(rsp[2]);
4402 	} else if (mpt->is_sas) {
4403 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4404 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4405 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4406 	} else {
4407 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4408 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4409 		tp->StatusCode = status;
4410 		tp->QueueTag = htole16(sp->Tag);
4411 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4412 	}
4413 
4414 	tp->ReplyWord = htole32(tgt->reply_desc);
4415 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4416 
4417 #ifdef	WE_CAN_USE_AUTO_REPOST
4418 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4419 #endif
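	/*
	 * If all we have to send is a GOOD status, let the chip
	 * synthesize the status frame; otherwise point a simple SGE
	 * at the response payload built above.
	 */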
4420 	if (status == SCSI_STATUS_OK && resplen == 0) {
4421 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4422 	} else {
4423 		tp->StatusDataSGE.u.Address32 = (uint32_t) paddr;
4424 		tp->StatusDataSGE.FlagsLength =
4425 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4426 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4427 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4428 			MPI_SGE_FLAGS_END_OF_LIST	|
4429 			MPI_SGE_FLAGS_END_OF_BUFFER;
4430 		tp->StatusDataSGE.FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
4431 		tp->StatusDataSGE.FlagsLength |= resplen;
4432 	}
4433 
4434 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4435 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4436 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4437 	    req->serno, tgt->resid);
4438 	if (ccb) {
4439 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4440 		ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz);
4441 	}
4442 	mpt_send_cmd(mpt, req);
4443 }
4444 
4445 static void
4446 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4447     tgt_resource_t *trtp, int init_id)
4448 {
4449 	struct ccb_immed_notify *inot;
4450 	mpt_tgt_state_t *tgt;
4451 
4452 	tgt = MPT_TGT_STATE(mpt, req);
4453 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4454 	if (inot == NULL) {
4455 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BUSY\n");
4456 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4457 		return;
4458 	}
4459 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4460 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4461 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4462 
4463 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4464 	inot->sense_len = 0;
4465 	memset(inot->message_args, 0, sizeof (inot->message_args));
4466 	inot->initiator_id = init_id;	/* XXX */
4467 
4468 	/*
4469 	 * This is a somewhat grotesque attempt to map from task management
4470 	 * to old style SCSI messages. God help us all.
4471 	 */
4472 	switch (fc) {
4473 	case MPT_ABORT_TASK_SET:
4474 		inot->message_args[0] = MSG_ABORT_TAG;
4475 		break;
4476 	case MPT_CLEAR_TASK_SET:
4477 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4478 		break;
4479 	case MPT_TARGET_RESET:
4480 		inot->message_args[0] = MSG_TARGET_RESET;
4481 		break;
4482 	case MPT_CLEAR_ACA:
4483 		inot->message_args[0] = MSG_CLEAR_ACA;
4484 		break;
4485 	case MPT_TERMINATE_TASK:
4486 		inot->message_args[0] = MSG_ABORT_TAG;
4487 		break;
4488 	default:
4489 		inot->message_args[0] = MSG_NOOP;
4490 		break;
4491 	}
4492 	tgt->ccb = (union ccb *) inot;
4493 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4494 	MPTLOCK_2_CAMLOCK(mpt);
4495 	xpt_done((union ccb *)inot);
4496 	CAMLOCK_2_MPTLOCK(mpt);
4497 }
4498 
4499 static void
4500 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4501 {
4502 	struct ccb_accept_tio *atiop;
4503 	lun_id_t lun;
4504 	int tag_action = 0;
4505 	mpt_tgt_state_t *tgt;
4506 	tgt_resource_t *trtp = NULL;
4507 	U8 *lunptr;
4508 	U8 *vbuf;
4509 	U16 itag;
4510 	U16 ioindex;
4511 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4512 	uint8_t *cdbp;
4513 
4514 	/*
4515 	 * First, DMA sync the received command-
4516 	 * which is in the *request* phys area.
4517 	 *
4518 	 * XXX: We could optimize this for a range
4519 	 */
4520 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
4521 	    BUS_DMASYNC_POSTREAD);
4522 
4523 	/*
4524 	 * Stash info for the current command where we can get at it later.
4525 	 */
4526 	vbuf = req->req_vbuf;
4527 	vbuf += MPT_RQSL(mpt);
4528 
4529 	/*
4530 	 * Get our state pointer set up.
4531 	 */
4532 	tgt = MPT_TGT_STATE(mpt, req);
4533 	if (tgt->state != TGT_STATE_LOADED) {
4534 		mpt_tgt_dump_req_state(mpt, req);
4535 		panic("bad target state in mpt_scsi_tgt_atio");
4536 	}
4537 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4538 	tgt->state = TGT_STATE_IN_CAM;
4539 	tgt->reply_desc = reply_desc;
4540 	ioindex = GET_IO_INDEX(reply_desc);
4541 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4542 		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
4543 		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
4544 		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
4545 		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
4546 	}
4547 	if (mpt->is_fc) {
4548 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4549 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4550 		if (fc->FcpCntl[2]) {
4551 			/*
4552 			 * Task Management Request
4553 			 */
4554 			switch (fc->FcpCntl[2]) {
4555 			case 0x2:
4556 				fct = MPT_ABORT_TASK_SET;
4557 				break;
4558 			case 0x4:
4559 				fct = MPT_CLEAR_TASK_SET;
4560 				break;
4561 			case 0x20:
4562 				fct = MPT_TARGET_RESET;
4563 				break;
4564 			case 0x40:
4565 				fct = MPT_CLEAR_ACA;
4566 				break;
4567 			case 0x80:
4568 				fct = MPT_TERMINATE_TASK;
4569 				break;
4570 			default:
4571 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4572 				    fc->FcpCntl[2]);
4573 				mpt_scsi_tgt_status(mpt, 0, req,
4574 				    SCSI_STATUS_OK, 0);
4575 				return;
4576 			}
4577 		} else {
4578 			switch (fc->FcpCntl[1]) {
4579 			case 0:
4580 				tag_action = MSG_SIMPLE_Q_TAG;
4581 				break;
4582 			case 1:
4583 				tag_action = MSG_HEAD_OF_Q_TAG;
4584 				break;
4585 			case 2:
4586 				tag_action = MSG_ORDERED_Q_TAG;
4587 				break;
4588 			default:
4589 				/*
4590 				 * Bah. Ignore Untagged Queuing and ACA
4591 				 */
4592 				tag_action = MSG_SIMPLE_Q_TAG;
4593 				break;
4594 			}
4595 		}
4596 		tgt->resid = be32toh(fc->FcpDl);
4597 		cdbp = fc->FcpCdb;
4598 		lunptr = fc->FcpLun;
4599 		itag = be16toh(fc->OptionalOxid);
4600 	} else if (mpt->is_sas) {
4601 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4602 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4603 		cdbp = ssp->CDB;
4604 		lunptr = ssp->LogicalUnitNumber;
4605 		itag = ssp->InitiatorTag;
4606 	} else {
4607 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4608 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4609 		cdbp = sp->CDB;
4610 		lunptr = sp->LogicalUnitNumber;
4611 		itag = sp->Tag;
4612 	}
4613 
4614 	/*
4615 	 * Decode the lun (single-level or flat-space addressing only)
4616 	 */
4617 	switch (lunptr[0] & 0xc0) {
4618 	case 0x40:
4619 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
4620 		break;
4621 	case 0:
4622 		lun = lunptr[1];
4623 		break;
4624 	default:
4625 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
4626 		lun = 0xffff;
4627 		break;
4628 	}
4629 
4630 	/*
4631 	 * Deal with non-enabled or bad luns here.
4632 	 */
4633 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4634 	    mpt->trt[lun].enabled == 0) {
4635 		if (mpt->twildcard) {
4636 			trtp = &mpt->trt_wildcard;
4637 		} else if (fct == MPT_NIL_TMT_VALUE) {
4638 			/*
4639 			 * In this case, we haven't got an upstream listener
4640 			 * for either a specific lun or wildcard luns. We
4641 			 * have to make some sensible response. For regular
4642 			 * inquiry, just return some NOT HERE inquiry data.
4643 			 * For VPD inquiry, report illegal field in cdb.
4644 			 * For REQUEST SENSE, just return NO SENSE data.
4645 			 * REPORT LUNS gets illegal command.
4646 			 * All other commands get 'no such device'.
4647 			 */
4648 
4649 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
4650 
4651 			mpt_prt(mpt, "CMD 0x%x to unmanaged lun %u\n",
4652 			    cdbp[0], lun);
4653 
4654 			memset(buf, 0, MPT_SENSE_SIZE);
4655 			cond = SCSI_STATUS_CHECK_COND;
4656 			buf[0] = 0xf0;
4657 			buf[2] = 0x5;
4658 			buf[7] = 0x8;
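			/*
			 * Fixed format sense data: 0xf0 marks a current
			 * error, buf[2] holds the sense key and buf[7]
			 * says 8 bytes of additional sense follow;
			 * buf[12]/buf[13] below carry the ASC/ASCQ.
			 */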
4659 			sp = buf;
4660 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4661 
4662 			switch (cdbp[0]) {
4663 			case INQUIRY:
4664 			{
4665 				static uint8_t iqd[8] = {
4666 				    0x7f, 0x0, 0x4, 0x12, 0x0
4667 				};
4668 				if (cdbp[1] != 0) {
4669 					buf[12] = 0x26;
4670 					buf[13] = 0x01;
4671 					break;
4672 				}
4673 				mpt_prt(mpt, "local inquiry\n");
4674 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4675 				    iqd, sizeof (iqd));
4676 				return;
4677 			}
4678 			case REQUEST_SENSE:
4679 			{
4680 				buf[2] = 0x0;
4681 				mpt_prt(mpt, "local request sense\n");
4682 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4683 				    buf, sizeof (buf));
4684 				return;
4685 			}
4686 			case REPORT_LUNS:
4687 				buf[12] = 0x26;
4688 				break;
4689 			default:
4690 				buf[12] = 0x25;
4691 				break;
4692 			}
4693 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
4694 			return;
4695 		}
4696 		/* otherwise, leave trtp NULL */
4697 	} else {
4698 		trtp = &mpt->trt[lun];
4699 	}
4700 
4701 	/*
4702 	 * Deal with any task management
4703 	 */
4704 	if (fct != MPT_NIL_TMT_VALUE) {
4705 		if (trtp == NULL) {
4706 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
4707 			    fct);
4708 			mpt_scsi_tgt_status(mpt, 0, req,
4709 			    SCSI_STATUS_OK, 0);
4710 		} else {
4711 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
4712 			    GET_INITIATOR_INDEX(reply_desc));
4713 		}
4714 		return;
4715 	}
4716 
4718 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
4719 	if (atiop == NULL) {
4720 		mpt_lprt(mpt, MPT_PRT_WARN,
4721 		    "no ATIOs for lun %u- sending back %s\n", lun,
4722 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
4723 		mpt_scsi_tgt_status(mpt, NULL, req,
4724 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
4725 		    NULL);
4726 		return;
4727 	}
4728 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
4729 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4730 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
4731 	atiop->ccb_h.ccb_mpt_ptr = mpt;
4732 	atiop->ccb_h.status = CAM_CDB_RECVD;
4733 	atiop->ccb_h.target_lun = lun;
4734 	atiop->sense_len = 0;
4735 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
4736 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
4737 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
4738 
4739 	/*
4740 	 * The tag we construct here allows us to find the
4741 	 * original request that the command came in with.
4742 	 *
4743 	 * This way we don't have to depend on anything but the
4744 	 * tag to find things when CCBs show back up from CAM.
4745 	 */
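	/*
	 * (MPT_MAKE_TAGID packs the request index and ioindex into
	 * the tag; MPT_TAG_2_REQ recovers the request from it, as in
	 * mpt_target_start_io.)
	 */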
4746 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4747 	tgt->tag_id = atiop->tag_id;
4748 	if (tag_action) {
4749 		atiop->tag_action = tag_action;
4750 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
4751 	}
4752 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4753 		int i;
4754 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
4755 		    atiop->ccb_h.target_lun);
4756 		for (i = 0; i < atiop->cdb_len; i++) {
4757 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
4758 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
4759 		}
4760 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
4761 		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
4762 	}
4763 
4764 	MPTLOCK_2_CAMLOCK(mpt);
4765 	xpt_done((union ccb *)atiop);
4766 	CAMLOCK_2_MPTLOCK(mpt);
4767 }
4768 
4769 static void
4770 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
4771 {
4772 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4773 
4774 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
4775 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
4776 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
4777 	    tgt->tag_id, tgt->state);
4778 }
4779 
4780 static void
4781 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
4782 {
4783 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
4784 	    req->index, req->index, req->state);
4785 	mpt_tgt_dump_tgt_state(mpt, req);
4786 }
4787 
4788 static int
4789 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
4790     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
4791 {
4792 	int dbg;
4793 	union ccb *ccb;
4794 	U16 status;
4795 
4796 	if (reply_frame == NULL) {
4797 		/*
4798 		 * Figure out what the state of the command is.
4799 		 */
4800 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4801 
4802 #ifdef	INVARIANTS
4803 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
4804 		if (tgt->req) {
4805 			mpt_req_not_spcl(mpt, tgt->req,
4806 			    "turbo scsi_tgt_reply associated req", __LINE__);
4807 		}
4808 #endif
4809 		switch(tgt->state) {
4810 		case TGT_STATE_LOADED:
4811 			/*
4812 			 * This is a new command starting.
4813 			 */
4814 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
4815 			break;
4816 		case TGT_STATE_MOVING_DATA:
4817 		{
4818 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4819 
4820 			ccb = tgt->ccb;
4821 			if (tgt->req == NULL) {
4822 				panic("mpt: turbo target reply with null "
4823 				    "associated request moving data");
4824 				/* NOTREACHED */
4825 			}
4826 			if (ccb == NULL) {
4827 				if (tgt->is_local == 0) {
4828 					panic("mpt: turbo target reply with "
4829 					    "null associated ccb moving data");
4830 					/* NOTREACHED */
4831 				}
4832 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4833 				    "TARGET_ASSIST local done\n");
4834 				TAILQ_REMOVE(&mpt->request_pending_list,
4835 				    tgt->req, links);
4836 				mpt_free_request(mpt, tgt->req);
4837 				tgt->req = NULL;
4838 				mpt_scsi_tgt_status(mpt, NULL, req,
4839 				    0, NULL);
4840 				return (TRUE);
4841 			}
4842 			tgt->ccb = NULL;
4843 			tgt->nxfers++;
4844 			untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
4845 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4846 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
4847 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
4848 			/*
4849 			 * Free the Target Assist Request
4850 			 */
4851 			KASSERT(tgt->req->ccb == ccb,
4852 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
4853 			    tgt->req->serno, tgt->req->ccb));
4854 			TAILQ_REMOVE(&mpt->request_pending_list,
4855 			    tgt->req, links);
4856 			mpt_free_request(mpt, tgt->req);
4857 			tgt->req = NULL;
4858 
4859 			/*
4860 			 * Do we need to send status now? That is, are
4861 			 * we done with all our data transfers?
4862 			 */
4863 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4864 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4865 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4866 				KASSERT(ccb->ccb_h.status,
4867 				    ("zero ccb sts at %d\n", __LINE__));
4868 				tgt->state = TGT_STATE_IN_CAM;
4869 				if (mpt->outofbeer) {
4870 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4871 					mpt->outofbeer = 0;
4872 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4873 				}
4874 				MPTLOCK_2_CAMLOCK(mpt);
4875 				xpt_done(ccb);
4876 				CAMLOCK_2_MPTLOCK(mpt);
4877 				break;
4878 			}
4879 			/*
4880 			 * Otherwise, send status (and sense)
4881 			 */
4882 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4883 				sp = sense;
4884 				memcpy(sp, &ccb->csio.sense_data,
4885 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
4886 			}
4887 			mpt_scsi_tgt_status(mpt, ccb, req,
4888 			    ccb->csio.scsi_status, sp);
4889 			break;
4890 		}
4891 		case TGT_STATE_SENDING_STATUS:
4892 		case TGT_STATE_MOVING_DATA_AND_STATUS:
4893 		{
4894 			int ioindex;
4895 			ccb = tgt->ccb;
4896 
4897 			if (tgt->req == NULL) {
4898 				panic("mpt: turbo target reply with null "
4899 				    "associated request sending status");
4900 				/* NOTREACHED */
4901 			}
4902 
4903 			if (ccb) {
4904 				tgt->ccb = NULL;
4905 				if (tgt->state ==
4906 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
4907 					tgt->nxfers++;
4908 				}
4909 				untimeout(mpt_timeout, ccb,
4910 				    ccb->ccb_h.timeout_ch);
4911 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4912 					ccb->ccb_h.status |= CAM_SENT_SENSE;
4913 				}
4914 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4915 				    "TARGET_STATUS tag %x sts %x flgs %x req "
4916 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
4917 				    ccb->ccb_h.flags, tgt->req);
4918 				/*
4919 				 * Free the Target Send Status Request
4920 				 */
4921 				KASSERT(tgt->req->ccb == ccb,
4922 				    ("tgt->req %p:%u tgt->req->ccb %p",
4923 				    tgt->req, tgt->req->serno, tgt->req->ccb));
4924 				/*
4925 				 * Notify CAM that we're done
4926 				 */
4927 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4928 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4929 				KASSERT(ccb->ccb_h.status,
4930 				    ("ZERO ccb sts at %d\n", __LINE__));
4931 				tgt->ccb = NULL;
4932 			} else {
4933 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4934 				    "TARGET_STATUS non-CAM for req %p:%u\n",
4935 				    tgt->req, tgt->req->serno);
4936 			}
4937 			TAILQ_REMOVE(&mpt->request_pending_list,
4938 			    tgt->req, links);
4939 			mpt_free_request(mpt, tgt->req);
4940 			tgt->req = NULL;
4941 
4942 			/*
4943 			 * And re-post the Command Buffer.
4944 			 * This will reset the state.
4945 			 */
4946 			ioindex = GET_IO_INDEX(reply_desc);
4947 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4948 			tgt->is_local = 0;
4949 			mpt_post_target_command(mpt, req, ioindex);
4950 
4951 			/*
4952 			 * And post a done for anyone who cares
4953 			 */
4954 			if (ccb) {
4955 				if (mpt->outofbeer) {
4956 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4957 					mpt->outofbeer = 0;
4958 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4959 				}
4960 				MPTLOCK_2_CAMLOCK(mpt);
4961 				xpt_done(ccb);
4962 				CAMLOCK_2_MPTLOCK(mpt);
4963 			}
4964 			break;
4965 		}
4966 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
4967 			tgt->state = TGT_STATE_LOADED;
4968 			break;
4969 		default:
4970 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
4971 			    "Reply Function\n", tgt->state);
4972 		}
4973 		return (TRUE);
4974 	}
4975 
4976 	status = le16toh(reply_frame->IOCStatus);
4977 	if (status != MPI_IOCSTATUS_SUCCESS) {
4978 		dbg = MPT_PRT_ERROR;
4979 	} else {
4980 		dbg = MPT_PRT_DEBUG1;
4981 	}
4982 
4983 	mpt_lprt(mpt, dbg,
4984 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
4985 	     req, req->serno, reply_frame, reply_frame->Function, status);
4986 
4987 	switch (reply_frame->Function) {
4988 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
4989 	{
4990 		mpt_tgt_state_t *tgt;
4991 #ifdef	INVARIANTS
4992 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
4993 #endif
4994 		if (status != MPI_IOCSTATUS_SUCCESS) {
4995 			/*
4996 			 * XXX What to do?
4997 			 */
4998 			break;
4999 		}
5000 		tgt = MPT_TGT_STATE(mpt, req);
5001 		KASSERT(tgt->state == TGT_STATE_LOADING,
5002 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
5003 		mpt_assign_serno(mpt, req);
5004 		tgt->state = TGT_STATE_LOADED;
5005 		break;
5006 	}
5007 	case MPI_FUNCTION_TARGET_ASSIST:
5008 #ifdef	INVARIANTS
5009 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5010 #endif
5011 		mpt_prt(mpt, "target assist completion\n");
5012 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5013 		mpt_free_request(mpt, req);
5014 		break;
5015 	case MPI_FUNCTION_TARGET_STATUS_SEND:
5016 #ifdef	INVARIANTS
5017 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5018 #endif
5019 		mpt_prt(mpt, "status send completion\n");
5020 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5021 		mpt_free_request(mpt, req);
5022 		break;
5023 	case MPI_FUNCTION_TARGET_MODE_ABORT:
5024 	{
5025 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5026 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5027 		PTR_MSG_TARGET_MODE_ABORT abtp =
5028 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5029 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5030 #ifdef	INVARIANTS
5031 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5032 #endif
5033 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5034 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5035 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5036 		mpt_free_request(mpt, req);
5037 		break;
5038 	}
5039 	default:
5040 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5041 		    "0x%x\n", reply_frame->Function);
5042 		break;
5043 	}
5044 	return (TRUE);
5045 }
5046