xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision 94942af266ac119ede0ca836f9aa5a5ac0582938)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  *
61  * Support from LSI-Logic has also gone a great deal toward making this a
62  * workable subsystem and is gratefully acknowledged.
63  */
64 /*-
65  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66  * Copyright (c) 2005, WHEEL Sp. z o.o.
67  * Copyright (c) 2004, 2005 Justin T. Gibbs
68  * All rights reserved.
69  *
70  * Redistribution and use in source and binary forms, with or without
71  * modification, are permitted provided that the following conditions are
72  * met:
73  * 1. Redistributions of source code must retain the above copyright
74  *    notice, this list of conditions and the following disclaimer.
75  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76  *    substantially similar to the "NO WARRANTY" disclaimer below
77  *    ("Disclaimer") and any redistribution must be conditioned upon including
78  *    a substantially similar Disclaimer requirement for further binary
79  *    redistribution.
80  * 3. Neither the names of the above listed copyright holders nor the names
81  *    of any contributors may be used to endorse or promote products derived
82  *    from this software without specific prior written permission.
83  *
84  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95  */
96 #include <sys/cdefs.h>
97 __FBSDID("$FreeBSD$");
98 
99 #include <dev/mpt/mpt.h>
100 #include <dev/mpt/mpt_cam.h>
101 #include <dev/mpt/mpt_raid.h>
102 
103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/mpt/mpilib/mpi_init.h"
105 #include "dev/mpt/mpilib/mpi_targ.h"
106 #include "dev/mpt/mpilib/mpi_fc.h"
107 #if __FreeBSD_version >= 500000
108 #include <sys/sysctl.h>
109 #endif
110 #include <sys/callout.h>
111 #include <sys/kthread.h>
112 
113 #if __FreeBSD_version >= 700025
114 #ifndef	CAM_NEW_TRAN_CODE
115 #define	CAM_NEW_TRAN_CODE	1
116 #endif
117 #endif
118 
119 static void mpt_poll(struct cam_sim *);
120 static timeout_t mpt_timeout;
121 static void mpt_action(struct cam_sim *, union ccb *);
122 static int
123 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
124 static void mpt_setwidth(struct mpt_softc *, int, int);
125 static void mpt_setsync(struct mpt_softc *, int, int, int);
126 static int mpt_update_spi_config(struct mpt_softc *, int);
127 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
128 
129 static mpt_reply_handler_t mpt_scsi_reply_handler;
130 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
131 static mpt_reply_handler_t mpt_fc_els_reply_handler;
132 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
133 					MSG_DEFAULT_REPLY *);
134 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
135 static int mpt_fc_reset_link(struct mpt_softc *, int);
136 
137 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
138 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
139 static void mpt_recovery_thread(void *arg);
140 static void mpt_recover_commands(struct mpt_softc *mpt);
141 
142 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
143     u_int, u_int, u_int, int);
144 
145 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
146 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
147 static int mpt_add_els_buffers(struct mpt_softc *mpt);
148 static int mpt_add_target_commands(struct mpt_softc *mpt);
149 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
150 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
151 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
152 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
153 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
154 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
155     uint8_t, uint8_t const *);
156 static void
157 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
158     tgt_resource_t *, int);
159 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
160 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
161 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
162 
163 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
164 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
165 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
166 
167 static mpt_probe_handler_t	mpt_cam_probe;
168 static mpt_attach_handler_t	mpt_cam_attach;
169 static mpt_enable_handler_t	mpt_cam_enable;
170 static mpt_ready_handler_t	mpt_cam_ready;
171 static mpt_event_handler_t	mpt_cam_event;
172 static mpt_reset_handler_t	mpt_cam_ioc_reset;
173 static mpt_detach_handler_t	mpt_cam_detach;
174 
175 static struct mpt_personality mpt_cam_personality =
176 {
177 	.name		= "mpt_cam",
178 	.probe		= mpt_cam_probe,
179 	.attach		= mpt_cam_attach,
180 	.enable		= mpt_cam_enable,
181 	.ready		= mpt_cam_ready,
182 	.event		= mpt_cam_event,
183 	.reset		= mpt_cam_ioc_reset,
184 	.detach		= mpt_cam_detach,
185 };
186 
187 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
188 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
189 
190 int
191 mpt_cam_probe(struct mpt_softc *mpt)
192 {
193 	int role;
194 
195 	/*
196 	 * Only attach to nodes that support the initiator or target role
197 	 * (or want to) or have RAID physical devices that need CAM pass-thru
198 	 * support.
199 	 */
200 	if (mpt->do_cfg_role) {
201 		role = mpt->cfg_role;
202 	} else {
203 		role = mpt->role;
204 	}
205 	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
206 	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
207 		return (0);
208 	}
209 	return (ENODEV);
210 }
211 
212 int
213 mpt_cam_attach(struct mpt_softc *mpt)
214 {
215 	struct cam_devq *devq;
216 	mpt_handler_t	 handler;
217 	int		 maxq;
218 	int		 error;
219 
220 	MPT_LOCK(mpt);
221 	TAILQ_INIT(&mpt->request_timeout_list);
222 	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
223 	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
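	/*
	 * That is, maxq = min(GlobalCredits, MPT_MAX_REQUESTS(mpt)): the
	 * SIM queue may never be deeper than the number of request credits
	 * the IOC has granted us, nor deeper than our own request pool.
	 */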
224 
225 	handler.reply_handler = mpt_scsi_reply_handler;
226 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
227 				     &scsi_io_handler_id);
228 	if (error != 0) {
229 		MPT_UNLOCK(mpt);
230 		goto cleanup;
231 	}
232 
233 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
234 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
235 				     &scsi_tmf_handler_id);
236 	if (error != 0) {
237 		MPT_UNLOCK(mpt);
238 		goto cleanup;
239 	}
240 
241 	/*
242 	 * If we're fibre channel and could support target mode, we register
243 	 * an ELS reply handler and give it resources.
244 	 */
245 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
246 		handler.reply_handler = mpt_fc_els_reply_handler;
247 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
248 		    &fc_els_handler_id);
249 		if (error != 0) {
250 			MPT_UNLOCK(mpt);
251 			goto cleanup;
252 		}
253 		if (mpt_add_els_buffers(mpt) == FALSE) {
254 			error = ENOMEM;
255 			MPT_UNLOCK(mpt);
256 			goto cleanup;
257 		}
258 		maxq -= mpt->els_cmds_allocated;
259 	}
260 
261 	/*
262 	 * If we support target mode, we register a reply handler for it,
263 	 * but don't add command resources until we actually enable target
264 	 * mode.
265 	 */
266 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
267 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
268 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
269 		    &mpt->scsi_tgt_handler_id);
270 		if (error != 0) {
271 			MPT_UNLOCK(mpt);
272 			goto cleanup;
273 		}
274 	}
275 
276 	/*
277 	 * We keep one request reserved for timeout TMF requests.
278 	 */
279 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
280 	if (mpt->tmf_req == NULL) {
281 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
282 		error = ENOMEM;
283 		MPT_UNLOCK(mpt);
284 		goto cleanup;
285 	}
286 
287 	/*
288 	 * Mark the request as free even though not on the free list.
289 	 * There is only one TMF request allowed to be outstanding at
290 	 * a time and the TMF routines perform their own allocation
291 	 * tracking using the standard state flags.
292 	 */
293 	mpt->tmf_req->state = REQ_STATE_FREE;
294 	maxq--;
295 
296 	/*
297 	 * The rest of this is CAM foo, for which we need to drop our lock
298 	 */
299 	MPT_UNLOCK(mpt);
300 
301 	if (mpt_spawn_recovery_thread(mpt) != 0) {
302 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
303 		error = ENOMEM;
304 		goto cleanup;
305 	}
306 
307 	/*
308 	 * Create the device queue for our SIM(s).
309 	 */
310 	devq = cam_simq_alloc(maxq);
311 	if (devq == NULL) {
312 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
313 		error = ENOMEM;
314 		goto cleanup;
315 	}
316 
317 	/*
318 	 * Construct our SIM entry.
319 	 */
320 	mpt->sim =
321 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
322 	if (mpt->sim == NULL) {
323 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
324 		cam_simq_free(devq);
325 		error = ENOMEM;
326 		goto cleanup;
327 	}
328 
329 	/*
330 	 * Register exactly this bus.
331 	 */
332 	MPT_LOCK(mpt);
333 	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
334 		mpt_prt(mpt, "Bus registration Failed!\n");
335 		error = ENOMEM;
336 		MPT_UNLOCK(mpt);
337 		goto cleanup;
338 	}
339 
340 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
341 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
342 		mpt_prt(mpt, "Unable to allocate Path!\n");
343 		error = ENOMEM;
344 		MPT_UNLOCK(mpt);
345 		goto cleanup;
346 	}
347 	MPT_UNLOCK(mpt);
348 
349 	/*
350 	 * Only register a second bus for RAID physical
351 	 * devices if the controller supports RAID.
352 	 */
353 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
354 		return (0);
355 	}
356 
357 	/*
358 	 * Create a "bus" to export all hidden disks to CAM.
359 	 */
360 	mpt->phydisk_sim =
361 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
362 	if (mpt->phydisk_sim == NULL) {
363 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
364 		error = ENOMEM;
365 		goto cleanup;
366 	}
367 
368 	/*
369 	 * Register this bus.
370 	 */
371 	MPT_LOCK(mpt);
372 	if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
373 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
374 		error = ENOMEM;
375 		MPT_UNLOCK(mpt);
376 		goto cleanup;
377 	}
378 
379 	if (xpt_create_path(&mpt->phydisk_path, NULL,
380 	    cam_sim_path(mpt->phydisk_sim),
381 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
382 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
383 		error = ENOMEM;
384 		MPT_UNLOCK(mpt);
385 		goto cleanup;
386 	}
387 	MPT_UNLOCK(mpt);
388 	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
389 	return (0);
390 
391 cleanup:
392 	mpt_cam_detach(mpt);
393 	return (error);
394 }
395 
396 /*
397  * Read FC configuration information
398  */
399 static int
400 mpt_read_config_info_fc(struct mpt_softc *mpt)
401 {
402 	char *topology = NULL;
403 	int rv;
404 
405 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
406 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
407 	if (rv) {
408 		return (-1);
409 	}
410 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
411 		 mpt->mpt_fcport_page0.Header.PageVersion,
412 		 mpt->mpt_fcport_page0.Header.PageLength,
413 		 mpt->mpt_fcport_page0.Header.PageNumber,
414 		 mpt->mpt_fcport_page0.Header.PageType);
415 
417 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
418 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
419 	if (rv) {
420 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
421 		return (-1);
422 	}
423 
424 	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
425 
426 	switch (mpt->mpt_fcport_page0.Flags &
427 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
428 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
429 		mpt->mpt_fcport_speed = 0;
430 		topology = "<NO LOOP>";
431 		break;
432 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
433 		topology = "N-Port";
434 		break;
435 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
436 		topology = "NL-Port";
437 		break;
438 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
439 		topology = "F-Port";
440 		break;
441 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
442 		topology = "FL-Port";
443 		break;
444 	default:
445 		mpt->mpt_fcport_speed = 0;
446 		topology = "?";
447 		break;
448 	}
449 
450 	mpt_lprt(mpt, MPT_PRT_INFO,
451 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
452 	    "Speed %u-Gbit\n", topology,
453 	    mpt->mpt_fcport_page0.WWNN.High,
454 	    mpt->mpt_fcport_page0.WWNN.Low,
455 	    mpt->mpt_fcport_page0.WWPN.High,
456 	    mpt->mpt_fcport_page0.WWPN.Low,
457 	    mpt->mpt_fcport_speed);
458 #if __FreeBSD_version >= 500000
459 	MPT_UNLOCK(mpt);
460 	{
461 		struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
462 		struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
463 
464 		snprintf(mpt->scinfo.fc.wwnn,
465 		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
466 		    mpt->mpt_fcport_page0.WWNN.High,
467 		    mpt->mpt_fcport_page0.WWNN.Low);
468 
469 		snprintf(mpt->scinfo.fc.wwpn,
470 		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
471 		    mpt->mpt_fcport_page0.WWPN.High,
472 		    mpt->mpt_fcport_page0.WWPN.Low);
473 
474 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
475 		       "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
476 		       "World Wide Node Name");
477 
478 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
479 		       "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
480 		       "World Wide Port Name");
481 
482 	}
483 	MPT_LOCK(mpt);
484 #endif
485 	return (0);
486 }
487 
488 /*
489  * Set FC configuration information.
490  */
491 static int
492 mpt_set_initial_config_fc(struct mpt_softc *mpt)
493 {
494 
495 	CONFIG_PAGE_FC_PORT_1 fc;
496 	U32 fl;
497 	int r, doit = 0;
498 	int role;
499 
500 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
501 	    &fc.Header, FALSE, 5000);
502 	if (r) {
503 		mpt_prt(mpt, "failed to read FC page 1 header\n");
504 		return (mpt_fc_reset_link(mpt, 1));
505 	}
506 
507 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
508 	    &fc.Header, sizeof (fc), FALSE, 5000);
509 	if (r) {
510 		mpt_prt(mpt, "failed to read FC page 1\n");
511 		return (mpt_fc_reset_link(mpt, 1));
512 	}
513 
514 	/*
515 	 * Check our flags to make sure we support the role we want.
516 	 */
517 	doit = 0;
518 	role = 0;
519 	fl = le32toh(fc.Flags);
520 
521 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
522 		role |= MPT_ROLE_INITIATOR;
523 	}
524 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
525 		role |= MPT_ROLE_TARGET;
526 	}
527 
528 	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
529 
530 	if (mpt->do_cfg_role == 0) {
531 		role = mpt->cfg_role;
532 	} else {
533 		mpt->do_cfg_role = 0;
534 	}
535 
536 	if (role != mpt->cfg_role) {
537 		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
538 			if ((role & MPT_ROLE_INITIATOR) == 0) {
539 				mpt_prt(mpt, "adding initiator role\n");
540 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
541 				doit++;
542 			} else {
543 				mpt_prt(mpt, "keeping initiator role\n");
544 			}
545 		} else if (role & MPT_ROLE_INITIATOR) {
546 			mpt_prt(mpt, "removing initiator role\n");
547 			doit++;
548 		}
549 		if (mpt->cfg_role & MPT_ROLE_TARGET) {
550 			if ((role & MPT_ROLE_TARGET) == 0) {
551 				mpt_prt(mpt, "adding target role\n");
552 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
553 				doit++;
554 			} else {
555 				mpt_prt(mpt, "keeping target role\n");
556 			}
557 		} else if (role & MPT_ROLE_TARGET) {
558 			mpt_prt(mpt, "removing target role\n");
559 			doit++;
560 		}
561 		mpt->role = mpt->cfg_role;
562 	}
563 
564 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
565 		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
566 			mpt_prt(mpt, "adding OXID option\n");
567 			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
568 			doit++;
569 		}
570 	}
571 
572 	if (doit) {
573 		fc.Flags = htole32(fl);
574 		r = mpt_write_cfg_page(mpt,
575 		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
576 		    sizeof(fc), FALSE, 5000);
577 		if (r != 0) {
578 			mpt_prt(mpt, "failed to update NVRAM with changes\n");
579 			return (0);
580 		}
581 		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
582 		    "effect until next reboot or IOC reset\n");
583 	}
584 	return (0);
585 }
586 
587 /*
588  * Read SAS configuration information. Nothing to do yet.
589  */
590 static int
591 mpt_read_config_info_sas(struct mpt_softc *mpt)
592 {
593 	return (0);
594 }
595 
596 /*
597  * Set SAS configuration information. Nothing to do yet.
598  */
599 static int
600 mpt_set_initial_config_sas(struct mpt_softc *mpt)
601 {
602 	return (0);
603 }
604 
605 /*
606  * Read SCSI configuration information
607  */
608 static int
609 mpt_read_config_info_spi(struct mpt_softc *mpt)
610 {
611 	int rv, i;
612 
613 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
614 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
615 	if (rv) {
616 		return (-1);
617 	}
618 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
619 	    mpt->mpt_port_page0.Header.PageVersion,
620 	    mpt->mpt_port_page0.Header.PageLength,
621 	    mpt->mpt_port_page0.Header.PageNumber,
622 	    mpt->mpt_port_page0.Header.PageType);
623 
624 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
625 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
626 	if (rv) {
627 		return (-1);
628 	}
629 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
630 	    mpt->mpt_port_page1.Header.PageVersion,
631 	    mpt->mpt_port_page1.Header.PageLength,
632 	    mpt->mpt_port_page1.Header.PageNumber,
633 	    mpt->mpt_port_page1.Header.PageType);
634 
635 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
636 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
637 	if (rv) {
638 		return (-1);
639 	}
640 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
641 	    mpt->mpt_port_page2.Header.PageVersion,
642 	    mpt->mpt_port_page2.Header.PageLength,
643 	    mpt->mpt_port_page2.Header.PageNumber,
644 	    mpt->mpt_port_page2.Header.PageType);
645 
646 	for (i = 0; i < 16; i++) {
647 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
648 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
649 		if (rv) {
650 			return (-1);
651 		}
652 		mpt_lprt(mpt, MPT_PRT_DEBUG,
653 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
654 		    mpt->mpt_dev_page0[i].Header.PageVersion,
655 		    mpt->mpt_dev_page0[i].Header.PageLength,
656 		    mpt->mpt_dev_page0[i].Header.PageNumber,
657 		    mpt->mpt_dev_page0[i].Header.PageType);
658 
659 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
660 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
661 		if (rv) {
662 			return (-1);
663 		}
664 		mpt_lprt(mpt, MPT_PRT_DEBUG,
665 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
666 		    mpt->mpt_dev_page1[i].Header.PageVersion,
667 		    mpt->mpt_dev_page1[i].Header.PageLength,
668 		    mpt->mpt_dev_page1[i].Header.PageNumber,
669 		    mpt->mpt_dev_page1[i].Header.PageType);
670 	}
671 
672 	/*
673 	 * At this point, we don't *have* to fail. As long as we have
674 	 * valid config header information, we can (barely) lurch
675 	 * along.
676 	 */
677 
678 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
679 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
680 	if (rv) {
681 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
682 	} else {
683 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
684 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
685 		    mpt->mpt_port_page0.Capabilities,
686 		    mpt->mpt_port_page0.PhysicalInterface);
687 	}
688 
689 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
690 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
691 	if (rv) {
692 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
693 	} else {
694 		mpt_lprt(mpt, MPT_PRT_DEBUG,
695 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
696 		    mpt->mpt_port_page1.Configuration,
697 		    mpt->mpt_port_page1.OnBusTimerValue);
698 	}
699 
700 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
701 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
702 	if (rv) {
703 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
704 	} else {
705 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
706 		    "Port Page 2: Flags %x Settings %x\n",
707 		    mpt->mpt_port_page2.PortFlags,
708 		    mpt->mpt_port_page2.PortSettings);
709 		for (i = 0; i < 16; i++) {
710 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
711 		  	    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
712 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
713 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
714 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
715 		}
716 	}
717 
718 	for (i = 0; i < 16; i++) {
719 		rv = mpt_read_cur_cfg_page(mpt, i,
720 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
721 		    FALSE, 5000);
722 		if (rv) {
723 			mpt_prt(mpt,
724 			    "cannot read SPI Target %d Device Page 0\n", i);
725 			continue;
726 		}
727 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
728 		    "target %d page 0: Negotiated Params %x Information %x\n",
729 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
730 		    mpt->mpt_dev_page0[i].Information);
731 
732 		rv = mpt_read_cur_cfg_page(mpt, i,
733 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
734 		    FALSE, 5000);
735 		if (rv) {
736 			mpt_prt(mpt,
737 			    "cannot read SPI Target %d Device Page 1\n", i);
738 			continue;
739 		}
740 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
741 		    "target %d page 1: Requested Params %x Configuration %x\n",
742 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
743 		    mpt->mpt_dev_page1[i].Configuration);
744 	}
745 	return (0);
746 }
747 
748 /*
749  * Validate SPI configuration information.
750  *
751  * In particular, validate SPI Port Page 1.
752  */
753 static int
754 mpt_set_initial_config_spi(struct mpt_softc *mpt)
755 {
756 	int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
757 	int error;
758 
759 	mpt->mpt_disc_enable = 0xff;
760 	mpt->mpt_tag_enable = 0;
761 
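	/*
	 * SPI Port Page 1's Configuration word carries the port's SCSI ID
	 * in the low 16 bits and the matching one-hot ID bitmask in the
	 * high 16 bits, which is what pp1val above encodes. For example,
	 * an initiator ID of 7 yields ((1 << 7) << 16) | 7 == 0x00800007.
	 */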
762 	if (mpt->mpt_port_page1.Configuration != pp1val) {
763 		CONFIG_PAGE_SCSI_PORT_1 tmp;
764 
765 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
766 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
767 		tmp = mpt->mpt_port_page1;
768 		tmp.Configuration = pp1val;
769 		error = mpt_write_cur_cfg_page(mpt, 0,
770 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
771 		if (error) {
772 			return (-1);
773 		}
774 		error = mpt_read_cur_cfg_page(mpt, 0,
775 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
776 		if (error) {
777 			return (-1);
778 		}
779 		if (tmp.Configuration != pp1val) {
780 			mpt_prt(mpt,
781 			    "failed to reset SPI Port Page 1 Config value\n");
782 			return (-1);
783 		}
784 		mpt->mpt_port_page1 = tmp;
785 	}
786 
787 	/*
788 	 * The purpose of this exercise is to get
789 	 * all targets back to async/narrow.
790 	 *
791 	 * We skip this step if the BIOS has already negotiated
792 	 * speeds with the targets and does not require us to
793 	 * do Domain Validation.
794 	 */
795 	i = mpt->mpt_port_page2.PortSettings &
796 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
797 	j = mpt->mpt_port_page2.PortFlags &
798 	    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
799 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* &&
800 	    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) {
801 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
802 		    "honoring BIOS transfer negotiations\n");
803 	} else {
804 		for (i = 0; i < 16; i++) {
805 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
806 			mpt->mpt_dev_page1[i].Configuration = 0;
807 			(void) mpt_update_spi_config(mpt, i);
808 		}
809 	}
810 	return (0);
811 }
812 
813 int
814 mpt_cam_enable(struct mpt_softc *mpt)
815 {
816 	int error;
817 
818 	MPT_LOCK(mpt);
819 
820 	error = EIO;
821 	if (mpt->is_fc) {
822 		if (mpt_read_config_info_fc(mpt)) {
823 			goto out;
824 		}
825 		if (mpt_set_initial_config_fc(mpt)) {
826 			goto out;
827 		}
828 	} else if (mpt->is_sas) {
829 		if (mpt_read_config_info_sas(mpt)) {
830 			goto out;
831 		}
832 		if (mpt_set_initial_config_sas(mpt)) {
833 			goto out;
834 		}
835 	} else if (mpt->is_spi) {
836 		if (mpt_read_config_info_spi(mpt)) {
837 			goto out;
838 		}
839 		if (mpt_set_initial_config_spi(mpt)) {
840 			goto out;
841 		}
842 	}
843 	error = 0;
844 
845 out:
846 	MPT_UNLOCK(mpt);
847 	return (error);
848 }
849 
850 void
851 mpt_cam_ready(struct mpt_softc *mpt)
852 {
853 	/*
854 	 * If we're in target mode, hang out resources now
855 	 * so we don't cause the world to hang talking to us.
856 	 */
857 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
858 		/*
859 		 * Try to add some target command resources
860 		 */
861 		MPT_LOCK(mpt);
862 		if (mpt_add_target_commands(mpt) == FALSE) {
863 			mpt_prt(mpt, "failed to add target commands\n");
864 		}
865 		MPT_UNLOCK(mpt);
866 	}
867 	mpt->ready = 1;
868 }
869 
870 void
871 mpt_cam_detach(struct mpt_softc *mpt)
872 {
873 	mpt_handler_t handler;
874 
875 	MPT_LOCK(mpt);
876 	mpt->ready = 0;
877 	mpt_terminate_recovery_thread(mpt);
878 
879 	handler.reply_handler = mpt_scsi_reply_handler;
880 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
881 			       scsi_io_handler_id);
882 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
883 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
884 			       scsi_tmf_handler_id);
885 	handler.reply_handler = mpt_fc_els_reply_handler;
886 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
887 			       fc_els_handler_id);
888 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
889 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
890 			       mpt->scsi_tgt_handler_id);
891 
892 	if (mpt->tmf_req != NULL) {
893 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
894 		mpt_free_request(mpt, mpt->tmf_req);
895 		mpt->tmf_req = NULL;
896 	}
897 	MPT_UNLOCK(mpt);
898 
899 	if (mpt->sim != NULL) {
900 		xpt_free_path(mpt->path);
901 		xpt_bus_deregister(cam_sim_path(mpt->sim));
902 		cam_sim_free(mpt->sim, TRUE);
903 		mpt->sim = NULL;
904 	}
905 
906 	if (mpt->phydisk_sim != NULL) {
907 		xpt_free_path(mpt->phydisk_path);
908 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
909 		cam_sim_free(mpt->phydisk_sim, TRUE);
910 		mpt->phydisk_sim = NULL;
911 	}
912 }
913 
914 /* This routine is used after a system crash to dump core onto the
915  * swap device. */
916 static void
917 mpt_poll(struct cam_sim *sim)
918 {
919 	struct mpt_softc *mpt;
920 
921 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
922 	mpt_intr(mpt);
923 }
924 
925 /*
926  * Watchdog timeout routine for SCSI requests.
927  */
928 static void
929 mpt_timeout(void *arg)
930 {
931 	union ccb	 *ccb;
932 	struct mpt_softc *mpt;
933 	request_t	 *req;
934 
935 	ccb = (union ccb *)arg;
936 	mpt = ccb->ccb_h.ccb_mpt_ptr;
937 
938 	MPT_LOCK(mpt);
939 	req = ccb->ccb_h.ccb_req_ptr;
940 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
941 	    req->serno, ccb, req->ccb);
942 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
943 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
944 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
945 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
946 		req->state |= REQ_STATE_TIMEDOUT;
947 		mpt_wakeup_recovery_thread(mpt);
948 	}
949 	MPT_UNLOCK(mpt);
950 }
951 
952 /*
953  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
954  *
955  * Takes a list of physical segments and builds the SGL for SCSI IO command
956  * and forwards the commard to the IOC after one last check that CAM has not
957  * aborted the transaction.
958  */
959 static void
960 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
961 {
962 	request_t *req, *trq;
963 	char *mpt_off;
964 	union ccb *ccb;
965 	struct mpt_softc *mpt;
966 	int seg, first_lim;
967 	uint32_t flags, nxt_off;
968 	void *sglp = NULL;
969 	MSG_REQUEST_HEADER *hdrp;
970 	SGE_SIMPLE64 *se;
971 	SGE_CHAIN64 *ce;
972 	int istgt = 0;
973 
974 	req = (request_t *)arg;
975 	ccb = req->ccb;
976 
977 	mpt = ccb->ccb_h.ccb_mpt_ptr;
978 	req = ccb->ccb_h.ccb_req_ptr;
979 
980 	hdrp = req->req_vbuf;
981 	mpt_off = req->req_vbuf;
982 
983 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
984 		error = EFBIG;
985 	}
986 
987 	if (error == 0) {
988 		switch (hdrp->Function) {
989 		case MPI_FUNCTION_SCSI_IO_REQUEST:
990 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
991 			istgt = 0;
992 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
993 			break;
994 		case MPI_FUNCTION_TARGET_ASSIST:
995 			istgt = 1;
996 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
997 			break;
998 		default:
999 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1000 			    hdrp->Function);
1001 			error = EINVAL;
1002 			break;
1003 		}
1004 	}
1005 
1006 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1007 		error = EFBIG;
1008 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1009 		    nseg, mpt->max_seg_cnt);
1010 	}
1011 
1012 bad:
1013 	if (error != 0) {
1014 		if (error != EFBIG && error != ENOMEM) {
1015 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1016 		}
1017 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1018 			cam_status status;
1019 			mpt_freeze_ccb(ccb);
1020 			if (error == EFBIG) {
1021 				status = CAM_REQ_TOO_BIG;
1022 			} else if (error == ENOMEM) {
1023 				if (mpt->outofbeer == 0) {
1024 					mpt->outofbeer = 1;
1025 					xpt_freeze_simq(mpt->sim, 1);
1026 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1027 					    "FREEZEQ\n");
1028 				}
1029 				status = CAM_REQUEUE_REQ;
1030 			} else {
1031 				status = CAM_REQ_CMP_ERR;
1032 			}
1033 			mpt_set_ccb_status(ccb, status);
1034 		}
1035 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1036 			request_t *cmd_req =
1037 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1038 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1039 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1040 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1041 		}
1042 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1043 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1044 		xpt_done(ccb);
1045 		CAMLOCK_2_MPTLOCK(mpt);
1046 		mpt_free_request(mpt, req);
1047 		MPTLOCK_2_CAMLOCK(mpt);
1048 		return;
1049 	}
1050 
1051 	/*
1052 	 * No data to transfer?
1053 	 * Just make a single simple SGL with zero length.
1054 	 */
1055 
1056 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1057 		int tidx = ((char *)sglp) - mpt_off;
1058 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1059 	}
1060 
1061 	if (nseg == 0) {
1062 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1063 		MPI_pSGE_SET_FLAGS(se1,
1064 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1065 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1066 		se1->FlagsLength = htole32(se1->FlagsLength);
1067 		goto out;
1068 	}
1069 
1071 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1072 	if (istgt == 0) {
1073 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1074 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1075 		}
1076 	} else {
1077 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1078 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1079 		}
1080 	}
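	/*
	 * Note the inversion for target mode: a CAM_DIR_IN target CCB
	 * moves data toward the initiator, which from the IOC's point of
	 * view is a host-to-IOC (outbound) transfer, and vice versa.
	 */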
1081 
1082 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1083 		bus_dmasync_op_t op;
1084 		if (istgt == 0) {
1085 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1086 				op = BUS_DMASYNC_PREREAD;
1087 			} else {
1088 				op = BUS_DMASYNC_PREWRITE;
1089 			}
1090 		} else {
1091 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1092 				op = BUS_DMASYNC_PREWRITE;
1093 			} else {
1094 				op = BUS_DMASYNC_PREREAD;
1095 			}
1096 		}
1097 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1098 	}
1099 
1100 	/*
1101 	 * Okay, fill in what we can at the end of the command frame.
1102 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1103 	 * the command frame.
1104 	 *
1105 	 * Otherwise, we fill in the first MPT_NSGL_FIRST - 1 slots with
1106 	 * SIMPLE64 elements and continue with CHAIN64 entries after
1107 	 * that.
1108 	 */
1109 
1110 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1111 		first_lim = nseg;
1112 	} else {
1113 		/*
1114 		 * Leave room for CHAIN element
1115 		 */
1116 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1117 	}
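	/*
	 * first_lim is the number of SIMPLE64 elements that will be placed
	 * directly in the command frame; reserving one slot above leaves
	 * room for the CHAIN64 descriptor linking to the continuation list.
	 */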
1118 
1119 	se = (SGE_SIMPLE64 *) sglp;
1120 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1121 		uint32_t tf;
1122 
1123 		memset(se, 0, sizeof (*se));
1124 		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1125 		if (sizeof(bus_addr_t) > 4) {
1126 			se->Address.High = htole32(((uint64_t)dm_segs->ds_addr) >> 32);
1127 		}
1128 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1129 		tf = flags;
1130 		if (seg == first_lim - 1) {
1131 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1132 		}
1133 		if (seg == nseg - 1) {
1134 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1135 				MPI_SGE_FLAGS_END_OF_BUFFER;
1136 		}
1137 		MPI_pSGE_SET_FLAGS(se, tf);
1138 		se->FlagsLength = htole32(se->FlagsLength);
1139 	}
1140 
1141 	if (seg == nseg) {
1142 		goto out;
1143 	}
1144 
1145 	/*
1146 	 * Tell the IOC where to find the first chain element.
1147 	 */
1148 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
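	/*
	 * ChainOffset is expressed in 32-bit words from the start of the
	 * request header, hence the byte offset is shifted right by two.
	 */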
1149 	nxt_off = MPT_RQSL(mpt);
1150 	trq = req;
1151 
1152 	/*
1153 	 * Make up the rest of the data segments out of a chain element
1154 	 * (contiained in the current request frame) which points to
1155 	 * SIMPLE64 elements in the next request frame, possibly ending
1156 	 * with *another* chain element (if there's more).
1157 	 */
1158 	while (seg < nseg) {
1159 		int this_seg_lim;
1160 		uint32_t tf, cur_off;
1161 		bus_addr_t chain_list_addr;
1162 
1163 		/*
1164 		 * Point to the chain descriptor. Note that the chain
1165 		 * descriptor is at the end of the *previous* list (whether
1166 		 * chain or simple).
1167 		 */
1168 		ce = (SGE_CHAIN64 *) se;
1169 
1170 		/*
1171 		 * Before we change our current pointer, make  sure we won't
1172 		 * overflow the request area with this frame. Note that we
1173 		 * test against 'greater than' here as it's okay in this case
1174 		 * to have next offset be just outside the request area.
1175 		 */
1176 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1177 			nxt_off = MPT_REQUEST_AREA;
1178 			goto next_chain;
1179 		}
1180 
1181 		/*
1182 		 * Set our SGE element pointer to the beginning of the chain
1183 		 * list and update our next chain list offset.
1184 		 */
1185 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1186 		cur_off = nxt_off;
1187 		nxt_off += MPT_RQSL(mpt);
1188 
1189 		/*
1190 		 * Now initialized the chain descriptor.
1191 		 */
1192 		memset(ce, 0, sizeof (*ce));
1193 
1194 		/*
1195 		 * Get the physical address of the chain list.
1196 		 */
1197 		chain_list_addr = trq->req_pbuf;
1198 		chain_list_addr += cur_off;
1199 		if (sizeof (bus_addr_t) > 4) {
1200 			ce->Address.High =
1201 			    htole32((uint32_t) ((uint64_t)chain_list_addr >> 32));
1202 		}
1203 		ce->Address.Low = htole32((uint32_t) chain_list_addr);
1204 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1205 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1206 
1207 		/*
1208 		 * If we have more than a frame's worth of segments left,
1209 		 * set up the chain list to have the last element be another
1210 		 * chain descriptor.
1211 		 */
1212 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1213 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1214 			/*
1215 			 * The chain Length is the size in bytes of the simple
1216 			 * elements plus the trailing chain element.
1217 			 *
1218 			 * The next chain descriptor offset is that same
1219 			 * simple-element size expressed in 32-bit words.
1220 			 */
1221 			ce->Length = (this_seg_lim - seg) *
1222 			    sizeof (SGE_SIMPLE64);
1223 			ce->NextChainOffset = ce->Length >> 2;
1224 			ce->Length += sizeof (SGE_CHAIN64);
1225 		} else {
1226 			this_seg_lim = nseg;
1227 			ce->Length = (this_seg_lim - seg) *
1228 			    sizeof (SGE_SIMPLE64);
1229 		}
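		/*
		 * In the chained case, NextChainOffset (again in 32-bit
		 * words) points just past the simple elements counted so
		 * far, i.e. at the trailing chain descriptor; for the final
		 * list it stays zero from the memset above.
		 */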
1230 
1231 		/*
1232 		 * Fill in the chain list SGE elements with our segment data.
1233 		 *
1234 		 * If we're the last element in this chain list, set the last
1235 		 * element flag. If we're the completely last element period,
1236 		 * set the end of list and end of buffer flags.
1237 		 */
1238 		while (seg < this_seg_lim) {
1239 			memset(se, 0, sizeof (*se));
1240 			se->Address.Low = htole32(dm_segs->ds_addr);
1241 			if (sizeof (bus_addr_t) > 4) {
1242 				se->Address.High =
1243 				    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
1244 			}
1245 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1246 			tf = flags;
1247 			if (seg ==  this_seg_lim - 1) {
1248 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1249 			}
1250 			if (seg == nseg - 1) {
1251 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1252 					MPI_SGE_FLAGS_END_OF_BUFFER;
1253 			}
1254 			MPI_pSGE_SET_FLAGS(se, tf);
1255 			se->FlagsLength = htole32(se->FlagsLength);
1256 			se++;
1257 			seg++;
1258 			dm_segs++;
1259 		}
1260 
1261     next_chain:
1262 		/*
1263 		 * If we have more segments to do and we've used up all of
1264 		 * the space in a request area, go allocate another one
1265 		 * and chain to that.
1266 		 */
1267 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1268 			request_t *nrq;
1269 
1270 			CAMLOCK_2_MPTLOCK(mpt);
1271 			nrq = mpt_get_request(mpt, FALSE);
1272 			MPTLOCK_2_CAMLOCK(mpt);
1273 
1274 			if (nrq == NULL) {
1275 				error = ENOMEM;
1276 				goto bad;
1277 			}
1278 
1279 			/*
1280 			 * Append the new request area on the tail of our list.
1281 			 */
1282 			if ((trq = req->chain) == NULL) {
1283 				req->chain = nrq;
1284 			} else {
1285 				while (trq->chain != NULL) {
1286 					trq = trq->chain;
1287 				}
1288 				trq->chain = nrq;
1289 			}
1290 			trq = nrq;
1291 			mpt_off = trq->req_vbuf;
1292 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1293 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1294 			}
1295 			nxt_off = 0;
1296 		}
1297 	}
1298 out:
1299 
1300 	/*
1301 	 * Last time we need to check if this CCB needs to be aborted.
1302 	 */
1303 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1304 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1305 			request_t *cmd_req =
1306 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1307 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1308 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1309 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1310 		}
1311 		mpt_prt(mpt,
1312 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1313 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1314 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1315 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1316 		}
1317 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1318 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1319 		xpt_done(ccb);
1320 		CAMLOCK_2_MPTLOCK(mpt);
1321 		mpt_free_request(mpt, req);
1322 		MPTLOCK_2_CAMLOCK(mpt);
1323 		return;
1324 	}
1325 
1326 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1327 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1328 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
1329 		    mpt_timeout, ccb);
1330 	} else {
1331 		mpt_req_timeout_init(req);
1332 	}
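	/*
	 * The CCB timeout is in milliseconds; the conversion above turns
	 * it into callout ticks (hz ticks per second) before arming the
	 * watchdog that drives mpt_timeout().
	 */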
1333 	if (mpt->verbose > MPT_PRT_DEBUG) {
1334 		int nc = 0;
1335 		mpt_print_request(req->req_vbuf);
1336 		for (trq = req->chain; trq; trq = trq->chain) {
1337 			printf("  Additional Chain Area %d\n", nc++);
1338 			mpt_dump_sgl(trq->req_vbuf, 0);
1339 		}
1340 	}
1341 
1342 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1343 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1344 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1345 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1346 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1347 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1348 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1349 		} else {
1350 			tgt->state = TGT_STATE_MOVING_DATA;
1351 		}
1352 #else
1353 		tgt->state = TGT_STATE_MOVING_DATA;
1354 #endif
1355 	}
1356 	CAMLOCK_2_MPTLOCK(mpt);
1357 	mpt_send_cmd(mpt, req);
1358 	MPTLOCK_2_CAMLOCK(mpt);
1359 }
1360 
1361 static void
1362 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1363 {
1364 	request_t *req, *trq;
1365 	char *mpt_off;
1366 	union ccb *ccb;
1367 	struct mpt_softc *mpt;
1368 	int seg, first_lim;
1369 	uint32_t flags, nxt_off;
1370 	void *sglp = NULL;
1371 	MSG_REQUEST_HEADER *hdrp;
1372 	SGE_SIMPLE32 *se;
1373 	SGE_CHAIN32 *ce;
1374 	int istgt = 0;
1375 
1376 	req = (request_t *)arg;
1377 	ccb = req->ccb;
1378 
1379 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1380 	req = ccb->ccb_h.ccb_req_ptr;
1381 
1382 	hdrp = req->req_vbuf;
1383 	mpt_off = req->req_vbuf;
1384 
1386 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1387 		error = EFBIG;
1388 	}
1389 
1390 	if (error == 0) {
1391 		switch (hdrp->Function) {
1392 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1393 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1394 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1395 			break;
1396 		case MPI_FUNCTION_TARGET_ASSIST:
1397 			istgt = 1;
1398 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1399 			break;
1400 		default:
1401 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1402 			    hdrp->Function);
1403 			error = EINVAL;
1404 			break;
1405 		}
1406 	}
1407 
1408 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1409 		error = EFBIG;
1410 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1411 		    nseg, mpt->max_seg_cnt);
1412 	}
1413 
1414 bad:
1415 	if (error != 0) {
1416 		if (error != EFBIG && error != ENOMEM) {
1417 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1418 		}
1419 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1420 			cam_status status;
1421 			mpt_freeze_ccb(ccb);
1422 			if (error == EFBIG) {
1423 				status = CAM_REQ_TOO_BIG;
1424 			} else if (error == ENOMEM) {
1425 				if (mpt->outofbeer == 0) {
1426 					mpt->outofbeer = 1;
1427 					xpt_freeze_simq(mpt->sim, 1);
1428 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1429 					    "FREEZEQ\n");
1430 				}
1431 				status = CAM_REQUEUE_REQ;
1432 			} else {
1433 				status = CAM_REQ_CMP_ERR;
1434 			}
1435 			mpt_set_ccb_status(ccb, status);
1436 		}
1437 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1438 			request_t *cmd_req =
1439 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1440 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1441 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1442 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1443 		}
1444 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1445 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1446 		xpt_done(ccb);
1447 		CAMLOCK_2_MPTLOCK(mpt);
1448 		mpt_free_request(mpt, req);
1449 		MPTLOCK_2_CAMLOCK(mpt);
1450 		return;
1451 	}
1452 
1453 	/*
1454 	 * No data to transfer?
1455 	 * Just make a single simple SGL with zero length.
1456 	 */
1457 
1458 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1459 		int tidx = ((char *)sglp) - mpt_off;
1460 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1461 	}
1462 
1463 	if (nseg == 0) {
1464 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1465 		MPI_pSGE_SET_FLAGS(se1,
1466 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1467 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1468 		se1->FlagsLength = htole32(se1->FlagsLength);
1469 		goto out;
1470 	}
1471 
1473 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
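	/*
	 * Unlike the a64 variant, this path omits
	 * MPI_SGE_FLAGS_64_BIT_ADDRESSING and carries only a 32-bit
	 * Address per SGE; mpt_start() selects it when bus_addr_t fits
	 * in 32 bits.
	 */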
1474 	if (istgt == 0) {
1475 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1476 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1477 		}
1478 	} else {
1479 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1480 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1481 		}
1482 	}
1483 
1484 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1485 		bus_dmasync_op_t op;
1486 		if (istgt) {
1487 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1488 				op = BUS_DMASYNC_PREREAD;
1489 			} else {
1490 				op = BUS_DMASYNC_PREWRITE;
1491 			}
1492 		} else {
1493 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1494 				op = BUS_DMASYNC_PREWRITE;
1495 			} else {
1496 				op = BUS_DMASYNC_PREREAD;
1497 			}
1498 		}
1499 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1500 	}
1501 
1502 	/*
1503 	 * Okay, fill in what we can at the end of the command frame.
1504 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1505 	 * the command frame.
1506 	 *
1507 	 * Otherwise, we fill in the first MPT_NSGL_FIRST - 1 slots with
1508 	 * SIMPLE32 elements and continue with CHAIN32 entries after
1509 	 * that.
1510 	 */
1511 
1512 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1513 		first_lim = nseg;
1514 	} else {
1515 		/*
1516 		 * Leave room for CHAIN element
1517 		 */
1518 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1519 	}
1520 
1521 	se = (SGE_SIMPLE32 *) sglp;
1522 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1523 		uint32_t tf;
1524 
1525 		memset(se, 0, sizeof (*se));
1526 		se->Address = dm_segs->ds_addr;
1530 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1531 		tf = flags;
1532 		if (seg == first_lim - 1) {
1533 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1534 		}
1535 		if (seg == nseg - 1) {
1536 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1537 				MPI_SGE_FLAGS_END_OF_BUFFER;
1538 		}
1539 		MPI_pSGE_SET_FLAGS(se, tf);
1540 		se->FlagsLength = htole32(se->FlagsLength);
1541 	}
1542 
1543 	if (seg == nseg) {
1544 		goto out;
1545 	}
1546 
1547 	/*
1548 	 * Tell the IOC where to find the first chain element.
1549 	 */
1550 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1551 	nxt_off = MPT_RQSL(mpt);
1552 	trq = req;
1553 
1554 	/*
1555 	 * Make up the rest of the data segments out of a chain element
1556 	 * (contiained in the current request frame) which points to
1557 	 * SIMPLE32 elements in the next request frame, possibly ending
1558 	 * with *another* chain element (if there's more).
1559 	 */
1560 	while (seg < nseg) {
1561 		int this_seg_lim;
1562 		uint32_t tf, cur_off;
1563 		bus_addr_t chain_list_addr;
1564 
1565 		/*
1566 		 * Point to the chain descriptor. Note that the chain
1567 		 * descriptor is at the end of the *previous* list (whether
1568 		 * chain or simple).
1569 		 */
1570 		ce = (SGE_CHAIN32 *) se;
1571 
1572 		/*
1573 		 * Before we change our current pointer, make  sure we won't
1574 		 * overflow the request area with this frame. Note that we
1575 		 * test against 'greater than' here as it's okay in this case
1576 		 * to have next offset be just outside the request area.
1577 		 */
1578 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1579 			nxt_off = MPT_REQUEST_AREA;
1580 			goto next_chain;
1581 		}
1582 
1583 		/*
1584 		 * Set our SGE element pointer to the beginning of the chain
1585 		 * list and update our next chain list offset.
1586 		 */
1587 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1588 		cur_off = nxt_off;
1589 		nxt_off += MPT_RQSL(mpt);
1590 
1591 		/*
1592 		 * Now initialized the chain descriptor.
1593 		 */
1594 		memset(ce, 0, sizeof (*ce));
1595 
1596 		/*
1597 		 * Get the physical address of the chain list.
1598 		 */
1599 		chain_list_addr = trq->req_pbuf;
1600 		chain_list_addr += cur_off;
1604 		ce->Address = chain_list_addr;
1605 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1606 
1608 		/*
1609 		 * If we have more than a frame's worth of segments left,
1610 		 * set up the chain list to have the last element be another
1611 		 * chain descriptor.
1612 		 */
1613 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1614 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1615 			/*
1616 			 * The chain Length is the size in bytes of the simple
1617 			 * elements plus the trailing chain element.
1618 			 *
1619 			 * The next chain descriptor offset is that same
1620 			 * simple-element size expressed in 32-bit words.
1621 			 */
1622 			ce->Length = (this_seg_lim - seg) *
1623 			    sizeof (SGE_SIMPLE32);
1624 			ce->NextChainOffset = ce->Length >> 2;
1625 			ce->Length += sizeof (SGE_CHAIN32);
1626 		} else {
1627 			this_seg_lim = nseg;
1628 			ce->Length = (this_seg_lim - seg) *
1629 			    sizeof (SGE_SIMPLE32);
1630 		}
1631 
1632 		/*
1633 		 * Fill in the chain list SGE elements with our segment data.
1634 		 *
1635 		 * If we're the last element in this chain list, set the last
1636 		 * element flag. If we're the completely last element period,
1637 		 * set the end of list and end of buffer flags.
1638 		 */
1639 		while (seg < this_seg_lim) {
1640 			memset(se, 0, sizeof (*se));
1641 			se->Address = dm_segs->ds_addr;
1646 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1647 			tf = flags;
1648 			if (seg ==  this_seg_lim - 1) {
1649 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1650 			}
1651 			if (seg == nseg - 1) {
1652 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1653 					MPI_SGE_FLAGS_END_OF_BUFFER;
1654 			}
1655 			MPI_pSGE_SET_FLAGS(se, tf);
1656 			se->FlagsLength = htole32(se->FlagsLength);
1657 			se++;
1658 			seg++;
1659 			dm_segs++;
1660 		}
1661 
1662     next_chain:
1663 		/*
1664 		 * If we have more segments to do and we've used up all of
1665 		 * the space in a request area, go allocate another one
1666 		 * and chain to that.
1667 		 */
1668 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1669 			request_t *nrq;
1670 
1671 			CAMLOCK_2_MPTLOCK(mpt);
1672 			nrq = mpt_get_request(mpt, FALSE);
1673 			MPTLOCK_2_CAMLOCK(mpt);
1674 
1675 			if (nrq == NULL) {
1676 				error = ENOMEM;
1677 				goto bad;
1678 			}
1679 
1680 			/*
1681 			 * Append the new request area on the tail of our list.
1682 			 */
1683 			if ((trq = req->chain) == NULL) {
1684 				req->chain = nrq;
1685 			} else {
1686 				while (trq->chain != NULL) {
1687 					trq = trq->chain;
1688 				}
1689 				trq->chain = nrq;
1690 			}
1691 			trq = nrq;
1692 			mpt_off = trq->req_vbuf;
1693 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1694 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1695 			}
1696 			nxt_off = 0;
1697 		}
1698 	}
1699 out:
1700 
1701 	/*
1702 	 * Last time we need to check if this CCB needs to be aborted.
1703 	 */
1704 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1705 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1706 			request_t *cmd_req =
1707 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1708 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1709 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1710 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1711 		}
1712 		mpt_prt(mpt,
1713 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1714 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1715 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1716 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1717 		}
1718 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1719 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1720 		xpt_done(ccb);
1721 		CAMLOCK_2_MPTLOCK(mpt);
1722 		mpt_free_request(mpt, req);
1723 		MPTLOCK_2_CAMLOCK(mpt);
1724 		return;
1725 	}
1726 
1727 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1728 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1729 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
1730 		    mpt_timeout, ccb);
1731 	} else {
1732 		mpt_req_timeout_init(req);
1733 	}
1734 	if (mpt->verbose > MPT_PRT_DEBUG) {
1735 		int nc = 0;
1736 		mpt_print_request(req->req_vbuf);
1737 		for (trq = req->chain; trq; trq = trq->chain) {
1738 			printf("  Additional Chain Area %d\n", nc++);
1739 			mpt_dump_sgl(trq->req_vbuf, 0);
1740 		}
1741 	}
1742 
1743 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1744 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1745 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1746 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1747 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1748 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1749 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1750 		} else {
1751 			tgt->state = TGT_STATE_MOVING_DATA;
1752 		}
1753 #else
1754 		tgt->state = TGT_STATE_MOVING_DATA;
1755 #endif
1756 	}
1757 	CAMLOCK_2_MPTLOCK(mpt);
1758 	mpt_send_cmd(mpt, req);
1759 	MPTLOCK_2_CAMLOCK(mpt);
1760 }
1761 
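/*
 * CAM entry point for XPT_SCSI_IO: allocate a request, build the
 * MSG_SCSI_IO_REQUEST for the IOC, and map any data buffer into bus
 * space; completion continues in mpt_execute_req/mpt_execute_req_a64.
 */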
1762 static void
1763 mpt_start(struct cam_sim *sim, union ccb *ccb)
1764 {
1765 	request_t *req;
1766 	struct mpt_softc *mpt;
1767 	MSG_SCSI_IO_REQUEST *mpt_req;
1768 	struct ccb_scsiio *csio = &ccb->csio;
1769 	struct ccb_hdr *ccbh = &ccb->ccb_h;
1770 	bus_dmamap_callback_t *cb;
1771 	target_id_t tgt;
1772 	int raid_passthru;
1773 
1774 	/* Get the pointer for the physical adapter */
1775 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1776 	raid_passthru = (sim == mpt->phydisk_sim);
1777 
1778 	CAMLOCK_2_MPTLOCK(mpt);
1779 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
1780 		if (mpt->outofbeer == 0) {
1781 			mpt->outofbeer = 1;
1782 			xpt_freeze_simq(mpt->sim, 1);
1783 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
1784 		}
1785 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1786 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
1787 		MPTLOCK_2_CAMLOCK(mpt);
1788 		xpt_done(ccb);
1789 		return;
1790 	}
1791 #ifdef	INVARIANTS
1792 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
1793 #endif
1794 	MPTLOCK_2_CAMLOCK(mpt);
1795 
1796 	if (sizeof (bus_addr_t) > 4) {
1797 		cb = mpt_execute_req_a64;
1798 	} else {
1799 		cb = mpt_execute_req;
1800 	}
1801 
1802 	/*
1803 	 * Link the ccb and the request structure so we can find
1804 	 * the other knowing either the request or the ccb
1805 	 */
1806 	req->ccb = ccb;
1807 	ccb->ccb_h.ccb_req_ptr = req;
1808 
1809 	/* Now we build the command for the IOC */
1810 	mpt_req = req->req_vbuf;
1811 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
1812 
1813 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1814 	if (raid_passthru) {
1815 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
1816 		CAMLOCK_2_MPTLOCK(mpt);
1817 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
1818 			MPTLOCK_2_CAMLOCK(mpt);
1819 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1820 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
1821 			xpt_done(ccb);
1822 			return;
1823 		}
1824 		MPTLOCK_2_CAMLOCK(mpt);
1825 		mpt_req->Bus = 0;	/* we never set bus here */
1826 	} else {
1827 		tgt = ccb->ccb_h.target_id;
1828 		mpt_req->Bus = 0;	/* XXX */
1829 
1830 	}
1831 	mpt_req->SenseBufferLength =
1832 		(csio->sense_len < MPT_SENSE_SIZE) ?
1833 		 csio->sense_len : MPT_SENSE_SIZE;
1834 
1835 	/*
1836 	 * We use the message context to find the request structure when we
1837 	 * get the command completion interrupt from the IOC.
1838 	 */
1839 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
1840 
1841 	/* Which physical device to do the I/O on */
1842 	mpt_req->TargetID = tgt;
1843 
1844 	/* We assume a single level LUN type */
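	/* (SAM flat space addressing: e.g. LUN 0x123 becomes 0x41, 0x23) */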
1845 	if (ccb->ccb_h.target_lun >= 256) {
1846 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
1847 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
1848 	} else {
1849 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
1850 	}
1851 
1852 	/* Set the direction of the transfer */
1853 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1854 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
1855 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1856 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
1857 	} else {
1858 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
1859 	}
1860 
1861 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1862 		switch(ccb->csio.tag_action) {
1863 		case MSG_HEAD_OF_Q_TAG:
1864 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
1865 			break;
1866 		case MSG_ACA_TASK:
1867 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
1868 			break;
1869 		case MSG_ORDERED_Q_TAG:
1870 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
1871 			break;
1872 		case MSG_SIMPLE_Q_TAG:
1873 		default:
1874 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1875 			break;
1876 		}
1877 	} else {
1878 		if (mpt->is_fc || mpt->is_sas) {
1879 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1880 		} else {
1881 			/* XXX No such thing for a target doing packetized. */
1882 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
1883 		}
1884 	}
1885 
1886 	if (mpt->is_spi) {
1887 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1888 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
1889 		}
1890 	}
1891 
1892 	/* Copy the scsi command block into place */
1893 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1894 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
1895 	} else {
1896 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
1897 	}
1898 
1899 	mpt_req->CDBLength = csio->cdb_len;
1900 	mpt_req->DataLength = htole32(csio->dxfer_len);
1901 	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
1902 
1903 	/*
1904 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
1905 	 */
1906 	if (mpt->verbose == MPT_PRT_DEBUG) {
1907 		U32 df;
1908 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
1909 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
1910 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
1911 		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
1912 		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
1913 			mpt_prtc(mpt, "(%s %u byte%s ",
1914 			    (df == MPI_SCSIIO_CONTROL_READ)?
1915 			    "read" : "write",  csio->dxfer_len,
1916 			    (csio->dxfer_len == 1)? ")" : "s)");
1917 		}
1918 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
1919 		    ccb->ccb_h.target_lun, req, req->serno);
1920 	}
1921 
1922 	/*
1923 	 * If we have any data to send with this command, map it into bus space.
1924 	 */
1925 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1926 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
1927 			/*
1928 			 * We've been given a pointer to a single buffer.
1929 			 */
1930 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
1931 				/*
1932 				 * Virtual address that needs to be translated into
1933 				 * one or more physical address ranges.
1934 				 */
1935 				int error;
1936 				int s = splsoftvm();
1937 				error = bus_dmamap_load(mpt->buffer_dmat,
1938 				    req->dmap, csio->data_ptr, csio->dxfer_len,
1939 				    cb, req, 0);
1940 				splx(s);
1941 				if (error == EINPROGRESS) {
1942 					/*
1943 					 * So as to maintain ordering,
1944 					 * freeze the controller queue
1945 					 * until our mapping is
1946 					 * returned.
1947 					 */
1948 					xpt_freeze_simq(mpt->sim, 1);
1949 					ccbh->status |= CAM_RELEASE_SIMQ;
1950 				}
1951 			} else {
1952 				/*
1953 				 * We have been given a pointer to a single
1954 				 * physical buffer.
1955 				 */
1956 				struct bus_dma_segment seg;
1957 				seg.ds_addr =
1958 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1959 				seg.ds_len = csio->dxfer_len;
1960 				(*cb)(req, &seg, 1, 0);
1961 			}
1962 		} else {
1963 			/*
1964 			 * We have been given a list of addresses.
1965 			 * This case could easily be supported, but such lists
1966 			 * are not currently generated by the CAM subsystem, so
1967 			 * there is no point in spending the time right now.
1968 			 */
1969 			struct bus_dma_segment *segs;
1970 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
1971 				(*cb)(req, NULL, 0, EFAULT);
1972 			} else {
1973 				/* Just use the segments provided */
1974 				segs = (struct bus_dma_segment *)csio->data_ptr;
1975 				(*cb)(req, segs, csio->sglist_cnt, 0);
1976 			}
1977 		}
1978 	} else {
1979 		(*cb)(req, NULL, 0, 0);
1980 	}
1981 }
1982 
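/*
 * Issue a bus or target reset via a SCSI task management function and
 * wait for the IOC to process it, falling back to a hard controller
 * reset if the TMF fails or times out.
 */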
1983 static int
1984 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
1985     int sleep_ok)
1986 {
1987 	int   error;
1988 	uint16_t status;
1989 	uint8_t response;
1990 
1991 	error = mpt_scsi_send_tmf(mpt,
1992 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
1993 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
1994 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1995 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
1996 	    0,	/* XXX How do I get the channel ID? */
1997 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
1998 	    lun != CAM_LUN_WILDCARD ? lun : 0,
1999 	    0, sleep_ok);
2000 
2001 	if (error != 0) {
2002 		/*
2003 		 * mpt_scsi_send_tmf hard resets on failure, so no
2004 		 * need to do so here.
2005 		 */
2006 		mpt_prt(mpt,
2007 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2008 		return (EIO);
2009 	}
2010 
2011 	/* Wait for bus reset to be processed by the IOC. */
2012 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2013 	    REQ_STATE_DONE, sleep_ok, 5000);
2014 
2015 	status = mpt->tmf_req->IOCStatus;
2016 	response = mpt->tmf_req->ResponseCode;
2017 	mpt->tmf_req->state = REQ_STATE_FREE;
2018 
2019 	if (error) {
2020 		mpt_prt(mpt, "mpt_bus_reset: Reset timed out. "
2021 		    "Resetting controller.\n");
2022 		mpt_reset(mpt, TRUE);
2023 		return (ETIMEDOUT);
2024 	}
2025 
2026 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2027 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2028 		    "Resetting controller.\n", status);
2029 		mpt_reset(mpt, TRUE);
2030 		return (EIO);
2031 	}
2032 
2033 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2034 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2035 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2036 		    "Resetting controller.\n", response);
2037 		mpt_reset(mpt, TRUE);
2038 		return (EIO);
2039 	}
2040 	return (0);
2041 }
2042 
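/*
 * Ask the IOC to reset the FC link by sending a primitive, optionally
 * waiting up to a minute for the send to complete.
 */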
2043 static int
2044 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2045 {
2046 	int r = 0;
2047 	request_t *req;
2048 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2049 
2050 	req = mpt_get_request(mpt, FALSE);
2051 	if (req == NULL) {
2052 		return (ENOMEM);
2053 	}
2054 	fc = req->req_vbuf;
2055 	memset(fc, 0, sizeof(*fc));
2056 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2057 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2058 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2059 	mpt_send_cmd(mpt, req);
2060 	if (dowait) {
2061 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2062 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2063 		if (r == 0) {
2064 			mpt_free_request(mpt, req);
2065 		}
2066 	}
2067 	return (r);
2068 }
2069 
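/*
 * Handle asynchronous event notifications from the IOC, translating
 * bus resets, rescans, and queue-full conditions into CAM actions.
 * Returns nonzero if the event was recognized.
 */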
2070 static int
2071 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2072 	      MSG_EVENT_NOTIFY_REPLY *msg)
2073 {
2074 	uint32_t data0, data1;
2075 
2076 	data0 = le32toh(msg->Data[0]);
2077 	data1 = le32toh(msg->Data[1]);
2078 	switch(msg->Event & 0xFF) {
2079 	case MPI_EVENT_UNIT_ATTENTION:
2080 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2081 		    (data0 >> 8) & 0xff, data0 & 0xff);
2082 		break;
2083 
2084 	case MPI_EVENT_IOC_BUS_RESET:
2085 		/* We generated a bus reset */
2086 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2087 		    (data0 >> 8) & 0xff);
2088 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2089 		break;
2090 
2091 	case MPI_EVENT_EXT_BUS_RESET:
2092 		/* Someone else generated a bus reset */
2093 		mpt_prt(mpt, "External Bus Reset Detected\n");
2094 		/*
2095 		 * These replies don't return EventData like the MPI
2096 		 * spec says they do
2097 		 */
2098 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2099 		break;
2100 
2101 	case MPI_EVENT_RESCAN:
2102 #if __FreeBSD_version >= 600000
2103 	{
2104 		union ccb *ccb;
2105 		uint32_t pathid;
2106 		/*
2107 		 * In general this means a device has been added to the loop.
2108 		 */
2109 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2110 		if (mpt->ready == 0) {
2111 			break;
2112 		}
2113 		if (mpt->phydisk_sim) {
2114 			pathid = cam_sim_path(mpt->phydisk_sim);
2115 		} else {
2116 			pathid = cam_sim_path(mpt->sim);
2117 		}
2118 		MPTLOCK_2_CAMLOCK(mpt);
2119 		/*
2120 		 * Allocate a CCB, create a wildcard path for this bus,
2121 		 * and schedule a rescan.
2122 		 */
2123 		ccb = xpt_alloc_ccb_nowait();
2124 		if (ccb == NULL) {
2125 			mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2126 			CAMLOCK_2_MPTLOCK(mpt);
2127 			break;
2128 		}
2129 
2130 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2131 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2132 			CAMLOCK_2_MPTLOCK(mpt);
2133 			mpt_prt(mpt, "unable to create path for rescan\n");
2134 			xpt_free_ccb(ccb);
2135 			break;
2136 		}
2137 		xpt_rescan(ccb);
2138 		CAMLOCK_2_MPTLOCK(mpt);
2139 		break;
2140 	}
2141 #else
2142 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2143 		break;
2144 #endif
2145 	case MPI_EVENT_LINK_STATUS_CHANGE:
2146 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2147 		    (data1 >> 8) & 0xff,
2148 		    ((data0 & 0xff) == 0)?  "Failed" : "Active");
2149 		break;
2150 
2151 	case MPI_EVENT_LOOP_STATE_CHANGE:
2152 		switch ((data0 >> 16) & 0xff) {
2153 		case 0x01:
2154 			mpt_prt(mpt,
2155 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2156 			    "(Loop Initialization)\n",
2157 			    (data1 >> 8) & 0xff,
2158 			    (data0 >> 8) & 0xff,
2159 			    (data0     ) & 0xff);
2160 			switch ((data0 >> 8) & 0xff) {
2161 			case 0xF7:
2162 				if ((data0 & 0xff) == 0xF7) {
2163 					mpt_prt(mpt, "Device needs AL_PA\n");
2164 				} else {
2165 					mpt_prt(mpt, "Device %02x doesn't like "
2166 					    "FC performance\n",
2167 					    data0 & 0xFF);
2168 				}
2169 				break;
2170 			case 0xF8:
2171 				if ((data0 & 0xff) == 0xF7) {
2172 					mpt_prt(mpt, "Device had loop failure "
2173 					    "at its receiver prior to acquiring"
2174 					    " AL_PA\n");
2175 				} else {
2176 					mpt_prt(mpt, "Device %02x detected loop"
2177 					    " failure at its receiver\n",
2178 					    data0 & 0xFF);
2179 				}
2180 				break;
2181 			default:
2182 				mpt_prt(mpt, "Device %02x requests that device "
2183 				    "%02x reset itself\n",
2184 				    data0 & 0xFF,
2185 				    (data0 >> 8) & 0xFF);
2186 				break;
2187 			}
2188 			break;
2189 		case 0x02:
2190 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2191 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2192 			    (data1 >> 8) & 0xff, /* Port */
2193 			    (data0 >>  8) & 0xff, /* Character 3 */
2194 			    (data0      ) & 0xff  /* Character 4 */);
2195 			break;
2196 		case 0x03:
2197 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2198 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2199 			    (data1 >> 8) & 0xff, /* Port */
2200 			    (data0 >> 8) & 0xff, /* Character 3 */
2201 			    (data0     ) & 0xff  /* Character 4 */);
2202 			break;
2203 		default:
2204 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2205 			    "FC event (%02x %02x %02x)\n",
2206 			    (data1 >> 8) & 0xff, /* Port */
2207 			    (data0 >> 16) & 0xff, /* Event */
2208 			    (data0 >>  8) & 0xff, /* Character 3 */
2209 			    (data0      ) & 0xff  /* Character 4 */);
2210 		}
2211 		break;
2212 
2213 	case MPI_EVENT_LOGOUT:
2214 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2215 		    (data1 >> 8) & 0xff, data0);
2216 		break;
2217 	case MPI_EVENT_QUEUE_FULL:
2218 	{
2219 		struct cam_sim *sim;
2220 		struct cam_path *tmppath;
2221 		struct ccb_relsim crs;
2222 		PTR_EVENT_DATA_QUEUE_FULL pqf =
2223 		    (PTR_EVENT_DATA_QUEUE_FULL) msg->Data;
2224 		lun_id_t lun_id;
2225 
2226 		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2227 		    "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2228 		if (mpt->phydisk_sim) {
2229 			sim = mpt->phydisk_sim;
2230 		} else {
2231 			sim = mpt->sim;
2232 		}
2233 		MPTLOCK_2_CAMLOCK(mpt);
2234 		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2235 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2236 			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2237 				mpt_prt(mpt, "unable to create a path to send "
2238 				    "XPT_REL_SIMQ\n");
2239 				CAMLOCK_2_MPTLOCK(mpt);
2240 				break;
2241 			}
2242 			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2243 			crs.ccb_h.func_code = XPT_REL_SIMQ;
2244 			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2245 			crs.openings = pqf->CurrentDepth - 1;
2246 			xpt_action((union ccb *)&crs);
2247 			if (crs.ccb_h.status != CAM_REQ_CMP) {
2248 				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2249 			}
2250 			xpt_free_path(tmppath);
2251 		}
2252 		CAMLOCK_2_MPTLOCK(mpt);
2253 		break;
2254 	}
2255 	case MPI_EVENT_EVENT_CHANGE:
2256 	case MPI_EVENT_INTEGRATED_RAID:
2257 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2258 	case MPI_EVENT_SAS_SES:
2259 		break;
2260 	default:
2261 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2262 		    msg->Event & 0xFF);
2263 		return (0);
2264 	}
2265 	return (1);
2266 }
2267 
2268 /*
2269  * Reply path for all SCSI I/O requests, called from our
2270  * interrupt handler by extracting our handler index from
2271  * the MsgContext field of the reply from the IOC.
2272  *
2273  * This routine is optimized for the common case of a
2274  * completion without error.  All exception handling is
2275  * offloaded to non-inlined helper routines to minimize
2276  * cache footprint.
2277  */
2278 static int
2279 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2280     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2281 {
2282 	MSG_SCSI_IO_REQUEST *scsi_req;
2283 	union ccb *ccb;
2284 	target_id_t tgt;
2285 
2286 	if (req->state == REQ_STATE_FREE) {
2287 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2288 		return (TRUE);
2289 	}
2290 
2291 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2292 	ccb = req->ccb;
2293 	if (ccb == NULL) {
2294 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2295 		    req, req->serno);
2296 		return (TRUE);
2297 	}
2298 
2299 	tgt = scsi_req->TargetID;
2300 	mpt_req_untimeout(req, mpt_timeout, ccb);
2301 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2302 
2303 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2304 		bus_dmasync_op_t op;
2305 
2306 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2307 			op = BUS_DMASYNC_POSTREAD;
2308 		else
2309 			op = BUS_DMASYNC_POSTWRITE;
2310 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2311 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2312 	}
2313 
2314 	if (reply_frame == NULL) {
2315 		/*
2316 		 * Context only reply, completion without error status.
2317 		 */
2318 		ccb->csio.resid = 0;
2319 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2320 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2321 	} else {
2322 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2323 	}
2324 
2325 	if (mpt->outofbeer) {
2326 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2327 		mpt->outofbeer = 0;
2328 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2329 	}
2330 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2331 		struct scsi_inquiry_data *iq =
2332 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2333 		if (scsi_req->Function ==
2334 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2335 			/*
2336 			 * Fake out the device type so that only the
2337 			 * pass-thru device will attach.
2338 			 */
2339 			iq->device &= ~0x1F;
2340 			iq->device |= T_NODEVICE;
2341 		}
2342 	}
2343 	if (mpt->verbose == MPT_PRT_DEBUG) {
2344 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2345 		    req, req->serno);
2346 	}
2347 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2348 	MPTLOCK_2_CAMLOCK(mpt);
2349 	xpt_done(ccb);
2350 	CAMLOCK_2_MPTLOCK(mpt);
2351 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2352 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2353 	} else {
2354 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2355 		    req, req->serno);
2356 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2357 	}
2358 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2359 	    ("CCB req needed wakeup"));
2360 #ifdef	INVARIANTS
2361 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2362 #endif
2363 	mpt_free_request(mpt, req);
2364 	return (TRUE);
2365 }
2366 
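/*
 * Completion handler for task management functions: record the IOC
 * status and response code and wake any thread sleeping on the TMF
 * request.
 */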
2367 static int
2368 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2369     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2370 {
2371 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2372 
2373 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2374 #ifdef	INVARIANTS
2375 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2376 #endif
2377 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2378 	/* Record IOC Status and Response Code of TMF for any waiters. */
2379 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2380 	req->ResponseCode = tmf_reply->ResponseCode;
2381 
2382 	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2383 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2384 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2385 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2386 		req->state |= REQ_STATE_DONE;
2387 		wakeup(req);
2388 	} else {
2389 		mpt->tmf_req->state = REQ_STATE_FREE;
2390 	}
2391 	return (TRUE);
2392 }
2393 
2394 /*
2395  * XXX: Move to definitions file
2396  */
2397 #define	ELS	0x22
2398 #define	FC4LS	0x32
2399 #define	ABTS	0x81
2400 #define	BA_ACC	0x84
2401 
2402 #define	LS_RJT	0x01
2403 #define	LS_ACC	0x02
2404 #define	PLOGI	0x03
2405 #define	LOGO	0x05
2406 #define SRR	0x14
2407 #define PRLI	0x20
2408 #define PRLO	0x21
2409 #define ADISC	0x52
2410 #define RSCN	0x61
2411 
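/*
 * Build and send a link service response, reusing the request that
 * carried the incoming ELS frame.
 */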
2412 static void
2413 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2414     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2415 {
2416 	uint32_t fl;
2417 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2418 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2419 
2420 	/*
2421 	 * We are going to reuse the ELS request to send this response back.
2422 	 */
2423 	rsp = &tmp;
2424 	memset(rsp, 0, sizeof(*rsp));
2425 
2426 #ifdef	USE_IMMEDIATE_LINK_DATA
2427 	/*
2428 	 * The IMMEDIATE stuff doesn't seem to work.
2429 	 */
2430 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2431 #endif
2432 	rsp->RspLength = length;
2433 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2434 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2435 
2436 	/*
2437 	 * Copy over information from the original reply frame to
2438 	 * its correct place in the response.
2439 	 */
2440 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2441 
2442 	/*
2443 	 * And now copy back the temporary area to the original frame.
2444 	 */
2445 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2446 	rsp = req->req_vbuf;
2447 
2448 #ifdef	USE_IMMEDIATE_LINK_DATA
2449 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2450 #else
2451 {
2452 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2453 	bus_addr_t paddr = req->req_pbuf;
2454 	paddr += MPT_RQSL(mpt);
2455 
2456 	fl =
2457 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2458 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2459 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2460 		MPI_SGE_FLAGS_END_OF_LIST	|
2461 		MPI_SGE_FLAGS_END_OF_BUFFER;
2462 	fl <<= MPI_SGE_FLAGS_SHIFT;
2463 	fl |= (length);
2464 	se->FlagsLength = htole32(fl);
2465 	se->Address = htole32((uint32_t) paddr);
2466 }
2467 #endif
2468 
2469 	/*
2470 	 * Send it on...
2471 	 */
2472 	mpt_send_cmd(mpt, req);
2473 }
2474 
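/*
 * Reply handler for FC link service buffer posts and primitive sends;
 * answers incoming PRLI, PRLO, and ABTS frames and re-posts the ELS
 * buffer when no response is owed.
 */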
2475 static int
2476 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2477     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2478 {
2479 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2480 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2481 	U8 rctl;
2482 	U8 type;
2483 	U8 cmd;
2484 	U16 status = le16toh(reply_frame->IOCStatus);
2485 	U32 *elsbuf;
2486 	int ioindex;
2487 	int do_refresh = TRUE;
2488 
2489 #ifdef	INVARIANTS
2490 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2491 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2492 	    req, req->serno, rp->Function));
2493 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2494 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2495 	} else {
2496 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2497 	}
2498 #endif
2499 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2500 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2501 	    req, req->serno, reply_frame, reply_frame->Function);
2502 
2503 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2504 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2505 		    status, reply_frame->Function);
2506 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2507 			/*
2508 			 * XXX: to get around shutdown issue
2509 			 */
2510 			mpt->disabled = 1;
2511 			return (TRUE);
2512 		}
2513 		return (TRUE);
2514 	}
2515 
2516 	/*
2517 	 * If the function is a link service response, we recycle the
2518 	 * response to be a refresh for a new link service request.
2519 	 *
2520 	 * The request pointer is bogus in this case and we have to fetch
2521 	 * it based upon the TransactionContext.
2522 	 */
2523 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2524 		/* Freddie Uncle Charlie Katie */
2525 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2526 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2527 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2528 				break;
2529 			}
2530 
2531 		KASSERT(ioindex < mpt->els_cmds_allocated,
2532 		    ("can't find my mommie!"));
2533 
2534 		/* remove from active list as we're going to re-post it */
2535 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2536 		req->state &= ~REQ_STATE_QUEUED;
2537 		req->state |= REQ_STATE_DONE;
2538 		mpt_fc_post_els(mpt, req, ioindex);
2539 		return (TRUE);
2540 	}
2541 
2542 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2543 		/* remove from active list as we're done */
2544 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2545 		req->state &= ~REQ_STATE_QUEUED;
2546 		req->state |= REQ_STATE_DONE;
2547 		if (req->state & REQ_STATE_TIMEDOUT) {
2548 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2549 			    "Sync Primitive Send Completed After Timeout\n");
2550 			mpt_free_request(mpt, req);
2551 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2552 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2553 			    "Async Primitive Send Complete\n");
2554 			mpt_free_request(mpt, req);
2555 		} else {
2556 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2557 			    "Sync Primitive Send Complete- Waking Waiter\n");
2558 			wakeup(req);
2559 		}
2560 		return (TRUE);
2561 	}
2562 
2563 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2564 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2565 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2566 		    rp->MsgLength, rp->MsgFlags);
2567 		return (TRUE);
2568 	}
2569 
2570 	if (rp->MsgLength <= 5) {
2571 		/*
2572 		 * This is just an ack of an original ELS buffer post
2573 		 */
2574 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2575 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2576 		return (TRUE);
2577 	}
2578 
2580 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2581 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2582 
2583 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2584 	cmd = be32toh(elsbuf[0]) >> 24;
2585 
2586 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2587 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2588 		return (TRUE);
2589 	}
2590 
2591 	ioindex = le32toh(rp->TransactionContext);
2592 	req = mpt->els_cmd_ptrs[ioindex];
2593 
2594 	if (rctl == ELS && type == 1) {
2595 		switch (cmd) {
2596 		case PRLI:
2597 			/*
2598 			 * Send back a PRLI ACC
2599 			 */
2600 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2601 			    le32toh(rp->Wwn.PortNameHigh),
2602 			    le32toh(rp->Wwn.PortNameLow));
2603 			elsbuf[0] = htobe32(0x02100014);
2604 			elsbuf[1] |= htobe32(0x00000100);
2605 			elsbuf[4] = htobe32(0x00000002);
2606 			if (mpt->role & MPT_ROLE_TARGET)
2607 				elsbuf[4] |= htobe32(0x00000010);
2608 			if (mpt->role & MPT_ROLE_INITIATOR)
2609 				elsbuf[4] |= htobe32(0x00000020);
2610 			/* remove from active list as we're done */
2611 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2612 			req->state &= ~REQ_STATE_QUEUED;
2613 			req->state |= REQ_STATE_DONE;
2614 			mpt_fc_els_send_response(mpt, req, rp, 20);
2615 			do_refresh = FALSE;
2616 			break;
2617 		case PRLO:
2618 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2619 			elsbuf[0] = htobe32(0x02100014);
2620 			elsbuf[1] = htobe32(0x08000100);
2621 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2622 			    le32toh(rp->Wwn.PortNameHigh),
2623 			    le32toh(rp->Wwn.PortNameLow));
2624 			/* remove from active list as we're done */
2625 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2626 			req->state &= ~REQ_STATE_QUEUED;
2627 			req->state |= REQ_STATE_DONE;
2628 			mpt_fc_els_send_response(mpt, req, rp, 20);
2629 			do_refresh = FALSE;
2630 			break;
2631 		default:
2632 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2633 			break;
2634 		}
2635 	} else if (rctl == ABTS && type == 0) {
2636 		uint16_t rx_id = le16toh(rp->Rxid);
2637 		uint16_t ox_id = le16toh(rp->Oxid);
2638 		request_t *tgt_req = NULL;
2639 
2640 		mpt_prt(mpt,
2641 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2642 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2643 		    le32toh(rp->Wwn.PortNameLow));
2644 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2645 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2646 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2647 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2648 		} else {
2649 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2650 		}
2651 		if (tgt_req) {
2652 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2653 			uint8_t *vbuf;
2654 			union ccb *ccb = tgt->ccb;
2655 			uint32_t ct_id;
2656 
2657 			vbuf = tgt_req->req_vbuf;
2658 			vbuf += MPT_RQSL(mpt);
2659 
2660 			/*
2661 			 * Check to make sure we have the correct command.
2662 			 * The reply descriptor in the target state should
2663 			 * contain an IoIndex that matches the RX_ID.
2665 			 *
2666 			 * It'd be nice to have OX_ID to crosscheck with
2667 			 * as well.
2668 			 */
2669 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2670 
2671 			if (ct_id != rx_id) {
2672 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2673 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2674 				    rx_id, ct_id);
2675 				goto skip;
2676 			}
2677 
2678 			ccb = tgt->ccb;
2679 			if (ccb) {
2680 				mpt_prt(mpt,
2681 				    "CCB (%p): lun %u flags %x status %x\n",
2682 				    ccb, ccb->ccb_h.target_lun,
2683 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2684 			}
2685 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2686 			    "%x nxfers %x\n", tgt->state,
2687 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2688 			    tgt->nxfers);
2689   skip:
2690 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2691 				mpt_prt(mpt, "unable to start TargetAbort\n");
2692 			}
2693 		} else {
2694 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2695 		}
2696 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2697 		elsbuf[0] = htobe32(0);
2698 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2699 		elsbuf[2] = htobe32(0x000ffff);
2700 		/*
2701 		 * Dork with the reply frame so that the response to it
2702 		 * will be correct.
2703 		 */
2704 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2705 		/* remove from active list as we're done */
2706 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2707 		req->state &= ~REQ_STATE_QUEUED;
2708 		req->state |= REQ_STATE_DONE;
2709 		mpt_fc_els_send_response(mpt, req, rp, 12);
2710 		do_refresh = FALSE;
2711 	} else {
2712 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2713 	}
2714 	if (do_refresh == TRUE) {
2715 		/* remove from active list as we're done */
2716 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2717 		req->state &= ~REQ_STATE_QUEUED;
2718 		req->state |= REQ_STATE_DONE;
2719 		mpt_fc_post_els(mpt, req, ioindex);
2720 	}
2721 	return (TRUE);
2722 }
2723 
2724 /*
2725  * Clean up all SCSI Initiator personality state in response
2726  * to a controller reset.
2727  */
2728 static void
2729 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2730 {
2731 	/*
2732 	 * The pending list is already run down by
2733 	 * the generic handler.  Perform the same
2734 	 * operation on the timed out request list.
2735 	 */
2736 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
2737 				   MPI_IOCSTATUS_INVALID_STATE);
2738 
2739 	/*
2740 	 * XXX: We need to repost ELS and Target Command Buffers?
2741 	 */
2742 
2743 	/*
2744 	 * Inform the XPT that a bus reset has occurred.
2745 	 */
2746 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
2747 }
2748 
2749 /*
2750  * Parse additional completion information in the reply
2751  * frame for SCSI I/O requests.
2752  */
2753 static int
2754 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
2755 			     MSG_DEFAULT_REPLY *reply_frame)
2756 {
2757 	union ccb *ccb;
2758 	MSG_SCSI_IO_REPLY *scsi_io_reply;
2759 	u_int ioc_status;
2760 	u_int sstate;
2761 	u_int loginfo;
2762 
2763 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
2764 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
2765 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
2766 		("MPT SCSI I/O Handler called with incorrect reply type"));
2767 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
2768 		("MPT SCSI I/O Handler called with continuation reply"));
2769 
2770 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
2771 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
2772 	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
2773 	ioc_status &= MPI_IOCSTATUS_MASK;
2774 	sstate = scsi_io_reply->SCSIState;
2775 
2776 	ccb = req->ccb;
2777 	ccb->csio.resid =
2778 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
2779 
2780 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
2781 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
2782 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2783 		ccb->csio.sense_resid =
2784 		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
2785 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
2786 		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
2787 	}
2788 
2789 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
2790 		/*
2791 		 * Tag messages rejected, but non-tagged retry
2792 		 * was successful.
2793 XXXX
2794 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
2795 		 */
2796 	}
2797 
2798 	switch(ioc_status) {
2799 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2800 		/*
2801 		 * XXX
2802 		 * The Linux driver indicates that a zero
2803 		 * transfer length with this error code
2804 		 * signals a CRC error.
2805 		 *
2806 		 * No need to swap the bytes for checking
2807 		 * against zero.
2808 		 */
2809 		if (scsi_io_reply->TransferCount == 0) {
2810 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2811 			break;
2812 		}
2813 		/* FALLTHROUGH */
2814 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
2815 	case MPI_IOCSTATUS_SUCCESS:
2816 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
2817 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
2818 			/*
2819 			 * Status was never returned for this transaction.
2820 			 */
2821 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
2822 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
2823 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
2824 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
2825 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
2826 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
2827 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
2829 			/* XXX Handle SPI-Packet and FCP-2 response info. */
2830 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2831 		} else
2832 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2833 		break;
2834 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
2835 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
2836 		break;
2837 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
2838 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2839 		break;
2840 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2841 		/*
2842 		 * Since selection timeouts and "device really not
2843 		 * there" are grouped into this error code, report
2844 		 * selection timeout.  Selection timeouts are
2845 		 * typically retried before giving up on the device
2846 		 * whereas "device not there" errors are considered
2847 		 * unretryable.
2848 		 */
2849 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
2850 		break;
2851 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2852 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
2853 		break;
2854 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
2855 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
2856 		break;
2857 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
2858 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
2859 		break;
2860 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2861 		ccb->ccb_h.status = CAM_UA_TERMIO;
2862 		break;
2863 	case MPI_IOCSTATUS_INVALID_STATE:
2864 		/*
2865 		 * The IOC has been reset.  Emulate a bus reset.
2866 		 */
2867 		/* FALLTHROUGH */
2868 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
2869 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2870 		break;
2871 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
2872 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
2873 		/*
2874 		 * Don't clobber any timeout status that has
2875 		 * already been set for this transaction.  We
2876 		 * want the SCSI layer to be able to differentiate
2877 		 * between the command we aborted due to timeout
2878 		 * and any innocent bystanders.
2879 		 */
2880 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
2881 			break;
2882 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
2883 		break;
2884 
2885 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2886 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
2887 		break;
2888 	case MPI_IOCSTATUS_BUSY:
2889 		mpt_set_ccb_status(ccb, CAM_BUSY);
2890 		break;
2891 	case MPI_IOCSTATUS_INVALID_FUNCTION:
2892 	case MPI_IOCSTATUS_INVALID_SGL:
2893 	case MPI_IOCSTATUS_INTERNAL_ERROR:
2894 	case MPI_IOCSTATUS_INVALID_FIELD:
2895 	default:
2896 		/* XXX
2897 		 * Some of the above may need to kick
2898 		 * off a recovery action!!!!
2899 		 */
2900 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2901 		break;
2902 	}
2903 
2904 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2905 		mpt_freeze_ccb(ccb);
2906 	}
2907 
2908 	return (TRUE);
2909 }
2910 
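/*
 * Main CAM action entry point: dispatch on the CCB function code for
 * SCSI I/O, resets, aborts, transfer settings, path inquiry, and
 * target mode resources.
 */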
2911 static void
2912 mpt_action(struct cam_sim *sim, union ccb *ccb)
2913 {
2914 	struct mpt_softc *mpt;
2915 	struct ccb_trans_settings *cts;
2916 	target_id_t tgt;
2917 	lun_id_t lun;
2918 	int raid_passthru;
2919 
2920 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
2921 
2922 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
2923 	raid_passthru = (sim == mpt->phydisk_sim);
2924 	MPT_LOCK_ASSERT(mpt);
2925 
2926 	tgt = ccb->ccb_h.target_id;
2927 	lun = ccb->ccb_h.target_lun;
2928 	if (raid_passthru &&
2929 	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
2930 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
2931 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
2932 		CAMLOCK_2_MPTLOCK(mpt);
2933 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2934 			MPTLOCK_2_CAMLOCK(mpt);
2935 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2936 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2937 			xpt_done(ccb);
2938 			return;
2939 		}
2940 		MPTLOCK_2_CAMLOCK(mpt);
2941 	}
2942 	ccb->ccb_h.ccb_mpt_ptr = mpt;
2943 
2944 	switch (ccb->ccb_h.func_code) {
2945 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2946 		/*
2947 		 * Do a couple of preliminary checks...
2948 		 */
2949 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2950 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2951 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2952 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2953 				break;
2954 			}
2955 		}
2956 		/* Max supported CDB length is 16 bytes */
2957 		/* XXX Unless we implement the new 32-byte message type */
2958 		if (ccb->csio.cdb_len >
2959 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
2960 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2961 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2962 			break;
2963 		}
2964 #ifdef	MPT_TEST_MULTIPATH
2965 		if (mpt->failure_id == ccb->ccb_h.target_id) {
2966 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2967 			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
2968 			break;
2969 		}
2970 #endif
2971 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2972 		mpt_start(sim, ccb);
2973 		return;
2974 
2975 	case XPT_RESET_BUS:
2976 		if (raid_passthru) {
2977 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2978 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2979 			break;
2980 		}
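		/* FALLTHROUGH */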
2981 	case XPT_RESET_DEV:
2982 		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
2983 			if (bootverbose) {
2984 				xpt_print(ccb->ccb_h.path, "reset bus\n");
2985 			}
2986 		} else {
2987 			xpt_print(ccb->ccb_h.path, "reset device\n");
2988 		}
2989 		CAMLOCK_2_MPTLOCK(mpt);
2990 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
2991 		MPTLOCK_2_CAMLOCK(mpt);
2992 
2993 		/*
2994 		 * mpt_bus_reset is always successful in that it
2995 		 * will fall back to a hard reset should a bus
2996 		 * reset attempt fail.
2997 		 */
2998 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2999 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3000 		break;
3001 
3002 	case XPT_ABORT:
3003 	{
3004 		union ccb *accb = ccb->cab.abort_ccb;
3005 		CAMLOCK_2_MPTLOCK(mpt);
3006 		switch (accb->ccb_h.func_code) {
3007 		case XPT_ACCEPT_TARGET_IO:
3008 		case XPT_IMMED_NOTIFY:
3009 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3010 			break;
3011 		case XPT_CONT_TARGET_IO:
3012 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3013 			ccb->ccb_h.status = CAM_UA_ABORT;
3014 			break;
3015 		case XPT_SCSI_IO:
3016 			ccb->ccb_h.status = CAM_UA_ABORT;
3017 			break;
3018 		default:
3019 			ccb->ccb_h.status = CAM_REQ_INVALID;
3020 			break;
3021 		}
3022 		MPTLOCK_2_CAMLOCK(mpt);
3023 		break;
3024 	}
3025 
3026 #ifdef	CAM_NEW_TRAN_CODE
3027 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3028 #else
3029 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3030 #endif
3031 #define	DP_DISC_ENABLE	0x1
3032 #define	DP_DISC_DISABL	0x2
3033 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
3034 
3035 #define	DP_TQING_ENABLE	0x4
3036 #define	DP_TQING_DISABL	0x8
3037 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
3038 
3039 #define	DP_WIDE		0x10
3040 #define	DP_NARROW	0x20
3041 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
3042 
3043 #define	DP_SYNC		0x40
3044 
3045 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
3046 	{
3047 #ifdef	CAM_NEW_TRAN_CODE
3048 		struct ccb_trans_settings_scsi *scsi;
3049 		struct ccb_trans_settings_spi *spi;
3050 #endif
3051 		uint8_t dval;
3052 		u_int period;
3053 		u_int offset;
3054 		int i, j;
3055 
3056 		cts = &ccb->cts;
3057 
3058 		if (mpt->is_fc || mpt->is_sas) {
3059 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3060 			break;
3061 		}
3062 
3063 #ifdef	CAM_NEW_TRAN_CODE
3064 		scsi = &cts->proto_specific.scsi;
3065 		spi = &cts->xport_specific.spi;
3066 
3067 		/*
3068 		 * We can be called just to validate transport and proto versions
3069 		 */
3070 		if (scsi->valid == 0 && spi->valid == 0) {
3071 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3072 			break;
3073 		}
3074 #endif
3075 
3076 		/*
3077 		 * Skip attempting settings on RAID volume disks.
3078 		 * Other devices on the bus get the normal treatment.
3079 		 */
3080 		if (mpt->phydisk_sim && raid_passthru == 0 &&
3081 		    mpt_is_raid_volume(mpt, tgt) != 0) {
3082 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3083 			    "no transfer settings for RAID vols\n");
3084 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3085 			break;
3086 		}
3087 
3088 		i = mpt->mpt_port_page2.PortSettings &
3089 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3090 		j = mpt->mpt_port_page2.PortFlags &
3091 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3092 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3093 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3094 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
3095 			    "honoring BIOS transfer negotiations\n");
3096 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3097 			break;
3098 		}
3099 
3100 		dval = 0;
3101 		period = 0;
3102 		offset = 0;
3103 
3104 #ifndef	CAM_NEW_TRAN_CODE
3105 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3106 			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3107 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3108 		}
3109 
3110 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3111 			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3112 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3113 		}
3114 
3115 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3116 			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3117 		}
3118 
3119 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3120 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3121 			dval |= DP_SYNC;
3122 			period = cts->sync_period;
3123 			offset = cts->sync_offset;
3124 		}
3125 #else
3126 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3127 			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3128 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3129 		}
3130 
3131 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3132 			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3133 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3134 		}
3135 
3136 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3137 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3138 			    DP_WIDE : DP_NARROW;
3139 		}
3140 
3141 		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3142 			dval |= DP_SYNC;
3143 			offset = spi->sync_offset;
3144 		} else {
3145 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3146 			    &mpt->mpt_dev_page1[tgt];
3147 			offset = ptr->RequestedParameters;
3148 			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3149 			offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3150 		}
3151 		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3152 			dval |= DP_SYNC;
3153 			period = spi->sync_period;
3154 		} else {
3155 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3156 			    &mpt->mpt_dev_page1[tgt];
3157 			period = ptr->RequestedParameters;
3158 			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3159 			period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3160 		}
3161 #endif
3162 		CAMLOCK_2_MPTLOCK(mpt);
3163 		if (dval & DP_DISC_ENABLE) {
3164 			mpt->mpt_disc_enable |= (1 << tgt);
3165 		} else if (dval & DP_DISC_DISABL) {
3166 			mpt->mpt_disc_enable &= ~(1 << tgt);
3167 		}
3168 		if (dval & DP_TQING_ENABLE) {
3169 			mpt->mpt_tag_enable |= (1 << tgt);
3170 		} else if (dval & DP_TQING_DISABL) {
3171 			mpt->mpt_tag_enable &= ~(1 << tgt);
3172 		}
3173 		if (dval & DP_WIDTH) {
3174 			mpt_setwidth(mpt, tgt, 1);
3175 		}
3176 		if (dval & DP_SYNC) {
3177 			mpt_setsync(mpt, tgt, period, offset);
3178 		}
3179 		if (dval == 0) {
3180 			MPTLOCK_2_CAMLOCK(mpt);
3181 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3182 			break;
3183 		}
3184 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3185 		    "set [%d]: 0x%x period 0x%x offset %d\n",
3186 		    tgt, dval, period, offset);
3187 		if (mpt_update_spi_config(mpt, tgt)) {
3188 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3189 		} else {
3190 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3191 		}
3192 		MPTLOCK_2_CAMLOCK(mpt);
3193 		break;
3194 	}
3195 	case XPT_GET_TRAN_SETTINGS:
3196 	{
3197 #ifdef	CAM_NEW_TRAN_CODE
3198 		struct ccb_trans_settings_scsi *scsi;
3199 		cts = &ccb->cts;
3200 		cts->protocol = PROTO_SCSI;
3201 		if (mpt->is_fc) {
3202 			struct ccb_trans_settings_fc *fc =
3203 			    &cts->xport_specific.fc;
3204 			cts->protocol_version = SCSI_REV_SPC;
3205 			cts->transport = XPORT_FC;
3206 			cts->transport_version = 0;
3207 			fc->valid = CTS_FC_VALID_SPEED;
3208 			fc->bitrate = 100000;
3209 		} else if (mpt->is_sas) {
3210 			struct ccb_trans_settings_sas *sas =
3211 			    &cts->xport_specific.sas;
3212 			cts->protocol_version = SCSI_REV_SPC2;
3213 			cts->transport = XPORT_SAS;
3214 			cts->transport_version = 0;
3215 			sas->valid = CTS_SAS_VALID_SPEED;
3216 			sas->bitrate = 300000;
3217 		} else {
3218 			cts->protocol_version = SCSI_REV_2;
3219 			cts->transport = XPORT_SPI;
3220 			cts->transport_version = 2;
3221 			if (mpt_get_spi_settings(mpt, cts) != 0) {
3222 				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3223 				break;
3224 			}
3225 		}
3226 		scsi = &cts->proto_specific.scsi;
3227 		scsi->valid = CTS_SCSI_VALID_TQ;
3228 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3229 #else
3230 		cts = &ccb->cts;
3231 		if (mpt->is_fc) {
3232 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3233 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3234 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3235 		} else if (mpt->is_sas) {
3236 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3237 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3238 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3239 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
3240 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3241 			break;
3242 		}
3243 #endif
3244 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3245 		break;
3246 	}
3247 	case XPT_CALC_GEOMETRY:
3248 	{
3249 		struct ccb_calc_geometry *ccg;
3250 
3251 		ccg = &ccb->ccg;
3252 		if (ccg->block_size == 0) {
3253 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3254 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3255 			break;
3256 		}
3257 		mpt_calc_geometry(ccg, /*extended*/1);
3258 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3259 		break;
3260 	}
3261 	case XPT_PATH_INQ:		/* Path routing inquiry */
3262 	{
3263 		struct ccb_pathinq *cpi = &ccb->cpi;
3264 
3265 		cpi->version_num = 1;
3266 		cpi->target_sprt = 0;
3267 		cpi->hba_eng_cnt = 0;
3268 		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3269 		/*
3270 		 * FC cards report MAX_DEVICES of 512, but
3271 		 * the MSG_SCSI_IO_REQUEST target id field
3272 		 * is only 8 bits. Until we fix the driver
3273 		 * to support 'channels' for bus overflow,
3274 		 * just limit it.
3275 		 */
3276 		if (cpi->max_target > 255) {
3277 			cpi->max_target = 255;
3278 		}
3279 
3280 		/*
3281 		 * VMware ESX reports > 16 devices and then dies when we probe.
3282 		 */
3283 		if (mpt->is_spi && cpi->max_target > 15) {
3284 			cpi->max_target = 15;
3285 		}
3286 		cpi->max_lun = 7;
3287 		cpi->initiator_id = mpt->mpt_ini_id;
3288 		cpi->bus_id = cam_sim_bus(sim);
3289 
3290 		/*
3291 		 * The base speed is the speed of the underlying connection.
3292 		 */
3293 #ifdef	CAM_NEW_TRAN_CODE
3294 		cpi->protocol = PROTO_SCSI;
3295 		if (mpt->is_fc) {
3296 			cpi->hba_misc = PIM_NOBUSRESET;
3297 			cpi->base_transfer_speed = 100000;
3298 			cpi->hba_inquiry = PI_TAG_ABLE;
3299 			cpi->transport = XPORT_FC;
3300 			cpi->transport_version = 0;
3301 			cpi->protocol_version = SCSI_REV_SPC;
3302 		} else if (mpt->is_sas) {
3303 			cpi->hba_misc = PIM_NOBUSRESET;
3304 			cpi->base_transfer_speed = 300000;
3305 			cpi->hba_inquiry = PI_TAG_ABLE;
3306 			cpi->transport = XPORT_SAS;
3307 			cpi->transport_version = 0;
3308 			cpi->protocol_version = SCSI_REV_SPC2;
3309 		} else {
3310 			cpi->hba_misc = PIM_SEQSCAN;
3311 			cpi->base_transfer_speed = 3300;
3312 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3313 			cpi->transport = XPORT_SPI;
3314 			cpi->transport_version = 2;
3315 			cpi->protocol_version = SCSI_REV_2;
3316 		}
3317 #else
3318 		if (mpt->is_fc) {
3319 			cpi->hba_misc = PIM_NOBUSRESET;
3320 			cpi->base_transfer_speed = 100000;
3321 			cpi->hba_inquiry = PI_TAG_ABLE;
3322 		} else if (mpt->is_sas) {
3323 			cpi->hba_misc = PIM_NOBUSRESET;
3324 			cpi->base_transfer_speed = 300000;
3325 			cpi->hba_inquiry = PI_TAG_ABLE;
3326 		} else {
3327 			cpi->hba_misc = PIM_SEQSCAN;
3328 			cpi->base_transfer_speed = 3300;
3329 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3330 		}
3331 #endif
3332 
3333 		/*
3334 		 * We give our fake RAID passthru bus a width that is
3335 		 * MaxPhysDisks wide and restrict it to one lun.
3336 		 */
3337 		if (raid_passthru) {
3338 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3339 			cpi->initiator_id = cpi->max_target + 1;
3340 			cpi->max_lun = 0;
3341 		}
3342 
3343 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3344 			cpi->hba_misc |= PIM_NOINITIATOR;
3345 		}
3346 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3347 			cpi->target_sprt =
3348 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3349 		} else {
3350 			cpi->target_sprt = 0;
3351 		}
3352 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3353 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3354 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3355 		cpi->unit_number = cam_sim_unit(sim);
3356 		cpi->ccb_h.status = CAM_REQ_CMP;
3357 		break;
3358 	}
3359 	case XPT_EN_LUN:		/* Enable LUN as a target */
3360 	{
3361 		int result;
3362 
3363 		CAMLOCK_2_MPTLOCK(mpt);
3364 		if (ccb->cel.enable)
3365 			result = mpt_enable_lun(mpt,
3366 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3367 		else
3368 			result = mpt_disable_lun(mpt,
3369 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3370 		MPTLOCK_2_CAMLOCK(mpt);
3371 		if (result == 0) {
3372 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3373 		} else {
3374 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3375 		}
3376 		break;
3377 	}
3378 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3379 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3380 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3381 	{
3382 		tgt_resource_t *trtp;
3383 		lun_id_t lun = ccb->ccb_h.target_lun;
3384 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3385 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3386 		ccb->ccb_h.flags = 0;
3387 
3388 		if (lun == CAM_LUN_WILDCARD) {
3389 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3390 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3391 				break;
3392 			}
3393 			trtp = &mpt->trt_wildcard;
3394 		} else if (lun >= MPT_MAX_LUNS) {
3395 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3396 			break;
3397 		} else {
3398 			trtp = &mpt->trt[lun];
3399 		}
3400 		CAMLOCK_2_MPTLOCK(mpt);
3401 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3402 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3403 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3404 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3405 			    sim_links.stqe);
3406 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3407 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3408 			    "Put FREE INOT lun %d\n", lun);
3409 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3410 			    sim_links.stqe);
3411 		} else {
3412 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3413 		}
3414 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3415 		MPTLOCK_2_CAMLOCK(mpt);
3416 		return;
3417 	}
3418 	case XPT_CONT_TARGET_IO:
3419 		CAMLOCK_2_MPTLOCK(mpt);
3420 		mpt_target_start_io(mpt, ccb);
3421 		MPTLOCK_2_CAMLOCK(mpt);
3422 		return;
3423 
3424 	default:
3425 		ccb->ccb_h.status = CAM_REQ_INVALID;
3426 		break;
3427 	}
3428 	xpt_done(ccb);
3429 }
3430 
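/*
 * Fill in a CCB's SPI transfer settings, reading negotiated values
 * from device page 0 for current settings or the maxima from port
 * page 0 for user settings.
 */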
3431 static int
3432 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3433 {
3434 #ifdef	CAM_NEW_TRAN_CODE
3435 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3436 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3437 #endif
3438 	target_id_t tgt;
3439 	uint32_t dval, pval, oval;
3440 	int rv;
3441 
3442 	if (IS_CURRENT_SETTINGS(cts) == 0) {
3443 		tgt = cts->ccb_h.target_id;
3444 	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3445 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3446 			return (-1);
3447 		}
3448 	} else {
3449 		tgt = cts->ccb_h.target_id;
3450 	}
3451 
3452 	/*
3453 	 * We aren't looking at Port Page 2 BIOS settings here;
3454 	 * sometimes these have been known to be bogus XXX.
3455 	 *
3456 	 * For user settings, we pick the max from port page 0
3457 	 *
3458 	 * For current settings we read the current settings out from
3459 	 * device page 0 for that target.
3460 	 */
3461 	if (IS_CURRENT_SETTINGS(cts)) {
3462 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3463 		dval = 0;
3464 
3465 		CAMLOCK_2_MPTLOCK(mpt);
3466 		tmp = mpt->mpt_dev_page0[tgt];
3467 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3468 		    sizeof(tmp), FALSE, 5000);
3469 		if (rv) {
3470 			MPTLOCK_2_CAMLOCK(mpt);
3471 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3472 			return (rv);
3473 		}
3474 		MPTLOCK_2_CAMLOCK(mpt);
3475 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3476 		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3477 		    tmp.NegotiatedParameters, tmp.Information);
3478 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3479 		    DP_WIDE : DP_NARROW;
3480 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3481 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3482 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3483 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3484 		oval = tmp.NegotiatedParameters;
3485 		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3486 		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3487 		pval = tmp.NegotiatedParameters;
3488 		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3489 		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3490 		mpt->mpt_dev_page0[tgt] = tmp;
3491 	} else {
3492 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3493 		oval = mpt->mpt_port_page0.Capabilities;
3494 		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3495 		pval = mpt->mpt_port_page0.Capabilities;
3496 		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3497 	}
3498 
3499 #ifndef	CAM_NEW_TRAN_CODE
3500 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3501 	cts->valid = 0;
3502 	cts->sync_period = pval;
3503 	cts->sync_offset = oval;
3504 	cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3505 	cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3506 	cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3507 	if (dval & DP_WIDE) {
3508 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3509 	} else {
3510 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3511 	}
3512 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3513 		cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3514 		if (dval & DP_DISC_ENABLE) {
3515 			cts->flags |= CCB_TRANS_DISC_ENB;
3516 		}
3517 		if (dval & DP_TQING_ENABLE) {
3518 			cts->flags |= CCB_TRANS_TAG_ENB;
3519 		}
3520 	}
3521 #else
3522 	spi->valid = 0;
3523 	scsi->valid = 0;
3524 	spi->flags = 0;
3525 	scsi->flags = 0;
3526 	spi->sync_offset = oval;
3527 	spi->sync_period = pval;
3528 	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3529 	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3530 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3531 	if (dval & DP_WIDE) {
3532 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3533 	} else {
3534 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3535 	}
3536 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3537 		scsi->valid = CTS_SCSI_VALID_TQ;
3538 		if (dval & DP_TQING_ENABLE) {
3539 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3540 		}
3541 		spi->valid |= CTS_SPI_VALID_DISC;
3542 		if (dval & DP_DISC_ENABLE) {
3543 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3544 		}
3545 	}
3546 #endif
3547 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3548 	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3549 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3550 	return (0);
3551 }
3552 
3553 static void
3554 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3555 {
3556 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3557 
3558 	ptr = &mpt->mpt_dev_page1[tgt];
3559 	if (onoff) {
3560 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3561 	} else {
3562 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3563 	}
3564 }
3565 
3566 static void
3567 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3568 {
3569 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3570 
3571 	ptr = &mpt->mpt_dev_page1[tgt];
3572 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3573 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3574 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3575 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3576 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3577 	if (period == 0) {
3578 		return;
3579 	}
3580 	ptr->RequestedParameters |=
3581 	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3582 	ptr->RequestedParameters |=
3583 	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
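	/*
	 * A note on the thresholds below (assuming the usual SPI-4
	 * period factor table): factor 0x9 is 12.5ns (Ultra160) and
	 * 0x8 is 6.25ns (Ultra320). Both require DT transfers;
	 * Ultra320 additionally negotiates QAS and information units.
	 */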
3584 	if (period < 0xa) {
3585 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3586 	}
3587 	if (period < 0x9) {
3588 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3589 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3590 	}
3591 }
3592 
3593 static int
3594 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3595 {
3596 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3597 	int rv;
3598 
3599 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3600 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3601 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3602 	tmp = mpt->mpt_dev_page1[tgt];
3603 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3604 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3605 	if (rv) {
3606 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3607 		return (-1);
3608 	}
3609 	return (0);
3610 }
3611 
3612 static void
3613 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3614 {
3615 #if __FreeBSD_version >= 500000
3616 	cam_calc_geometry(ccg, extended);
3617 #else
3618 	uint32_t size_mb;
3619 	uint32_t secs_per_cylinder;
3620 
3621 	if (ccg->block_size == 0) {
3622 		ccg->ccb_h.status = CAM_REQ_INVALID;
3623 		return;
3624 	}
3625 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3626 	if (size_mb > 1024 && extended) {
3627 		ccg->heads = 255;
3628 		ccg->secs_per_track = 63;
3629 	} else {
3630 		ccg->heads = 64;
3631 		ccg->secs_per_track = 32;
3632 	}
3633 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3634 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3635 	ccg->ccb_h.status = CAM_REQ_CMP;
3636 #endif
3637 }
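/*
 * A worked example of the pre-5.x fallback above: an 8388608 sector
 * volume with 512 byte blocks is 4096MB, so extended translation
 * applies and we report 255 heads and 63 sectors per track, i.e.
 * 16065 sectors per cylinder and 8388608 / 16065 = 522 cylinders.
 */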
3638 
3639 /****************************** Timeout Recovery ******************************/
3640 static int
3641 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3642 {
3643 	int error;
3644 
3645 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3646 	    &mpt->recovery_thread, /*flags*/0,
3647 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3648 	return (error);
3649 }
3650 
3651 static void
3652 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3653 {
3654 	if (mpt->recovery_thread == NULL) {
3655 		return;
3656 	}
3657 	mpt->shutdwn_recovery = 1;
3658 	wakeup(mpt);
3659 	/*
3660 	 * Sleep on a slightly different location
3661 	 * for this interlock just for added safety.
3662 	 */
3663 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3664 }
3665 
3666 static void
3667 mpt_recovery_thread(void *arg)
3668 {
3669 	struct mpt_softc *mpt;
3670 
3671 	mpt = (struct mpt_softc *)arg;
3672 	MPT_LOCK(mpt);
3673 	for (;;) {
3674 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3675 			if (mpt->shutdwn_recovery == 0) {
3676 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3677 			}
3678 		}
3679 		if (mpt->shutdwn_recovery != 0) {
3680 			break;
3681 		}
3682 		mpt_recover_commands(mpt);
3683 	}
3684 	mpt->recovery_thread = NULL;
3685 	wakeup(&mpt->recovery_thread);
3686 	MPT_UNLOCK(mpt);
3687 	kthread_exit(0);
3688 }
3689 
3690 static int
3691 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3692     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3693 {
3694 	MSG_SCSI_TASK_MGMT *tmf_req;
3695 	int		    error;
3696 
3697 	/*
3698 	 * Wait for any current TMF request to complete.
3699 	 * We're only allowed to issue one TMF at a time.
3700 	 */
3701 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3702 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3703 	if (error != 0) {
3704 		mpt_reset(mpt, TRUE);
3705 		return (ETIMEDOUT);
3706 	}
3707 
3708 	mpt_assign_serno(mpt, mpt->tmf_req);
3709 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3710 
3711 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3712 	memset(tmf_req, 0, sizeof(*tmf_req));
3713 	tmf_req->TargetID = target;
3714 	tmf_req->Bus = channel;
3715 	tmf_req->ChainOffset = 0;
3716 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3717 	tmf_req->Reserved = 0;
3718 	tmf_req->TaskType = type;
3719 	tmf_req->Reserved1 = 0;
3720 	tmf_req->MsgFlags = flags;
3721 	tmf_req->MsgContext =
3722 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3723 	memset(&tmf_req->LUN, 0,
3724 	    sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
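	/*
	 * SAM-2 LUN encoding: LUNs below 256 use peripheral device
	 * addressing with the LUN in the second byte; larger LUNs use
	 * the flat address format (e.g. LUN 0x123 encodes as 0x41 0x23).
	 */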
3725 	if (lun >= 256) {
3726 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3727 		tmf_req->LUN[1] = lun & 0xff;
3728 	} else {
3729 		tmf_req->LUN[1] = lun;
3730 	}
3731 	tmf_req->TaskMsgContext = abort_ctx;
3732 
3733 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3734 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3735 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3736 	if (mpt->verbose > MPT_PRT_DEBUG) {
3737 		mpt_print_request(tmf_req);
3738 	}
3739 
3740 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3741 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3742 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3743 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3744 	if (error != MPT_OK) {
3745 		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
3746 		mpt->tmf_req->state = REQ_STATE_FREE;
3747 		mpt_reset(mpt, TRUE);
3748 	}
3749 	return (error);
3750 }
3751 
3752 /*
3753  * When a command times out, it is placed on the request_timeout_list
3754  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3755  * only a single TMF operation at a time, so we serially abort/BDR, etc.,
3756  * the timed-out transactions.  The next TMF is issued either by the
3757  * completion handler of the current TMF waking our recovery thread,
3758  * or the TMF timeout handler causing a hard reset sequence.
3759  */
3760 static void
3761 mpt_recover_commands(struct mpt_softc *mpt)
3762 {
3763 	request_t	   *req;
3764 	union ccb	   *ccb;
3765 	int		    error;
3766 
3767 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3768 		/*
3769 		 * No work to do- leave.
3770 		 */
3771 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3772 		return;
3773 	}
3774 
3775 	/*
3776 	 * Flush any commands whose completion coincides with their timeout.
3777 	 */
3778 	mpt_intr(mpt);
3779 
3780 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3781 		/*
3782 		 * The timedout commands have already
3783 		 * completed.  This typically means
3784 		 * that either the timeout value was on
3785 		 * the hairy edge of what the device
3786 		 * requires or - more likely - interrupts
3787 		 * are not happening.
3788 		 */
3789 		mpt_prt(mpt, "Timedout requests already complete. "
3790 		    "Interrupts may not be functioning.\n");
3791 		mpt_enable_ints(mpt);
3792 		return;
3793 	}
3794 
3795 	/*
3796 	 * We have no visibility into the current state of the
3797 	 * controller, so attempt to abort the commands in the
3798 	 * order they timed-out. For initiator commands, we
3799 	 * depend on the reply handler pulling requests off
3800 	 * the timeout list.
3801 	 */
3802 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3803 		uint16_t status;
3804 		uint8_t response;
3805 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3806 
3807 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
3808 		    req, req->serno, hdrp->Function);
3809 		ccb = req->ccb;
3810 		if (ccb == NULL) {
3811 			mpt_prt(mpt, "null ccb in timed out request. "
3812 			    "Resetting Controller.\n");
3813 			mpt_reset(mpt, TRUE);
3814 			continue;
3815 		}
3816 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
3817 
3818 		/*
3819 		 * Check to see whether this is an initiator command;
3820 		 * if it is not, deal with it differently.
3821 		 */
3822 		switch (hdrp->Function) {
3823 		case MPI_FUNCTION_SCSI_IO_REQUEST:
3824 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
3825 			break;
3826 		default:
3827 			/*
3828 			 * XXX: FIX ME: need to abort target assists...
3829 			 */
3830 			mpt_prt(mpt, "just putting it back on the pend q\n");
3831 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
3832 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
3833 			    links);
3834 			continue;
3835 		}
3836 
3837 		error = mpt_scsi_send_tmf(mpt,
3838 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3839 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
3840 		    htole32(req->index | scsi_io_handler_id), TRUE);
3841 
3842 		if (error != 0) {
3843 			/*
3844 			 * mpt_scsi_send_tmf hard resets on failure, so no
3845 			 * need to do so here.  Our queue should be emptied
3846 			 * by the hard reset.
3847 			 */
3848 			continue;
3849 		}
3850 
3851 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
3852 		    REQ_STATE_DONE, TRUE, 500);
3853 
3854 		status = mpt->tmf_req->IOCStatus;
3855 		response = mpt->tmf_req->ResponseCode;
3856 		mpt->tmf_req->state = REQ_STATE_FREE;
3857 
3858 		if (error != 0) {
3859 			/*
3860 			 * If we've errored out, reset the controller.
3861 			 */
3862 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
3863 			    "Resetting controller\n");
3864 			mpt_reset(mpt, TRUE);
3865 			continue;
3866 		}
3867 
3868 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
3869 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
3870 			    "Resetting controller.\n", status);
3871 			mpt_reset(mpt, TRUE);
3872 			continue;
3873 		}
3874 
3875 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
3876 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
3877 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
3878 			    "Resetting controller.\n", response);
3879 			mpt_reset(mpt, TRUE);
3880 			continue;
3881 		}
3882 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
3883 	}
3884 }
3885 
3886 /************************ Target Mode Support ****************************/
3887 static void
3888 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
3889 {
3890 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
3891 	PTR_SGE_TRANSACTION32 tep;
3892 	PTR_SGE_SIMPLE32 se;
3893 	bus_addr_t paddr;
3894 	uint32_t fl;
3895 
3896 	paddr = req->req_pbuf;
3897 	paddr += MPT_RQSL(mpt);
3898 
3899 	fc = req->req_vbuf;
3900 	memset(fc, 0, MPT_REQUEST_AREA);
3901 	fc->BufferCount = 1;
3902 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
3903 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
3904 
3905 	/*
3906 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
3907 	 * consist of a TE SGL element (with details length of zero)
3908 	 * followed by a SIMPLE SGL element which holds the address
3909 	 * of the buffer.
3910 	 */
3911 
3912 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
3913 
3914 	tep->ContextSize = 4;
3915 	tep->Flags = 0;
3916 	tep->TransactionContext[0] = htole32(ioindex);
3917 
3918 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
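	/*
	 * The SGE FlagsLength word packs the flags into bits 31:24
	 * (MPI_SGE_FLAGS_SHIFT) and the byte count into the low 24
	 * bits; the ELS payload lives in the tail of the request
	 * area, hence the MPT_NRFM(mpt) - MPT_RQSL(mpt) length.
	 */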
3919 	fl =
3920 		MPI_SGE_FLAGS_HOST_TO_IOC	|
3921 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
3922 		MPI_SGE_FLAGS_LAST_ELEMENT	|
3923 		MPI_SGE_FLAGS_END_OF_LIST	|
3924 		MPI_SGE_FLAGS_END_OF_BUFFER;
3925 	fl <<= MPI_SGE_FLAGS_SHIFT;
3926 	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
3927 	se->FlagsLength = htole32(fl);
3928 	se->Address = htole32((uint32_t) paddr);
3929 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3930 	    "add ELS index %d ioindex %d for %p:%u\n",
3931 	    req->index, ioindex, req, req->serno);
3932 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
3933 	    ("mpt_fc_post_els: request not locked"));
3934 	mpt_send_cmd(mpt, req);
3935 }
3936 
3937 static void
3938 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
3939 {
3940 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
3941 	PTR_CMD_BUFFER_DESCRIPTOR cb;
3942 	bus_addr_t paddr;
3943 
3944 	paddr = req->req_pbuf;
3945 	paddr += MPT_RQSL(mpt);
3946 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
3947 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
3948 
3949 	fc = req->req_vbuf;
3950 	fc->BufferCount = 1;
3951 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
3952 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3953 
3954 	cb = &fc->Buffer[0];
3955 	cb->IoIndex = htole16(ioindex);
3956 	cb->u.PhysicalAddress32 = htole32((U32) paddr);
3957 
3958 	mpt_check_doorbell(mpt);
3959 	mpt_send_cmd(mpt, req);
3960 }
3961 
3962 static int
3963 mpt_add_els_buffers(struct mpt_softc *mpt)
3964 {
3965 	int i;
3966 
3967 	if (mpt->is_fc == 0) {
3968 		return (TRUE);
3969 	}
3970 
3971 	if (mpt->els_cmds_allocated) {
3972 		return (TRUE);
3973 	}
3974 
3975 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
3976 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3977 
3978 	if (mpt->els_cmd_ptrs == NULL) {
3979 		return (FALSE);
3980 	}
3981 
3982 	/*
3983 	 * Feed the chip some ELS buffer resources
3984 	 */
3985 	for (i = 0; i < MPT_MAX_ELS; i++) {
3986 		request_t *req = mpt_get_request(mpt, FALSE);
3987 		if (req == NULL) {
3988 			break;
3989 		}
3990 		req->state |= REQ_STATE_LOCKED;
3991 		mpt->els_cmd_ptrs[i] = req;
3992 		mpt_fc_post_els(mpt, req, i);
3993 	}
3994 
3995 	if (i == 0) {
3996 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
3997 		free(mpt->els_cmd_ptrs, M_DEVBUF);
3998 		mpt->els_cmd_ptrs = NULL;
3999 		return (FALSE);
4000 	}
4001 	if (i != MPT_MAX_ELS) {
4002 		mpt_lprt(mpt, MPT_PRT_INFO,
4003 		    "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4004 	}
4005 	mpt->els_cmds_allocated = i;
4006 	return(TRUE);
4007 }
4008 
4009 static int
4010 mpt_add_target_commands(struct mpt_softc *mpt)
4011 {
4012 	int i, max;
4013 
4014 	if (mpt->tgt_cmd_ptrs) {
4015 		return (TRUE);
4016 	}
4017 
4018 	max = MPT_MAX_REQUESTS(mpt) >> 1;
4019 	if (max > mpt->mpt_max_tgtcmds) {
4020 		max = mpt->mpt_max_tgtcmds;
4021 	}
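	/*
	 * At most half of the request pool (further capped by
	 * mpt_max_tgtcmds) is posted as target command buffers; the
	 * rest stays free for TARGET_ASSIST and status-send requests.
	 */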
4022 	mpt->tgt_cmd_ptrs =
4023 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4024 	if (mpt->tgt_cmd_ptrs == NULL) {
4025 		mpt_prt(mpt,
4026 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
4027 		return (FALSE);
4028 	}
4029 
4030 	for (i = 0; i < max; i++) {
4031 		request_t *req;
4032 
4033 		req = mpt_get_request(mpt, FALSE);
4034 		if (req == NULL) {
4035 			break;
4036 		}
4037 		req->state |= REQ_STATE_LOCKED;
4038 		mpt->tgt_cmd_ptrs[i] = req;
4039 		mpt_post_target_command(mpt, req, i);
4040 	}
4041 
4043 	if (i == 0) {
4044 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4045 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4046 		mpt->tgt_cmd_ptrs = NULL;
4047 		return (FALSE);
4048 	}
4049 
4050 	mpt->tgt_cmds_allocated = i;
4051 
4052 	if (i < max) {
4053 		mpt_lprt(mpt, MPT_PRT_INFO,
4054 		    "added %d of %d target bufs\n", i, max);
4055 	}
4056 	return (i);
4057 }
4058 
4059 static int
4060 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4061 {
4062 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4063 		mpt->twildcard = 1;
4064 	} else if (lun >= MPT_MAX_LUNS) {
4065 		return (EINVAL);
4066 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4067 		return (EINVAL);
4068 	}
4069 	if (mpt->tenabled == 0) {
4070 		if (mpt->is_fc) {
4071 			(void) mpt_fc_reset_link(mpt, 0);
4072 		}
4073 		mpt->tenabled = 1;
4074 	}
4075 	if (lun == CAM_LUN_WILDCARD) {
4076 		mpt->trt_wildcard.enabled = 1;
4077 	} else {
4078 		mpt->trt[lun].enabled = 1;
4079 	}
4080 	return (0);
4081 }
4082 
4083 static int
4084 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4085 {
4086 	int i;
4087 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4088 		mpt->twildcard = 0;
4089 	} else if (lun >= MPT_MAX_LUNS) {
4090 		return (EINVAL);
4091 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4092 		return (EINVAL);
4093 	}
4094 	if (lun == CAM_LUN_WILDCARD) {
4095 		mpt->trt_wildcard.enabled = 0;
4096 	} else {
4097 		mpt->trt[lun].enabled = 0;
4098 	}
4099 	for (i = 0; i < MPT_MAX_LUNS; i++) {
4100 		if (mpt->trt[i].enabled) {
4101 			break;
4102 		}
4103 	}
4104 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4105 		if (mpt->is_fc) {
4106 			(void) mpt_fc_reset_link(mpt, 0);
4107 		}
4108 		mpt->tenabled = 0;
4109 	}
4110 	return (0);
4111 }
4112 
4113 /*
4114  * Called with MPT lock held
4115  */
4116 static void
4117 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4118 {
4119 	struct ccb_scsiio *csio = &ccb->csio;
4120 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4121 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4122 
4123 	switch (tgt->state) {
4124 	case TGT_STATE_IN_CAM:
4125 		break;
4126 	case TGT_STATE_MOVING_DATA:
4127 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4128 		xpt_freeze_simq(mpt->sim, 1);
4129 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4130 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4131 		MPTLOCK_2_CAMLOCK(mpt);
4132 		xpt_done(ccb);
4133 		CAMLOCK_2_MPTLOCK(mpt);
4134 		return;
4135 	default:
4136 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4137 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4138 		mpt_tgt_dump_req_state(mpt, cmd_req);
4139 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4140 		MPTLOCK_2_CAMLOCK(mpt);
4141 		xpt_done(ccb);
4142 		CAMLOCK_2_MPTLOCK(mpt);
4143 		return;
4144 	}
4145 
4146 	if (csio->dxfer_len) {
4147 		bus_dmamap_callback_t *cb;
4148 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4149 		request_t *req;
4150 
4151 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4152 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
4153 
4154 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4155 			if (mpt->outofbeer == 0) {
4156 				mpt->outofbeer = 1;
4157 				xpt_freeze_simq(mpt->sim, 1);
4158 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4159 			}
4160 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4161 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4162 			MPTLOCK_2_CAMLOCK(mpt);
4163 			xpt_done(ccb);
4164 			CAMLOCK_2_MPTLOCK(mpt);
4165 			return;
4166 		}
4167 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4168 		if (sizeof (bus_addr_t) > 4) {
4169 			cb = mpt_execute_req_a64;
4170 		} else {
4171 			cb = mpt_execute_req;
4172 		}
4173 
4174 		req->ccb = ccb;
4175 		ccb->ccb_h.ccb_req_ptr = req;
4176 
4177 		/*
4178 		 * Record the currently active ccb and the
4179 		 * request for it in our target state area.
4180 		 */
4181 		tgt->ccb = ccb;
4182 		tgt->req = req;
4183 
4184 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4185 		ta = req->req_vbuf;
4186 
4187 		if (mpt->is_sas) {
4188 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4189 			     cmd_req->req_vbuf;
4190 			ta->QueueTag = ssp->InitiatorTag;
4191 		} else if (mpt->is_spi) {
4192 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4193 			     cmd_req->req_vbuf;
4194 			ta->QueueTag = sp->Tag;
4195 		}
4196 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4197 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4198 		ta->ReplyWord = htole32(tgt->reply_desc);
4199 		if (csio->ccb_h.target_lun >= 256) {
4200 			ta->LUN[0] =
4201 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4202 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4203 		} else {
4204 			ta->LUN[1] = csio->ccb_h.target_lun;
4205 		}
4206 
4207 		ta->RelativeOffset = tgt->bytes_xfered;
4208 		ta->DataLength = ccb->csio.dxfer_len;
4209 		if (ta->DataLength > tgt->resid) {
4210 			ta->DataLength = tgt->resid;
4211 		}
4212 
4213 		/*
4214 		 * XXX Should be done after data transfer completes?
4215 		 */
4216 		tgt->resid -= csio->dxfer_len;
4217 		tgt->bytes_xfered += csio->dxfer_len;
4218 
4219 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4220 			ta->TargetAssistFlags |=
4221 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4222 		}
4223 
4224 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4225 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4226 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4227 			ta->TargetAssistFlags |=
4228 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4229 		}
4230 #endif
4231 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4232 
4233 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4234 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4235 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4236 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4237 
4238 		MPTLOCK_2_CAMLOCK(mpt);
4239 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4240 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4241 				int error;
4242 				int s = splsoftvm();
4243 				error = bus_dmamap_load(mpt->buffer_dmat,
4244 				    req->dmap, csio->data_ptr, csio->dxfer_len,
4245 				    cb, req, 0);
4246 				splx(s);
4247 				if (error == EINPROGRESS) {
4248 					xpt_freeze_simq(mpt->sim, 1);
4249 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4250 				}
4251 			} else {
4252 				/*
4253 				 * We have been given a pointer to single
4254 				 * physical buffer.
4255 				 */
4256 				struct bus_dma_segment seg;
4257 				seg.ds_addr = (bus_addr_t)
4258 				    (vm_offset_t)csio->data_ptr;
4259 				seg.ds_len = csio->dxfer_len;
4260 				(*cb)(req, &seg, 1, 0);
4261 			}
4262 		} else {
4263 			/*
4264 			 * We have been given a list of addresses.
4265 			 * This case could be easily supported but they are not
4266 			 * currently generated by the CAM subsystem so there
4267 			 * is no point in wasting the time right now.
4268 			 */
4269 			struct bus_dma_segment *sgs;
4270 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4271 				(*cb)(req, NULL, 0, EFAULT);
4272 			} else {
4273 				/* Just use the segments provided */
4274 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4275 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4276 			}
4277 		}
4278 		CAMLOCK_2_MPTLOCK(mpt);
4279 	} else {
4280 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4281 
4282 		/*
4283 		 * XXX: I don't know why this seems to happen, but
4284 		 * XXX: completing the CCB seems to make things happy.
4285 		 * XXX: This seems to happen if the initiator requests
4286 		 * XXX: enough data that we have to do multiple CTIOs.
4287 		 */
4288 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4289 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4290 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4291 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4292 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4293 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4294 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4295 			MPTLOCK_2_CAMLOCK(mpt);
4296 			xpt_done(ccb);
4297 			CAMLOCK_2_MPTLOCK(mpt);
4298 			return;
4299 		}
4300 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4301 			sp = sense;
4302 			memcpy(sp, &csio->sense_data,
4303 			   min(csio->sense_len, MPT_SENSE_SIZE));
4304 		}
4305 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4306 	}
4307 }
4308 
4309 static void
4310 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4311     uint32_t lun, int send, uint8_t *data, size_t length)
4312 {
4313 	mpt_tgt_state_t *tgt;
4314 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4315 	SGE_SIMPLE32 *se;
4316 	uint32_t flags;
4317 	uint8_t *dptr;
4318 	bus_addr_t pptr;
4319 	request_t *req;
4320 
4321 	/*
4322 	 * We enter with resid set to the data load for the command.
4323 	 */
4324 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4325 	if (length == 0 || tgt->resid == 0) {
4326 		tgt->resid = 0;
4327 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4328 		return;
4329 	}
4330 
4331 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4332 		mpt_prt(mpt, "out of resources- dropping local response\n");
4333 		return;
4334 	}
4335 	tgt->is_local = 1;
4336 
4338 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4339 	ta = req->req_vbuf;
4340 
4341 	if (mpt->is_sas) {
4342 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4343 		ta->QueueTag = ssp->InitiatorTag;
4344 	} else if (mpt->is_spi) {
4345 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4346 		ta->QueueTag = sp->Tag;
4347 	}
4348 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4349 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4350 	ta->ReplyWord = htole32(tgt->reply_desc);
4351 	if (lun >= 256) {
4352 		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4353 		ta->LUN[1] = lun & 0xff;
4354 	} else {
4355 		ta->LUN[1] = lun;
4356 	}
4357 	ta->RelativeOffset = 0;
4358 	ta->DataLength = length;
4359 
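	/*
	 * Stage the response data in the tail of this request's own
	 * buffer (MPT_RQSL(mpt) bytes in), so a single request carries
	 * both the TARGET_ASSIST message and the payload it points at.
	 */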
4360 	dptr = req->req_vbuf;
4361 	dptr += MPT_RQSL(mpt);
4362 	pptr = req->req_pbuf;
4363 	pptr += MPT_RQSL(mpt);
4364 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4365 
4366 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4367 	memset(se, 0,sizeof (*se));
4368 
4369 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4370 	if (send) {
4371 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4372 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4373 	}
4374 	se->Address = pptr;
4375 	MPI_pSGE_SET_LENGTH(se, length);
4376 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4377 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4378 	MPI_pSGE_SET_FLAGS(se, flags);
4379 
4380 	tgt->ccb = NULL;
4381 	tgt->req = req;
4382 	tgt->resid -= length;
4383 	tgt->bytes_xfered = length;
4384 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4385 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4386 #else
4387 	tgt->state = TGT_STATE_MOVING_DATA;
4388 #endif
4389 	mpt_send_cmd(mpt, req);
4390 }
4391 
4392 /*
4393  * Abort queued up CCBs
4394  */
4395 static cam_status
4396 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4397 {
4398 	struct mpt_hdr_stailq *lp;
4399 	struct ccb_hdr *srch;
4400 	int found = 0;
4401 	union ccb *accb = ccb->cab.abort_ccb;
4402 	tgt_resource_t *trtp;
4403 
4404 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4405 
4406 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4407 		trtp = &mpt->trt_wildcard;
4408 	} else {
4409 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4410 	}
4411 
4412 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4413 		lp = &trtp->atios;
4414 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4415 		lp = &trtp->inots;
4416 	} else {
4417 		return (CAM_REQ_INVALID);
4418 	}
4419 
4420 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4421 		if (srch == &accb->ccb_h) {
4422 			found = 1;
4423 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4424 			break;
4425 		}
4426 	}
4427 	if (found) {
4428 		accb->ccb_h.status = CAM_REQ_ABORTED;
4429 		xpt_done(accb);
4430 		return (CAM_REQ_CMP);
4431 	}
4432 	mpt_prt(mpt, "mpt_abort_target_ccb: CCB %p not found\n", accb);
4433 	return (CAM_PATH_INVALID);
4434 }
4435 
4436 /*
4437  * Ask the MPT to abort the current target command
4438  */
4439 static int
4440 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4441 {
4442 	int error;
4443 	request_t *req;
4444 	PTR_MSG_TARGET_MODE_ABORT abtp;
4445 
4446 	req = mpt_get_request(mpt, FALSE);
4447 	if (req == NULL) {
4448 		return (-1);
4449 	}
4450 	abtp = req->req_vbuf;
4451 	memset(abtp, 0, sizeof (*abtp));
4452 
4453 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4454 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4455 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4456 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4457 	error = 0;
4458 	if (mpt->is_fc || mpt->is_sas) {
4459 		mpt_send_cmd(mpt, req);
4460 	} else {
4461 		error = mpt_send_handshake_cmd(mpt, sizeof(*abtp), abtp);
4462 	}
4463 	return (error);
4464 }
4465 
4466 /*
4467  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4468  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4469  * FC929 to set bogus FC_RSP fields (nonzero residuals
4470  * but w/o RESID fields set). This causes QLogic initiators
4471  * to think maybe that a frame was lost.
4472  *
4473  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4474  * we use allocated requests to do TARGET_ASSIST and we
4475  * need to know when to release them.
4476  */
4477 
4478 static void
4479 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4480     uint8_t status, uint8_t const *sense_data)
4481 {
4482 	uint8_t *cmd_vbuf;
4483 	mpt_tgt_state_t *tgt;
4484 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4485 	request_t *req;
4486 	bus_addr_t paddr;
4487 	int resplen = 0;
4488 	uint32_t fl;
4489 
4490 	cmd_vbuf = cmd_req->req_vbuf;
4491 	cmd_vbuf += MPT_RQSL(mpt);
4492 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4493 
4494 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4495 		if (mpt->outofbeer == 0) {
4496 			mpt->outofbeer = 1;
4497 			xpt_freeze_simq(mpt->sim, 1);
4498 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4499 		}
4500 		if (ccb) {
4501 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4502 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4503 			MPTLOCK_2_CAMLOCK(mpt);
4504 			xpt_done(ccb);
4505 			CAMLOCK_2_MPTLOCK(mpt);
4506 		} else {
4507 			mpt_prt(mpt,
4508 			    "could not allocate status request- dropping\n");
4509 		}
4510 		return;
4511 	}
4512 	req->ccb = ccb;
4513 	if (ccb) {
4514 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4515 		ccb->ccb_h.ccb_req_ptr = req;
4516 	}
4517 
4518 	/*
4519 	 * Record the currently active ccb, if any, and the
4520 	 * request for it in our target state area.
4521 	 */
4522 	tgt->ccb = ccb;
4523 	tgt->req = req;
4524 	tgt->state = TGT_STATE_SENDING_STATUS;
4525 
4526 	tp = req->req_vbuf;
4527 	paddr = req->req_pbuf;
4528 	paddr += MPT_RQSL(mpt);
4529 
4530 	memset(tp, 0, sizeof (*tp));
4531 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4532 	if (mpt->is_fc) {
4533 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4534 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4535 		uint8_t *sts_vbuf;
4536 		uint32_t *rsp;
4537 
4538 		sts_vbuf = req->req_vbuf;
4539 		sts_vbuf += MPT_RQSL(mpt);
4540 		rsp = (uint32_t *) sts_vbuf;
4541 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4542 
4543 		/*
4544 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4545 		 * It has to be big-endian in memory and is organized
4546 		 * in 32 bit words, which are much easier to deal with
4547 		 * as words that are swizzled as needed.
4548 		 *
4549 		 * All we're filling here is the FC_RSP payload.
4550 		 * We may just have the chip synthesize it if
4551 		 * we have no residual and an OK status.
4552 		 *
4553 		 */
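		/*
		 * Word layout used below (32 bit word indices into the
		 * FCP_RSP payload): word 2 carries the FCP flags and
		 * SCSI status, word 3 the residual, word 4 the sense
		 * length, and sense data starts at word 8.
		 */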
4554 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4555 
4556 		rsp[2] = status;
4557 		if (tgt->resid) {
4558 			rsp[2] |= 0x800;	/* FCP_RESID_UNDER */
4559 			rsp[3] = htobe32(tgt->resid);
4560 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4561 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4562 #endif
4563 		}
4564 		if (status == SCSI_STATUS_CHECK_COND) {
4565 			int i;
4566 
4567 			rsp[2] |= 0x200;	/* FCP_SNS_LEN_VALID */
4568 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4569 			if (sense_data) {
4570 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4571 			} else {
4572 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4573 				    "TION but no sense data?\n");
4574 				memset(&rsp[8], 0, MPT_SENSE_SIZE);
4575 			}
4576 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4577 				rsp[i] = htobe32(rsp[i]);
4578 			}
4579 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4580 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4581 #endif
4582 		}
4583 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4584 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4585 #endif
4586 		rsp[2] = htobe32(rsp[2]);
4587 	} else if (mpt->is_sas) {
4588 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4589 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4590 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4591 	} else {
4592 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4593 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4594 		tp->StatusCode = status;
4595 		tp->QueueTag = htole16(sp->Tag);
4596 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4597 	}
4598 
4599 	tp->ReplyWord = htole32(tgt->reply_desc);
4600 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4601 
4602 #ifdef	WE_CAN_USE_AUTO_REPOST
4603 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4604 #endif
4605 	if (status == SCSI_STATUS_OK && resplen == 0) {
4606 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4607 	} else {
4608 		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4609 		fl =
4610 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4611 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4612 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4613 			MPI_SGE_FLAGS_END_OF_LIST	|
4614 			MPI_SGE_FLAGS_END_OF_BUFFER;
4615 		fl <<= MPI_SGE_FLAGS_SHIFT;
4616 		fl |= resplen;
4617 		tp->StatusDataSGE.FlagsLength = htole32(fl);
4618 	}
4619 
4620 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4621 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4622 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4623 	    req->serno, tgt->resid);
4624 	if (ccb) {
4625 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4626 		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
4627 	}
4628 	mpt_send_cmd(mpt, req);
4629 }
4630 
4631 static void
4632 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4633     tgt_resource_t *trtp, int init_id)
4634 {
4635 	struct ccb_immed_notify *inot;
4636 	mpt_tgt_state_t *tgt;
4637 
4638 	tgt = MPT_TGT_STATE(mpt, req);
4639 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4640 	if (inot == NULL) {
4641 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
4642 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4643 		return;
4644 	}
4645 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4646 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4647 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4648 
4649 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4650 	inot->sense_len = 0;
4651 	memset(inot->message_args, 0, sizeof (inot->message_args));
4652 	inot->initiator_id = init_id;	/* XXX */
4653 
4654 	/*
4655 	 * This is a somewhat grotesque attempt to map from task management
4656 	 * to old style SCSI messages. God help us all.
4657 	 */
4658 	switch (fc) {
4659 	case MPT_ABORT_TASK_SET:
4660 		inot->message_args[0] = MSG_ABORT_TAG;
4661 		break;
4662 	case MPT_CLEAR_TASK_SET:
4663 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4664 		break;
4665 	case MPT_TARGET_RESET:
4666 		inot->message_args[0] = MSG_TARGET_RESET;
4667 		break;
4668 	case MPT_CLEAR_ACA:
4669 		inot->message_args[0] = MSG_CLEAR_ACA;
4670 		break;
4671 	case MPT_TERMINATE_TASK:
4672 		inot->message_args[0] = MSG_ABORT_TAG;
4673 		break;
4674 	default:
4675 		inot->message_args[0] = MSG_NOOP;
4676 		break;
4677 	}
4678 	tgt->ccb = (union ccb *) inot;
4679 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4680 	MPTLOCK_2_CAMLOCK(mpt);
4681 	xpt_done((union ccb *)inot);
4682 	CAMLOCK_2_MPTLOCK(mpt);
4683 }
4684 
4685 static void
4686 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4687 {
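	/*
	 * Canned "no such device" inquiry data: peripheral qualifier
	 * 011b with device type 1Fh in byte 0 (0x7f) means "logical
	 * unit not supported" per SPC.
	 */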
4688 	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
4689 	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
4690 	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
4691 	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
4692 	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
4693 	     '0',  '0',  '0',  '1'
4694 	};
4695 	struct ccb_accept_tio *atiop;
4696 	lun_id_t lun;
4697 	int tag_action = 0;
4698 	mpt_tgt_state_t *tgt;
4699 	tgt_resource_t *trtp = NULL;
4700 	U8 *lunptr;
4701 	U8 *vbuf;
4702 	U16 itag;
4703 	U16 ioindex;
4704 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4705 	uint8_t *cdbp;
4706 
4707 	/*
4708 	 * First, DMA sync the received command,
4709 	 * which is in the *request* phys area.
4710 	 *
4711 	 * XXX: We could optimize this for a range
4712 	 */
4713 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
4714 	    BUS_DMASYNC_POSTREAD);
4715 
4716 	/*
4717 	 * Stash info for the current command where we can get at it later.
4718 	 */
4719 	vbuf = req->req_vbuf;
4720 	vbuf += MPT_RQSL(mpt);
4721 
4722 	/*
4723 	 * Get our state pointer set up.
4724 	 */
4725 	tgt = MPT_TGT_STATE(mpt, req);
4726 	if (tgt->state != TGT_STATE_LOADED) {
4727 		mpt_tgt_dump_req_state(mpt, req);
4728 		panic("bad target state in mpt_scsi_tgt_atio");
4729 	}
4730 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4731 	tgt->state = TGT_STATE_IN_CAM;
4732 	tgt->reply_desc = reply_desc;
4733 	ioindex = GET_IO_INDEX(reply_desc);
4734 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4735 		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
4736 		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
4737 		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
4738 		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
4739 	}
4740 	if (mpt->is_fc) {
4741 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4742 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4743 		if (fc->FcpCntl[2]) {
4744 			/*
4745 			 * Task Management Request
4746 			 */
4747 			switch (fc->FcpCntl[2]) {
4748 			case 0x2:
4749 				fct = MPT_ABORT_TASK_SET;
4750 				break;
4751 			case 0x4:
4752 				fct = MPT_CLEAR_TASK_SET;
4753 				break;
4754 			case 0x20:
4755 				fct = MPT_TARGET_RESET;
4756 				break;
4757 			case 0x40:
4758 				fct = MPT_CLEAR_ACA;
4759 				break;
4760 			case 0x80:
4761 				fct = MPT_TERMINATE_TASK;
4762 				break;
4763 			default:
4764 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4765 				    fc->FcpCntl[2]);
4766 				mpt_scsi_tgt_status(mpt, 0, req,
4767 				    SCSI_STATUS_OK, 0);
4768 				return;
4769 			}
4770 		} else {
4771 			switch (fc->FcpCntl[1]) {
4772 			case 0:
4773 				tag_action = MSG_SIMPLE_Q_TAG;
4774 				break;
4775 			case 1:
4776 				tag_action = MSG_HEAD_OF_Q_TAG;
4777 				break;
4778 			case 2:
4779 				tag_action = MSG_ORDERED_Q_TAG;
4780 				break;
4781 			default:
4782 				/*
4783 				 * Bah. Ignore Untagged Queueing and ACA
4784 				 */
4785 				tag_action = MSG_SIMPLE_Q_TAG;
4786 				break;
4787 			}
4788 		}
4789 		tgt->resid = be32toh(fc->FcpDl);
4790 		cdbp = fc->FcpCdb;
4791 		lunptr = fc->FcpLun;
4792 		itag = be16toh(fc->OptionalOxid);
4793 	} else if (mpt->is_sas) {
4794 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4795 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4796 		cdbp = ssp->CDB;
4797 		lunptr = ssp->LogicalUnitNumber;
4798 		itag = ssp->InitiatorTag;
4799 	} else {
4800 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4801 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4802 		cdbp = sp->CDB;
4803 		lunptr = sp->LogicalUnitNumber;
4804 		itag = sp->Tag;
4805 	}
4806 
4807 	/*
4808 	 * Generate a simple lun
4809 	 */
4810 	switch (lunptr[0] & 0xc0) {
4811 	case 0x40:
4812 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
4813 		break;
4814 	case 0:
4815 		lun = lunptr[1];
4816 		break;
4817 	default:
4818 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
4819 		lun = 0xffff;
4820 		break;
4821 	}
4822 
4823 	/*
4824 	 * Deal with non-enabled or bad luns here.
4825 	 */
4826 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4827 	    mpt->trt[lun].enabled == 0) {
4828 		if (mpt->twildcard) {
4829 			trtp = &mpt->trt_wildcard;
4830 		} else if (fct == MPT_NIL_TMT_VALUE) {
4831 			/*
4832 			 * In this case, we haven't got an upstream listener
4833 			 * for either a specific lun or wildcard luns. We
4834 			 * have to make some sensible response. For regular
4835 			 * inquiry, just return some NOT HERE inquiry data.
4836 			 * For VPD inquiry, report illegal field in cdb.
4837 			 * For REQUEST SENSE, just return NO SENSE data.
4838 			 * REPORT LUNS gets illegal command.
4839 			 * All other commands get 'no such device'.
4840 			 */
4841 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
4842 			size_t len;
4843 
4844 			memset(buf, 0, MPT_SENSE_SIZE);
4845 			cond = SCSI_STATUS_CHECK_COND;
4846 			buf[0] = 0xf0;
4847 			buf[2] = 0x5;
4848 			buf[7] = 0x8;
4849 			sp = buf;
4850 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4851 
4852 			switch (cdbp[0]) {
4853 			case INQUIRY:
4854 			{
4855 				if (cdbp[1] != 0) {
4856 					buf[12] = 0x26;
4857 					buf[13] = 0x01;
4858 					break;
4859 				}
4860 				len = min(tgt->resid, cdbp[4]);
4861 				len = min(len, sizeof (null_iqd));
4862 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4863 				    "local inquiry %ld bytes\n", (long) len);
4864 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4865 				    null_iqd, len);
4866 				return;
4867 			}
4868 			case REQUEST_SENSE:
4869 			{
4870 				buf[2] = 0x0;
4871 				len = min(tgt->resid, cdbp[4]);
4872 				len = min(len, sizeof (buf));
4873 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4874 				    "local reqsense %ld bytes\n", (long) len);
4875 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4876 				    buf, len);
4877 				return;
4878 			}
4879 			case REPORT_LUNS:
4880 				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
4881 				buf[12] = 0x26;
4882 				break;
4883 			default:
4884 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4885 				    "CMD 0x%x to unmanaged lun %u\n",
4886 				    cdbp[0], lun);
4887 				buf[12] = 0x25;
4888 				break;
4889 			}
4890 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
4891 			return;
4892 		}
4893 		/* otherwise, leave trtp NULL */
4894 	} else {
4895 		trtp = &mpt->trt[lun];
4896 	}
4897 
4898 	/*
4899 	 * Deal with any task management
4900 	 */
4901 	if (fct != MPT_NIL_TMT_VALUE) {
4902 		if (trtp == NULL) {
4903 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
4904 			    fct);
4905 			mpt_scsi_tgt_status(mpt, 0, req,
4906 			    SCSI_STATUS_OK, 0);
4907 		} else {
4908 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
4909 			    GET_INITIATOR_INDEX(reply_desc));
4910 		}
4911 		return;
4912 	}
4913 
4915 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
4916 	if (atiop == NULL) {
4917 		mpt_lprt(mpt, MPT_PRT_WARN,
4918 		    "no ATIOs for lun %u- sending back %s\n", lun,
4919 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
4920 		mpt_scsi_tgt_status(mpt, NULL, req,
4921 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
4922 		    NULL);
4923 		return;
4924 	}
4925 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
4926 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4927 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
4928 	atiop->ccb_h.ccb_mpt_ptr = mpt;
4929 	atiop->ccb_h.status = CAM_CDB_RECVD;
4930 	atiop->ccb_h.target_lun = lun;
4931 	atiop->sense_len = 0;
4932 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
4933 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
4934 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
4935 
4936 	/*
4937 	 * The tag we construct here allows us to find the
4938 	 * original request that the command came in with.
4939 	 *
4940 	 * This way we don't have to depend on anything but the
4941 	 * tag to find things when CCBs show back up from CAM.
4942 	 */
4943 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4944 	tgt->tag_id = atiop->tag_id;
4945 	if (tag_action) {
4946 		atiop->tag_action = tag_action;
4947 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
4948 	}
4949 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4950 		int i;
4951 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
4952 		    atiop->ccb_h.target_lun);
4953 		for (i = 0; i < atiop->cdb_len; i++) {
4954 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
4955 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
4956 		}
4957 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
4958 	    	    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
4959 	}
4960 
4961 	MPTLOCK_2_CAMLOCK(mpt);
4962 	xpt_done((union ccb *)atiop);
4963 	CAMLOCK_2_MPTLOCK(mpt);
4964 }
4965 
4966 static void
4967 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
4968 {
4969 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4970 
4971 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
4972 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
4973 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
4974 	    tgt->tag_id, tgt->state);
4975 }
4976 
4977 static void
4978 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
4979 {
4980 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
4981 	    req->index, req->index, req->state);
4982 	mpt_tgt_dump_tgt_state(mpt, req);
4983 }
4984 
4985 static int
4986 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
4987     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
4988 {
4989 	int dbg;
4990 	union ccb *ccb;
4991 	U16 status;
4992 
4993 	if (reply_frame == NULL) {
4994 		/*
4995 		 * Figure out what the state of the command is.
4996 		 */
4997 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4998 
4999 #ifdef	INVARIANTS
5000 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
5001 		if (tgt->req) {
5002 			mpt_req_not_spcl(mpt, tgt->req,
5003 			    "turbo scsi_tgt_reply associated req", __LINE__);
5004 		}
5005 #endif
5006 		switch(tgt->state) {
5007 		case TGT_STATE_LOADED:
5008 			/*
5009 			 * This is a new command starting.
5010 			 */
5011 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
5012 			break;
5013 		case TGT_STATE_MOVING_DATA:
5014 		{
5015 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
5016 
5017 			ccb = tgt->ccb;
5018 			if (tgt->req == NULL) {
5019 				panic("mpt: turbo target reply with null "
5020 				    "associated request moving data");
5021 				/* NOTREACHED */
5022 			}
5023 			if (ccb == NULL) {
5024 				if (tgt->is_local == 0) {
5025 					panic("mpt: turbo target reply with "
5026 					    "null associated ccb moving data");
5027 					/* NOTREACHED */
5028 				}
5029 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5030 				    "TARGET_ASSIST local done\n");
5031 				TAILQ_REMOVE(&mpt->request_pending_list,
5032 				    tgt->req, links);
5033 				mpt_free_request(mpt, tgt->req);
5034 				tgt->req = NULL;
5035 				mpt_scsi_tgt_status(mpt, NULL, req,
5036 				    0, NULL);
5037 				return (TRUE);
5038 			}
5039 			tgt->ccb = NULL;
5040 			tgt->nxfers++;
5041 			mpt_req_untimeout(req, mpt_timeout, ccb);
5042 			mpt_lprt(mpt, MPT_PRT_DEBUG,
5043 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5044 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5045 			/*
5046 			 * Free the Target Assist Request
5047 			 */
5048 			KASSERT(tgt->req->ccb == ccb,
5049 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
5050 			    tgt->req->serno, tgt->req->ccb));
5051 			TAILQ_REMOVE(&mpt->request_pending_list,
5052 			    tgt->req, links);
5053 			mpt_free_request(mpt, tgt->req);
5054 			tgt->req = NULL;
5055 
5056 			/*
5057 			 * Do we need to send status now? That is, are
5058 			 * we done with all our data transfers?
5059 			 */
5060 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
5061 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5062 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5063 				KASSERT(ccb->ccb_h.status,
5064 				    ("zero ccb sts at %d\n", __LINE__));
5065 				tgt->state = TGT_STATE_IN_CAM;
5066 				if (mpt->outofbeer) {
5067 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5068 					mpt->outofbeer = 0;
5069 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5070 				}
5071 				MPTLOCK_2_CAMLOCK(mpt);
5072 				xpt_done(ccb);
5073 				CAMLOCK_2_MPTLOCK(mpt);
5074 				break;
5075 			}
5076 			/*
5077 			 * Otherwise, send status (and sense)
5078 			 */
5079 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5080 				sp = sense;
5081 				memcpy(sp, &ccb->csio.sense_data,
5082 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
5083 			}
5084 			mpt_scsi_tgt_status(mpt, ccb, req,
5085 			    ccb->csio.scsi_status, sp);
5086 			break;
5087 		}
5088 		case TGT_STATE_SENDING_STATUS:
5089 		case TGT_STATE_MOVING_DATA_AND_STATUS:
5090 		{
5091 			int ioindex;
5092 			ccb = tgt->ccb;
5093 
5094 			if (tgt->req == NULL) {
5095 				panic("mpt: turbo target reply with null "
5096 				    "associated request sending status");
5097 				/* NOTREACHED */
5098 			}
5099 
5100 			if (ccb) {
5101 				tgt->ccb = NULL;
5102 				if (tgt->state ==
5103 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
5104 					tgt->nxfers++;
5105 				}
5106 				mpt_req_untimeout(req, mpt_timeout, ccb);
5107 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5108 					ccb->ccb_h.status |= CAM_SENT_SENSE;
5109 				}
5110 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5111 				    "TARGET_STATUS tag %x sts %x flgs %x req "
5112 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5113 				    ccb->ccb_h.flags, tgt->req);
5114 				/*
5115 				 * Free the Target Send Status Request
5116 				 */
5117 				KASSERT(tgt->req->ccb == ccb,
5118 				    ("tgt->req %p:%u tgt->req->ccb %p",
5119 				    tgt->req, tgt->req->serno, tgt->req->ccb));
5120 				/*
5121 				 * Notify CAM that we're done
5122 				 */
5123 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5124 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5125 				KASSERT(ccb->ccb_h.status,
5126 				    ("ZERO ccb sts at %d\n", __LINE__));
5127 				tgt->ccb = NULL;
5128 			} else {
5129 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5130 				    "TARGET_STATUS non-CAM for req %p:%u\n",
5131 				    tgt->req, tgt->req->serno);
5132 			}
5133 			TAILQ_REMOVE(&mpt->request_pending_list,
5134 			    tgt->req, links);
5135 			mpt_free_request(mpt, tgt->req);
5136 			tgt->req = NULL;
5137 
5138 			/*
5139 			 * And re-post the Command Buffer.
5140 			 * This will reset the state.
5141 			 */
5142 			ioindex = GET_IO_INDEX(reply_desc);
5143 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5144 			tgt->is_local = 0;
5145 			mpt_post_target_command(mpt, req, ioindex);
5146 
5147 			/*
5148 			 * And post a done for anyone who cares
5149 			 */
5150 			if (ccb) {
5151 				if (mpt->outofbeer) {
5152 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5153 					mpt->outofbeer = 0;
5154 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5155 				}
5156 				MPTLOCK_2_CAMLOCK(mpt);
5157 				xpt_done(ccb);
5158 				CAMLOCK_2_MPTLOCK(mpt);
5159 			}
5160 			break;
5161 		}
5162 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
5163 			tgt->state = TGT_STATE_LOADED;
5164 			break;
5165 		default:
5166 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5167 			    "Reply Function\n", tgt->state);
5168 		}
5169 		return (TRUE);
5170 	}
5171 
5172 	status = le16toh(reply_frame->IOCStatus);
5173 	if (status != MPI_IOCSTATUS_SUCCESS) {
5174 		dbg = MPT_PRT_ERROR;
5175 	} else {
5176 		dbg = MPT_PRT_DEBUG1;
5177 	}
5178 
5179 	mpt_lprt(mpt, dbg,
5180 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5181 	     req, req->serno, reply_frame, reply_frame->Function, status);
5182 
5183 	switch (reply_frame->Function) {
5184 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5185 	{
5186 		mpt_tgt_state_t *tgt;
5187 #ifdef	INVARIANTS
5188 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5189 #endif
5190 		if (status != MPI_IOCSTATUS_SUCCESS) {
5191 			/*
5192 			 * XXX What to do?
5193 			 */
5194 			break;
5195 		}
5196 		tgt = MPT_TGT_STATE(mpt, req);
5197 		KASSERT(tgt->state == TGT_STATE_LOADING,
5198 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
5199 		mpt_assign_serno(mpt, req);
5200 		tgt->state = TGT_STATE_LOADED;
5201 		break;
5202 	}
5203 	case MPI_FUNCTION_TARGET_ASSIST:
5204 #ifdef	INVARIANTS
5205 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5206 #endif
5207 		mpt_prt(mpt, "target assist completion\n");
5208 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5209 		mpt_free_request(mpt, req);
5210 		break;
5211 	case MPI_FUNCTION_TARGET_STATUS_SEND:
5212 #ifdef	INVARIANTS
5213 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5214 #endif
5215 		mpt_prt(mpt, "status send completion\n");
5216 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5217 		mpt_free_request(mpt, req);
5218 		break;
5219 	case MPI_FUNCTION_TARGET_MODE_ABORT:
5220 	{
5221 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5222 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5223 		PTR_MSG_TARGET_MODE_ABORT abtp =
5224 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5225 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5226 #ifdef	INVARIANTS
5227 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5228 #endif
5229 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5230 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5231 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5232 		mpt_free_request(mpt, req);
5233 		break;
5234 	}
5235 	default:
5236 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5237 		    "0x%x\n", reply_frame->Function);
5238 		break;
5239 	}
5240 	return (TRUE);
5241 }
5242