xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision e4e9813eb92cd7c4d4b819a8fbed5cbd3d92f5d8)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC  adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  *
61  * Support from LSI-Logic has also gone a great deal toward making this a
62  * workable subsystem and is gratefully acknowledged.
63  */
64 /*-
65  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66  * Copyright (c) 2005, WHEEL Sp. z o.o.
67  * Copyright (c) 2004, 2005 Justin T. Gibbs
68  * All rights reserved.
69  *
70  * Redistribution and use in source and binary forms, with or without
71  * modification, are permitted provided that the following conditions are
72  * met:
73  * 1. Redistributions of source code must retain the above copyright
74  *    notice, this list of conditions and the following disclaimer.
75  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76  *    substantially similar to the "NO WARRANTY" disclaimer below
77  *    ("Disclaimer") and any redistribution must be conditioned upon including
78  *    a substantially similar Disclaimer requirement for further binary
79  *    redistribution.
80  * 3. Neither the names of the above listed copyright holders nor the names
81  *    of any contributors may be used to endorse or promote products derived
82  *    from this software without specific prior written permission.
83  *
84  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95  */
96 #include <sys/cdefs.h>
97 __FBSDID("$FreeBSD$");
98 
99 #include <dev/mpt/mpt.h>
100 #include <dev/mpt/mpt_cam.h>
101 #include <dev/mpt/mpt_raid.h>
102 
103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/mpt/mpilib/mpi_init.h"
105 #include "dev/mpt/mpilib/mpi_targ.h"
106 #include "dev/mpt/mpilib/mpi_fc.h"
107 
108 #include <sys/sysctl.h>
109 
110 #include <sys/callout.h>
111 #include <sys/kthread.h>
112 
113 static void mpt_poll(struct cam_sim *);
114 static timeout_t mpt_timeout;
115 static void mpt_action(struct cam_sim *, union ccb *);
116 static int
117 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
118 static void mpt_setwidth(struct mpt_softc *, int, int);
119 static void mpt_setsync(struct mpt_softc *, int, int, int);
120 static int mpt_update_spi_config(struct mpt_softc *, int);
121 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
122 
123 static mpt_reply_handler_t mpt_scsi_reply_handler;
124 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
125 static mpt_reply_handler_t mpt_fc_els_reply_handler;
126 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
127 					MSG_DEFAULT_REPLY *);
128 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
129 static int mpt_fc_reset_link(struct mpt_softc *, int);
130 
131 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
132 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
133 static void mpt_recovery_thread(void *arg);
134 static void mpt_recover_commands(struct mpt_softc *mpt);
135 
136 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
137     u_int, u_int, u_int, int);
138 
139 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
140 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
141 static int mpt_add_els_buffers(struct mpt_softc *mpt);
142 static int mpt_add_target_commands(struct mpt_softc *mpt);
143 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
144 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
145 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
146 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
147 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
148 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
149     uint8_t, uint8_t const *);
150 static void
151 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
152     tgt_resource_t *, int);
153 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
154 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
155 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
156 
157 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
158 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
159 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
160 
161 static mpt_probe_handler_t	mpt_cam_probe;
162 static mpt_attach_handler_t	mpt_cam_attach;
163 static mpt_enable_handler_t	mpt_cam_enable;
164 static mpt_event_handler_t	mpt_cam_event;
165 static mpt_reset_handler_t	mpt_cam_ioc_reset;
166 static mpt_detach_handler_t	mpt_cam_detach;
167 
168 static struct mpt_personality mpt_cam_personality =
169 {
170 	.name		= "mpt_cam",
171 	.probe		= mpt_cam_probe,
172 	.attach		= mpt_cam_attach,
173 	.enable		= mpt_cam_enable,
174 	.event		= mpt_cam_event,
175 	.reset		= mpt_cam_ioc_reset,
176 	.detach		= mpt_cam_detach,
177 };
178 
179 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
180 
181 int
182 mpt_cam_probe(struct mpt_softc *mpt)
183 {
184 	int role;
185 
186 	/*
187 	 * Only attach to nodes that support the initiator or target role
188 	 * (or want to) or have RAID physical devices that need CAM pass-thru
189 	 * support.
190 	 */
191 	if (mpt->do_cfg_role) {
192 		role = mpt->cfg_role;
193 	} else {
194 		role = mpt->role;
195 	}
196 	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
197 	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
198 		return (0);
199 	}
200 	return (ENODEV);
201 }
202 
203 int
204 mpt_cam_attach(struct mpt_softc *mpt)
205 {
206 	struct cam_devq *devq;
207 	mpt_handler_t	 handler;
208 	int		 maxq;
209 	int		 error;
210 
211 	TAILQ_INIT(&mpt->request_timeout_list);
212 	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
213 	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
214 
215 	handler.reply_handler = mpt_scsi_reply_handler;
216 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
217 				     &scsi_io_handler_id);
218 	if (error != 0) {
219 		goto cleanup0;
220 	}
221 
222 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
223 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
224 				     &scsi_tmf_handler_id);
225 	if (error != 0) {
226 		goto cleanup0;
227 	}
228 
229 	/*
230 	 * If we're fibre channel and could support target mode, we register
231 	 * an ELS reply handler and give it resources.
232 	 */
233 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
234 		handler.reply_handler = mpt_fc_els_reply_handler;
235 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
236 		    &fc_els_handler_id);
237 		if (error != 0) {
238 			goto cleanup0;
239 		}
240 		if (mpt_add_els_buffers(mpt) == FALSE) {
241 			error = ENOMEM;
242 			goto cleanup0;
243 		}
244 		maxq -= mpt->els_cmds_allocated;
245 	}
246 
247 	/*
248 	 * If we support target mode, we register a reply handler for it,
249 	 * but don't add resources until we actually enable target mode.
250 	 */
251 	if ((mpt->role & MPT_ROLE_TARGET) != 0) {
252 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
253 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
254 		    &mpt->scsi_tgt_handler_id);
255 		if (error != 0) {
256 			goto cleanup0;
257 		}
258 	}
259 
260 	/*
261 	 * We keep one request reserved for timeout TMF requests.
262 	 */
263 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
264 	if (mpt->tmf_req == NULL) {
265 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
266 		error = ENOMEM;
267 		goto cleanup0;
268 	}
269 
270 	/*
271 	 * Mark the request as free even though not on the free list.
272 	 * There is only one TMF request allowed to be outstanding at
273 	 * a time and the TMF routines perform their own allocation
274 	 * tracking using the standard state flags.
275 	 */
276 	mpt->tmf_req->state = REQ_STATE_FREE;
277 	maxq--;
278 
279 	if (mpt_spawn_recovery_thread(mpt) != 0) {
280 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
281 		error = ENOMEM;
282 		goto cleanup0;
283 	}
284 
285 	/*
286 	 * The rest of this is CAM foo, for which we need to drop our lock
287 	 */
288 	MPTLOCK_2_CAMLOCK(mpt);
289 
290 	/*
291 	 * Create the device queue for our SIM(s).
292 	 */
293 	devq = cam_simq_alloc(maxq);
294 	if (devq == NULL) {
295 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
296 		error = ENOMEM;
297 		goto cleanup;
298 	}
299 
300 	/*
301 	 * Construct our SIM entry.
302 	 */
303 	mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
304 	    mpt->unit, 1, maxq, devq);
305 	if (mpt->sim == NULL) {
306 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
307 		cam_simq_free(devq);
308 		error = ENOMEM;
309 		goto cleanup;
310 	}
311 
312 	/*
313 	 * Register exactly this bus.
314 	 */
315 	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
316 		mpt_prt(mpt, "Bus registration Failed!\n");
317 		error = ENOMEM;
318 		goto cleanup;
319 	}
320 
321 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
322 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
323 		mpt_prt(mpt, "Unable to allocate Path!\n");
324 		error = ENOMEM;
325 		goto cleanup;
326 	}
327 
328 	/*
329 	 * Only register a second bus for RAID physical
330 	 * devices if the controller supports RAID.
331 	 */
332 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
333 		CAMLOCK_2_MPTLOCK(mpt);
334 		return (0);
335 	}
336 
337 	/*
338 	 * Create a "bus" to export all hidden disks to CAM.
339 	 */
340 	mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
341 	    mpt->unit, 1, maxq, devq);
342 	if (mpt->phydisk_sim == NULL) {
343 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
344 		error = ENOMEM;
345 		goto cleanup;
346 	}
347 
348 	/*
349 	 * Register this bus.
350 	 */
351 	if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
352 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
353 		error = ENOMEM;
354 		goto cleanup;
355 	}
356 
357 	if (xpt_create_path(&mpt->phydisk_path, NULL,
358 	    cam_sim_path(mpt->phydisk_sim),
359 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
360 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
361 		error = ENOMEM;
362 		goto cleanup;
363 	}
364 	CAMLOCK_2_MPTLOCK(mpt);
365 	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
366 	return (0);
367 
368 cleanup:
369 	CAMLOCK_2_MPTLOCK(mpt);
370 cleanup0:
371 	mpt_cam_detach(mpt);
372 	return (error);
373 }
374 
375 /*
376  * Read FC configuration information
377  */
378 static int
379 mpt_read_config_info_fc(struct mpt_softc *mpt)
380 {
381 	char *topology = NULL;
382 	int rv;
383 
384 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
385 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
386 	if (rv) {
387 		return (-1);
388 	}
389 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
390 		 mpt->mpt_fcport_page0.Header.PageVersion,
391 		 mpt->mpt_fcport_page0.Header.PageLength,
392 		 mpt->mpt_fcport_page0.Header.PageNumber,
393 		 mpt->mpt_fcport_page0.Header.PageType);
394 
395 
396 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
397 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
398 	if (rv) {
399 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
400 		return (-1);
401 	}
402 
403 	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
404 
405 	switch (mpt->mpt_fcport_page0.Flags &
406 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
407 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
408 		mpt->mpt_fcport_speed = 0;
409 		topology = "<NO LOOP>";
410 		break;
411 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
412 		topology = "N-Port";
413 		break;
414 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
415 		topology = "NL-Port";
416 		break;
417 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
418 		topology = "F-Port";
419 		break;
420 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
421 		topology = "FL-Port";
422 		break;
423 	default:
424 		mpt->mpt_fcport_speed = 0;
425 		topology = "?";
426 		break;
427 	}
428 
429 	mpt_lprt(mpt, MPT_PRT_INFO,
430 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
431 	    "Speed %u-Gbit\n", topology,
432 	    mpt->mpt_fcport_page0.WWNN.High,
433 	    mpt->mpt_fcport_page0.WWNN.Low,
434 	    mpt->mpt_fcport_page0.WWPN.High,
435 	    mpt->mpt_fcport_page0.WWPN.Low,
436 	    mpt->mpt_fcport_speed);
437 #if __FreeBSD_version >= 500000
438 	{
439 		struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
440 		struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
441 
442 		snprintf(mpt->scinfo.fc.wwnn,
443 		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
444 		    mpt->mpt_fcport_page0.WWNN.High,
445 		    mpt->mpt_fcport_page0.WWNN.Low);
446 
447 		snprintf(mpt->scinfo.fc.wwpn,
448 		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
449 		    mpt->mpt_fcport_page0.WWPN.High,
450 		    mpt->mpt_fcport_page0.WWPN.Low);
451 
452 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
453 		       "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
454 		       "World Wide Node Name");
455 
456 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
457 		       "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
458 		       "World Wide Port Name");
459 
460 	}
461 #endif
462 	return (0);
463 }
464 
465 /*
466  * Set FC configuration information.
467  */
468 static int
469 mpt_set_initial_config_fc(struct mpt_softc *mpt)
470 {
471 
472 	CONFIG_PAGE_FC_PORT_1 fc;
473 	U32 fl;
474 	int r, doit = 0;
475 	int role;
476 
477 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
478 	    &fc.Header, FALSE, 5000);
479 	if (r) {
480 		mpt_prt(mpt, "failed to read FC page 1 header\n");
481 		return (mpt_fc_reset_link(mpt, 1));
482 	}
483 
484 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
485 	    &fc.Header, sizeof (fc), FALSE, 5000);
486 	if (r) {
487 		mpt_prt(mpt, "failed to read FC page 1\n");
488 		return (mpt_fc_reset_link(mpt, 1));
489 	}
490 
491 	/*
492 	 * Check our flags to make sure we support the role we want.
493 	 */
494 	doit = 0;
495 	role = 0;
496 	fl = le32toh(fc.Flags);;
497 
498 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
499 		role |= MPT_ROLE_INITIATOR;
500 	}
501 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
502 		role |= MPT_ROLE_TARGET;
503 	}
504 
505 	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
506 
507 	if (mpt->do_cfg_role == 0) {
508 		role = mpt->cfg_role;
509 	} else {
510 		mpt->do_cfg_role = 0;
511 	}
512 
513 	if (role != mpt->cfg_role) {
514 		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
515 			if ((role & MPT_ROLE_INITIATOR) == 0) {
516 				mpt_prt(mpt, "adding initiator role\n");
517 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
518 				doit++;
519 			} else {
520 				mpt_prt(mpt, "keeping initiator role\n");
521 			}
522 		} else if (role & MPT_ROLE_INITIATOR) {
523 			mpt_prt(mpt, "removing initiator role\n");
524 			doit++;
525 		}
526 		if (mpt->cfg_role & MPT_ROLE_TARGET) {
527 			if ((role & MPT_ROLE_TARGET) == 0) {
528 				mpt_prt(mpt, "adding target role\n");
529 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
530 				doit++;
531 			} else {
532 				mpt_prt(mpt, "keeping target role\n");
533 			}
534 		} else if (role & MPT_ROLE_TARGET) {
535 			mpt_prt(mpt, "removing target role\n");
536 			doit++;
537 		}
538 		mpt->role = mpt->cfg_role;
539 	}
540 
541 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
542 		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
543 			mpt_prt(mpt, "adding OXID option\n");
544 			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
545 			doit++;
546 		}
547 	}
548 
549 	if (doit) {
550 		fc.Flags = htole32(fl);
551 		r = mpt_write_cfg_page(mpt,
552 		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
553 		    sizeof(fc), FALSE, 5000);
554 		if (r != 0) {
555 			mpt_prt(mpt, "failed to update NVRAM with changes\n");
556 			return (0);
557 		}
558 		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
559 		    "effect until next reboot or IOC reset\n");
560 	}
561 	return (0);
562 }
563 
564 /*
565  * Read SAS configuration information. Nothing to do yet.
566  */
567 static int
568 mpt_read_config_info_sas(struct mpt_softc *mpt)
569 {
570 	return (0);
571 }
572 
573 /*
574  * Set SAS configuration information. Nothing to do yet.
575  */
576 static int
577 mpt_set_initial_config_sas(struct mpt_softc *mpt)
578 {
579 	return (0);
580 }
581 
582 /*
583  * Read SCSI configuration information
584  */
585 static int
586 mpt_read_config_info_spi(struct mpt_softc *mpt)
587 {
588 	int rv, i;
589 
590 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
591 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
592 	if (rv) {
593 		return (-1);
594 	}
595 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
596 	    mpt->mpt_port_page0.Header.PageVersion,
597 	    mpt->mpt_port_page0.Header.PageLength,
598 	    mpt->mpt_port_page0.Header.PageNumber,
599 	    mpt->mpt_port_page0.Header.PageType);
600 
601 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
602 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
603 	if (rv) {
604 		return (-1);
605 	}
606 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
607 	    mpt->mpt_port_page1.Header.PageVersion,
608 	    mpt->mpt_port_page1.Header.PageLength,
609 	    mpt->mpt_port_page1.Header.PageNumber,
610 	    mpt->mpt_port_page1.Header.PageType);
611 
612 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
613 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
614 	if (rv) {
615 		return (-1);
616 	}
617 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
618 	    mpt->mpt_port_page2.Header.PageVersion,
619 	    mpt->mpt_port_page2.Header.PageLength,
620 	    mpt->mpt_port_page2.Header.PageNumber,
621 	    mpt->mpt_port_page2.Header.PageType);
622 
623 	for (i = 0; i < 16; i++) {
624 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
625 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
626 		if (rv) {
627 			return (-1);
628 		}
629 		mpt_lprt(mpt, MPT_PRT_DEBUG,
630 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
631 		    mpt->mpt_dev_page0[i].Header.PageVersion,
632 		    mpt->mpt_dev_page0[i].Header.PageLength,
633 		    mpt->mpt_dev_page0[i].Header.PageNumber,
634 		    mpt->mpt_dev_page0[i].Header.PageType);
635 
636 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
637 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
638 		if (rv) {
639 			return (-1);
640 		}
641 		mpt_lprt(mpt, MPT_PRT_DEBUG,
642 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
643 		    mpt->mpt_dev_page1[i].Header.PageVersion,
644 		    mpt->mpt_dev_page1[i].Header.PageLength,
645 		    mpt->mpt_dev_page1[i].Header.PageNumber,
646 		    mpt->mpt_dev_page1[i].Header.PageType);
647 	}
648 
649 	/*
650 	 * At this point, we don't *have* to fail. As long as we have
651 	 * valid config header information, we can (barely) lurch
652 	 * along.
653 	 */
654 
655 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
656 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
657 	if (rv) {
658 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
659 	} else {
660 		mpt_lprt(mpt, MPT_PRT_DEBUG,
661 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
662 		    mpt->mpt_port_page0.Capabilities,
663 		    mpt->mpt_port_page0.PhysicalInterface);
664 	}
665 
666 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
667 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
668 	if (rv) {
669 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
670 	} else {
671 		mpt_lprt(mpt, MPT_PRT_DEBUG,
672 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
673 		    mpt->mpt_port_page1.Configuration,
674 		    mpt->mpt_port_page1.OnBusTimerValue);
675 	}
676 
677 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
678 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
679 	if (rv) {
680 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
681 	} else {
682 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
683 		    "Port Page 2: Flags %x Settings %x\n",
684 		    mpt->mpt_port_page2.PortFlags,
685 		    mpt->mpt_port_page2.PortSettings);
686 		for (i = 0; i < 16; i++) {
687 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
688 		  	    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
689 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
690 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
691 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
692 		}
693 	}
694 
695 	for (i = 0; i < 16; i++) {
696 		rv = mpt_read_cur_cfg_page(mpt, i,
697 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
698 		    FALSE, 5000);
699 		if (rv) {
700 			mpt_prt(mpt,
701 			    "cannot read SPI Target %d Device Page 0\n", i);
702 			continue;
703 		}
704 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
705 		    "target %d page 0: Negotiated Params %x Information %x\n",
706 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
707 		    mpt->mpt_dev_page0[i].Information);
708 
709 		rv = mpt_read_cur_cfg_page(mpt, i,
710 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
711 		    FALSE, 5000);
712 		if (rv) {
713 			mpt_prt(mpt,
714 			    "cannot read SPI Target %d Device Page 1\n", i);
715 			continue;
716 		}
717 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
718 		    "target %d page 1: Requested Params %x Configuration %x\n",
719 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
720 		    mpt->mpt_dev_page1[i].Configuration);
721 	}
722 	return (0);
723 }
724 
725 /*
726  * Validate SPI configuration information.
727  *
728  * In particular, validate SPI Port Page 1.
729  */
730 static int
731 mpt_set_initial_config_spi(struct mpt_softc *mpt)
732 {
733 	int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
734 	int error;
735 
736 	mpt->mpt_disc_enable = 0xff;
737 	mpt->mpt_tag_enable = 0;
738 
739 	if (mpt->mpt_port_page1.Configuration != pp1val) {
740 		CONFIG_PAGE_SCSI_PORT_1 tmp;
741 
742 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
743 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
744 		tmp = mpt->mpt_port_page1;
745 		tmp.Configuration = pp1val;
746 		error = mpt_write_cur_cfg_page(mpt, 0,
747 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
748 		if (error) {
749 			return (-1);
750 		}
751 		error = mpt_read_cur_cfg_page(mpt, 0,
752 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
753 		if (error) {
754 			return (-1);
755 		}
756 		if (tmp.Configuration != pp1val) {
757 			mpt_prt(mpt,
758 			    "failed to reset SPI Port Page 1 Config value\n");
759 			return (-1);
760 		}
761 		mpt->mpt_port_page1 = tmp;
762 	}
763 
764 	/*
765 	 * The purpose of this exercise is to get
766 	 * all targets back to async/narrow.
767 	 *
768 	 * We skip this step if the BIOS has already negotiated
769 	 * speeds with the targets and does not require us to
770 	 * do Domain Validation.
771 	 */
772 	i = mpt->mpt_port_page2.PortSettings &
773 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
774 	j = mpt->mpt_port_page2.PortFlags &
775 	    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
776 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* &&
777 	    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) {
778 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
779 		    "honoring BIOS transfer negotiations\n");
780 	} else {
781 		for (i = 0; i < 16; i++) {
782 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
783 			mpt->mpt_dev_page1[i].Configuration = 0;
784 			(void) mpt_update_spi_config(mpt, i);
785 		}
786 	}
787 	return (0);
788 }
789 
790 int
791 mpt_cam_enable(struct mpt_softc *mpt)
792 {
793 	if (mpt->is_fc) {
794 		if (mpt_read_config_info_fc(mpt)) {
795 			return (EIO);
796 		}
797 		if (mpt_set_initial_config_fc(mpt)) {
798 			return (EIO);
799 		}
800 	} else if (mpt->is_sas) {
801 		if (mpt_read_config_info_sas(mpt)) {
802 			return (EIO);
803 		}
804 		if (mpt_set_initial_config_sas(mpt)) {
805 			return (EIO);
806 		}
807 	} else if (mpt->is_spi) {
808 		if (mpt_read_config_info_spi(mpt)) {
809 			return (EIO);
810 		}
811 		if (mpt_set_initial_config_spi(mpt)) {
812 			return (EIO);
813 		}
814 	}
815 	/*
816 	 * If we're in target mode, hang out resources now
817 	 * so we don't cause the world to hang talking to us.
818 	 */
819 	if (mpt->role & MPT_ROLE_TARGET) {
820 		/*
821 		 * Try to add some target command resources
822 		 */
823 		if (mpt_add_target_commands(mpt) == FALSE) {
824 			return (ENOMEM);
825 		}
826 	}
827 	return (0);
828 }
829 
830 void
831 mpt_cam_detach(struct mpt_softc *mpt)
832 {
833 	mpt_handler_t handler;
834 
835 	mpt_terminate_recovery_thread(mpt);
836 
837 	handler.reply_handler = mpt_scsi_reply_handler;
838 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
839 			       scsi_io_handler_id);
840 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
841 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
842 			       scsi_tmf_handler_id);
843 	handler.reply_handler = mpt_fc_els_reply_handler;
844 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
845 			       fc_els_handler_id);
846 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
847 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
848 			       mpt->scsi_tgt_handler_id);
849 
850 	if (mpt->tmf_req != NULL) {
851 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
852 		mpt_free_request(mpt, mpt->tmf_req);
853 		mpt->tmf_req = NULL;
854 	}
855 
856 	if (mpt->sim != NULL) {
857 		MPTLOCK_2_CAMLOCK(mpt);
858 		xpt_free_path(mpt->path);
859 		xpt_bus_deregister(cam_sim_path(mpt->sim));
860 		cam_sim_free(mpt->sim, TRUE);
861 		mpt->sim = NULL;
862 		CAMLOCK_2_MPTLOCK(mpt);
863 	}
864 
865 	if (mpt->phydisk_sim != NULL) {
866 		MPTLOCK_2_CAMLOCK(mpt);
867 		xpt_free_path(mpt->phydisk_path);
868 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
869 		cam_sim_free(mpt->phydisk_sim, TRUE);
870 		mpt->phydisk_sim = NULL;
871 		CAMLOCK_2_MPTLOCK(mpt);
872 	}
873 }
874 
875 /* This routine is used after a system crash to dump core onto the swap device.
876  */
877 static void
878 mpt_poll(struct cam_sim *sim)
879 {
880 	struct mpt_softc *mpt;
881 
882 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
883 	MPT_LOCK(mpt);
884 	mpt_intr(mpt);
885 	MPT_UNLOCK(mpt);
886 }
887 
888 /*
889  * Watchdog timeout routine for SCSI requests.
890  */
891 static void
892 mpt_timeout(void *arg)
893 {
894 	union ccb	 *ccb;
895 	struct mpt_softc *mpt;
896 	request_t	 *req;
897 
898 	ccb = (union ccb *)arg;
899 	mpt = ccb->ccb_h.ccb_mpt_ptr;
900 
901 	MPT_LOCK(mpt);
902 	req = ccb->ccb_h.ccb_req_ptr;
903 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
904 	    req->serno, ccb, req->ccb);
905 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
906 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
907 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
908 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
909 		req->state |= REQ_STATE_TIMEDOUT;
910 		mpt_wakeup_recovery_thread(mpt);
911 	}
912 	MPT_UNLOCK(mpt);
913 }
914 
915 /*
916  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
917  *
918  * Takes a list of physical segments and builds the SGL for SCSI IO command
919  * and forwards the commard to the IOC after one last check that CAM has not
920  * aborted the transaction.
921  */
922 static void
923 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
924 {
925 	request_t *req, *trq;
926 	char *mpt_off;
927 	union ccb *ccb;
928 	struct mpt_softc *mpt;
929 	int seg, first_lim;
930 	uint32_t flags, nxt_off;
931 	void *sglp = NULL;
932 	MSG_REQUEST_HEADER *hdrp;
933 	SGE_SIMPLE64 *se;
934 	SGE_CHAIN64 *ce;
935 	int istgt = 0;
936 
937 	req = (request_t *)arg;
938 	ccb = req->ccb;
939 
940 	mpt = ccb->ccb_h.ccb_mpt_ptr;
941 	req = ccb->ccb_h.ccb_req_ptr;
942 
943 	hdrp = req->req_vbuf;
944 	mpt_off = req->req_vbuf;
945 
946 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
947 		error = EFBIG;
948 	}
949 
950 	if (error == 0) {
951 		switch (hdrp->Function) {
952 		case MPI_FUNCTION_SCSI_IO_REQUEST:
953 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
954 			istgt = 0;
955 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
956 			break;
957 		case MPI_FUNCTION_TARGET_ASSIST:
958 			istgt = 1;
959 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
960 			break;
961 		default:
962 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
963 			    hdrp->Function);
964 			error = EINVAL;
965 			break;
966 		}
967 	}
968 
969 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
970 		error = EFBIG;
971 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
972 		    nseg, mpt->max_seg_cnt);
973 	}
974 
975 bad:
976 	if (error != 0) {
977 		if (error != EFBIG && error != ENOMEM) {
978 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
979 		}
980 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
981 			cam_status status;
982 			mpt_freeze_ccb(ccb);
983 			if (error == EFBIG) {
984 				status = CAM_REQ_TOO_BIG;
985 			} else if (error == ENOMEM) {
986 				if (mpt->outofbeer == 0) {
987 					mpt->outofbeer = 1;
988 					xpt_freeze_simq(mpt->sim, 1);
989 					mpt_lprt(mpt, MPT_PRT_DEBUG,
990 					    "FREEZEQ\n");
991 				}
992 				status = CAM_REQUEUE_REQ;
993 			} else {
994 				status = CAM_REQ_CMP_ERR;
995 			}
996 			mpt_set_ccb_status(ccb, status);
997 		}
998 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
999 			request_t *cmd_req =
1000 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1001 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1002 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1003 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1004 		}
1005 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1006 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1007 		xpt_done(ccb);
1008 		CAMLOCK_2_MPTLOCK(mpt);
1009 		mpt_free_request(mpt, req);
1010 		MPTLOCK_2_CAMLOCK(mpt);
1011 		return;
1012 	}
1013 
1014 	/*
1015 	 * No data to transfer?
1016 	 * Just make a single simple SGL with zero length.
1017 	 */
1018 
1019 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1020 		int tidx = ((char *)sglp) - mpt_off;
1021 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1022 	}
1023 
1024 	if (nseg == 0) {
1025 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1026 		MPI_pSGE_SET_FLAGS(se1,
1027 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1028 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1029 		goto out;
1030 	}
1031 
1032 
1033 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1034 	if (istgt == 0) {
1035 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1036 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1037 		}
1038 	} else {
1039 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1040 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1041 		}
1042 	}
1043 
1044 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1045 		bus_dmasync_op_t op;
1046 		if (istgt == 0) {
1047 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1048 				op = BUS_DMASYNC_PREREAD;
1049 			} else {
1050 				op = BUS_DMASYNC_PREWRITE;
1051 			}
1052 		} else {
1053 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1054 				op = BUS_DMASYNC_PREWRITE;
1055 			} else {
1056 				op = BUS_DMASYNC_PREREAD;
1057 			}
1058 		}
1059 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1060 	}
1061 
1062 	/*
1063 	 * Okay, fill in what we can at the end of the command frame.
1064 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1065 	 * the command frame.
1066 	 *
1067 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1068 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
1069 	 * that.
1070 	 */
1071 
1072 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1073 		first_lim = nseg;
1074 	} else {
1075 		/*
1076 		 * Leave room for CHAIN element
1077 		 */
1078 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1079 	}
1080 
1081 	se = (SGE_SIMPLE64 *) sglp;
1082 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1083 		uint32_t tf;
1084 
1085 		memset(se, 0, sizeof (*se));
1086 		se->Address.Low = dm_segs->ds_addr;
1087 		if (sizeof(bus_addr_t) > 4) {
1088 			se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
1089 		}
1090 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1091 		tf = flags;
1092 		if (seg == first_lim - 1) {
1093 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1094 		}
1095 		if (seg == nseg - 1) {
1096 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1097 				MPI_SGE_FLAGS_END_OF_BUFFER;
1098 		}
1099 		MPI_pSGE_SET_FLAGS(se, tf);
1100 	}
1101 
1102 	if (seg == nseg) {
1103 		goto out;
1104 	}
1105 
1106 	/*
1107 	 * Tell the IOC where to find the first chain element.
1108 	 */
1109 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1110 	nxt_off = MPT_RQSL(mpt);
1111 	trq = req;
1112 
1113 	/*
1114 	 * Make up the rest of the data segments out of a chain element
1115 	 * (contiained in the current request frame) which points to
1116 	 * SIMPLE64 elements in the next request frame, possibly ending
1117 	 * with *another* chain element (if there's more).
1118 	 */
1119 	while (seg < nseg) {
1120 		int this_seg_lim;
1121 		uint32_t tf, cur_off;
1122 		bus_addr_t chain_list_addr;
1123 
1124 		/*
1125 		 * Point to the chain descriptor. Note that the chain
1126 		 * descriptor is at the end of the *previous* list (whether
1127 		 * chain or simple).
1128 		 */
1129 		ce = (SGE_CHAIN64 *) se;
1130 
1131 		/*
1132 		 * Before we change our current pointer, make  sure we won't
1133 		 * overflow the request area with this frame. Note that we
1134 		 * test against 'greater than' here as it's okay in this case
1135 		 * to have next offset be just outside the request area.
1136 		 */
1137 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1138 			nxt_off = MPT_REQUEST_AREA;
1139 			goto next_chain;
1140 		}
1141 
1142 		/*
1143 		 * Set our SGE element pointer to the beginning of the chain
1144 		 * list and update our next chain list offset.
1145 		 */
1146 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1147 		cur_off = nxt_off;
1148 		nxt_off += MPT_RQSL(mpt);
1149 
1150 		/*
1151 		 * Now initialized the chain descriptor.
1152 		 */
1153 		memset(ce, 0, sizeof (*ce));
1154 
1155 		/*
1156 		 * Get the physical address of the chain list.
1157 		 */
1158 		chain_list_addr = trq->req_pbuf;
1159 		chain_list_addr += cur_off;
1160 		if (sizeof (bus_addr_t) > 4) {
1161 			ce->Address.High =
1162 			    (uint32_t) ((uint64_t)chain_list_addr >> 32);
1163 		}
1164 		ce->Address.Low = (uint32_t) chain_list_addr;
1165 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1166 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1167 
1168 		/*
1169 		 * If we have more than a frame's worth of segments left,
1170 		 * set up the chain list to have the last element be another
1171 		 * chain descriptor.
1172 		 */
1173 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1174 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1175 			/*
1176 			 * The length of the chain is the length in bytes of the
1177 			 * number of segments plus the next chain element.
1178 			 *
1179 			 * The next chain descriptor offset is the length,
1180 			 * in words, of the number of segments.
1181 			 */
1182 			ce->Length = (this_seg_lim - seg) *
1183 			    sizeof (SGE_SIMPLE64);
1184 			ce->NextChainOffset = ce->Length >> 2;
1185 			ce->Length += sizeof (SGE_CHAIN64);
1186 		} else {
1187 			this_seg_lim = nseg;
1188 			ce->Length = (this_seg_lim - seg) *
1189 			    sizeof (SGE_SIMPLE64);
1190 		}
1191 
1192 		/*
1193 		 * Fill in the chain list SGE elements with our segment data.
1194 		 *
1195 		 * If we're the last element in this chain list, set the last
1196 		 * element flag. If we're the completely last element period,
1197 		 * set the end of list and end of buffer flags.
1198 		 */
1199 		while (seg < this_seg_lim) {
1200 			memset(se, 0, sizeof (*se));
1201 			se->Address.Low = dm_segs->ds_addr;
1202 			if (sizeof (bus_addr_t) > 4) {
1203 				se->Address.High =
1204 				    ((uint64_t)dm_segs->ds_addr) >> 32;
1205 			}
1206 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1207 			tf = flags;
1208 			if (seg ==  this_seg_lim - 1) {
1209 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1210 			}
1211 			if (seg == nseg - 1) {
1212 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1213 					MPI_SGE_FLAGS_END_OF_BUFFER;
1214 			}
1215 			MPI_pSGE_SET_FLAGS(se, tf);
1216 			se++;
1217 			seg++;
1218 			dm_segs++;
1219 		}
1220 
1221     next_chain:
1222 		/*
1223 		 * If we have more segments to do and we've used up all of
1224 		 * the space in a request area, go allocate another one
1225 		 * and chain to that.
1226 		 */
1227 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1228 			request_t *nrq;
1229 
1230 			CAMLOCK_2_MPTLOCK(mpt);
1231 			nrq = mpt_get_request(mpt, FALSE);
1232 			MPTLOCK_2_CAMLOCK(mpt);
1233 
1234 			if (nrq == NULL) {
1235 				error = ENOMEM;
1236 				goto bad;
1237 			}
1238 
1239 			/*
1240 			 * Append the new request area on the tail of our list.
1241 			 */
1242 			if ((trq = req->chain) == NULL) {
1243 				req->chain = nrq;
1244 			} else {
1245 				while (trq->chain != NULL) {
1246 					trq = trq->chain;
1247 				}
1248 				trq->chain = nrq;
1249 			}
1250 			trq = nrq;
1251 			mpt_off = trq->req_vbuf;
1252 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1253 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1254 			}
1255 			nxt_off = 0;
1256 		}
1257 	}
1258 out:
1259 
1260 	/*
1261 	 * Last time we need to check if this CCB needs to be aborted.
1262 	 */
1263 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1264 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1265 			request_t *cmd_req =
1266 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1267 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1268 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1269 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1270 		}
1271 		mpt_prt(mpt,
1272 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1273 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1274 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1275 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1276 		}
1277 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1278 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1279 		xpt_done(ccb);
1280 		CAMLOCK_2_MPTLOCK(mpt);
1281 		mpt_free_request(mpt, req);
1282 		MPTLOCK_2_CAMLOCK(mpt);
1283 		return;
1284 	}
1285 
1286 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1287 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1288 		ccb->ccb_h.timeout_ch =
1289 			timeout(mpt_timeout, (caddr_t)ccb,
1290 				(ccb->ccb_h.timeout * hz) / 1000);
1291 	} else {
1292 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1293 	}
1294 	if (mpt->verbose > MPT_PRT_DEBUG) {
1295 		int nc = 0;
1296 		mpt_print_request(req->req_vbuf);
1297 		for (trq = req->chain; trq; trq = trq->chain) {
1298 			printf("  Additional Chain Area %d\n", nc++);
1299 			mpt_dump_sgl(trq->req_vbuf, 0);
1300 		}
1301 	}
1302 
1303 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1304 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1305 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1306 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1307 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1308 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1309 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1310 		} else {
1311 			tgt->state = TGT_STATE_MOVING_DATA;
1312 		}
1313 #else
1314 		tgt->state = TGT_STATE_MOVING_DATA;
1315 #endif
1316 	}
1317 	CAMLOCK_2_MPTLOCK(mpt);
1318 	mpt_send_cmd(mpt, req);
1319 	MPTLOCK_2_CAMLOCK(mpt);
1320 }
1321 
1322 static void
1323 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1324 {
1325 	request_t *req, *trq;
1326 	char *mpt_off;
1327 	union ccb *ccb;
1328 	struct mpt_softc *mpt;
1329 	int seg, first_lim;
1330 	uint32_t flags, nxt_off;
1331 	void *sglp = NULL;
1332 	MSG_REQUEST_HEADER *hdrp;
1333 	SGE_SIMPLE32 *se;
1334 	SGE_CHAIN32 *ce;
1335 	int istgt = 0;
1336 
1337 	req = (request_t *)arg;
1338 	ccb = req->ccb;
1339 
1340 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1341 	req = ccb->ccb_h.ccb_req_ptr;
1342 
1343 	hdrp = req->req_vbuf;
1344 	mpt_off = req->req_vbuf;
1345 
1346 
1347 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1348 		error = EFBIG;
1349 	}
1350 
1351 	if (error == 0) {
1352 		switch (hdrp->Function) {
1353 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1354 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1355 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1356 			break;
1357 		case MPI_FUNCTION_TARGET_ASSIST:
1358 			istgt = 1;
1359 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1360 			break;
1361 		default:
1362 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1363 			    hdrp->Function);
1364 			error = EINVAL;
1365 			break;
1366 		}
1367 	}
1368 
1369 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1370 		error = EFBIG;
1371 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1372 		    nseg, mpt->max_seg_cnt);
1373 	}
1374 
1375 bad:
1376 	if (error != 0) {
1377 		if (error != EFBIG && error != ENOMEM) {
1378 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1379 		}
1380 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1381 			cam_status status;
1382 			mpt_freeze_ccb(ccb);
1383 			if (error == EFBIG) {
1384 				status = CAM_REQ_TOO_BIG;
1385 			} else if (error == ENOMEM) {
1386 				if (mpt->outofbeer == 0) {
1387 					mpt->outofbeer = 1;
1388 					xpt_freeze_simq(mpt->sim, 1);
1389 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1390 					    "FREEZEQ\n");
1391 				}
1392 				status = CAM_REQUEUE_REQ;
1393 			} else {
1394 				status = CAM_REQ_CMP_ERR;
1395 			}
1396 			mpt_set_ccb_status(ccb, status);
1397 		}
1398 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1399 			request_t *cmd_req =
1400 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1401 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1402 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1403 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1404 		}
1405 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1406 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1407 		xpt_done(ccb);
1408 		CAMLOCK_2_MPTLOCK(mpt);
1409 		mpt_free_request(mpt, req);
1410 		MPTLOCK_2_CAMLOCK(mpt);
1411 		return;
1412 	}
1413 
1414 	/*
1415 	 * No data to transfer?
1416 	 * Just make a single simple SGL with zero length.
1417 	 */
1418 
1419 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1420 		int tidx = ((char *)sglp) - mpt_off;
1421 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1422 	}
1423 
1424 	if (nseg == 0) {
1425 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1426 		MPI_pSGE_SET_FLAGS(se1,
1427 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1428 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1429 		goto out;
1430 	}
1431 
1432 
1433 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1434 	if (istgt == 0) {
1435 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1436 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1437 		}
1438 	} else {
1439 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1440 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1441 		}
1442 	}
1443 
1444 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1445 		bus_dmasync_op_t op;
1446 		if (istgt) {
1447 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1448 				op = BUS_DMASYNC_PREREAD;
1449 			} else {
1450 				op = BUS_DMASYNC_PREWRITE;
1451 			}
1452 		} else {
1453 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1454 				op = BUS_DMASYNC_PREWRITE;
1455 			} else {
1456 				op = BUS_DMASYNC_PREREAD;
1457 			}
1458 		}
1459 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1460 	}
1461 
1462 	/*
1463 	 * Okay, fill in what we can at the end of the command frame.
1464 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1465 	 * the command frame.
1466 	 *
1467 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1468 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1469 	 * that.
1470 	 */
1471 
1472 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1473 		first_lim = nseg;
1474 	} else {
1475 		/*
1476 		 * Leave room for CHAIN element
1477 		 */
1478 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1479 	}
1480 
1481 	se = (SGE_SIMPLE32 *) sglp;
1482 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1483 		uint32_t tf;
1484 
1485 		memset(se, 0,sizeof (*se));
1486 		se->Address = dm_segs->ds_addr;
1487 
1488 
1489 
1490 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1491 		tf = flags;
1492 		if (seg == first_lim - 1) {
1493 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1494 		}
1495 		if (seg == nseg - 1) {
1496 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1497 				MPI_SGE_FLAGS_END_OF_BUFFER;
1498 		}
1499 		MPI_pSGE_SET_FLAGS(se, tf);
1500 	}
1501 
1502 	if (seg == nseg) {
1503 		goto out;
1504 	}
1505 
1506 	/*
1507 	 * Tell the IOC where to find the first chain element.
1508 	 */
1509 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1510 	nxt_off = MPT_RQSL(mpt);
1511 	trq = req;
1512 
1513 	/*
1514 	 * Make up the rest of the data segments out of a chain element
1515 	 * (contiained in the current request frame) which points to
1516 	 * SIMPLE32 elements in the next request frame, possibly ending
1517 	 * with *another* chain element (if there's more).
1518 	 */
1519 	while (seg < nseg) {
1520 		int this_seg_lim;
1521 		uint32_t tf, cur_off;
1522 		bus_addr_t chain_list_addr;
1523 
1524 		/*
1525 		 * Point to the chain descriptor. Note that the chain
1526 		 * descriptor is at the end of the *previous* list (whether
1527 		 * chain or simple).
1528 		 */
1529 		ce = (SGE_CHAIN32 *) se;
1530 
1531 		/*
1532 		 * Before we change our current pointer, make  sure we won't
1533 		 * overflow the request area with this frame. Note that we
1534 		 * test against 'greater than' here as it's okay in this case
1535 		 * to have next offset be just outside the request area.
1536 		 */
1537 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1538 			nxt_off = MPT_REQUEST_AREA;
1539 			goto next_chain;
1540 		}
1541 
1542 		/*
1543 		 * Set our SGE element pointer to the beginning of the chain
1544 		 * list and update our next chain list offset.
1545 		 */
1546 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1547 		cur_off = nxt_off;
1548 		nxt_off += MPT_RQSL(mpt);
1549 
1550 		/*
1551 		 * Now initialized the chain descriptor.
1552 		 */
1553 		memset(ce, 0, sizeof (*ce));
1554 
1555 		/*
1556 		 * Get the physical address of the chain list.
1557 		 */
1558 		chain_list_addr = trq->req_pbuf;
1559 		chain_list_addr += cur_off;
1560 
1561 
1562 
1563 		ce->Address = chain_list_addr;
1564 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1565 
1566 
1567 		/*
1568 		 * If we have more than a frame's worth of segments left,
1569 		 * set up the chain list to have the last element be another
1570 		 * chain descriptor.
1571 		 */
1572 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1573 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1574 			/*
1575 			 * The length of the chain is the length in bytes of the
1576 			 * number of segments plus the next chain element.
1577 			 *
1578 			 * The next chain descriptor offset is the length,
1579 			 * in words, of the number of segments.
1580 			 */
1581 			ce->Length = (this_seg_lim - seg) *
1582 			    sizeof (SGE_SIMPLE32);
1583 			ce->NextChainOffset = ce->Length >> 2;
1584 			ce->Length += sizeof (SGE_CHAIN32);
1585 		} else {
1586 			this_seg_lim = nseg;
1587 			ce->Length = (this_seg_lim - seg) *
1588 			    sizeof (SGE_SIMPLE32);
1589 		}
1590 
1591 		/*
1592 		 * Fill in the chain list SGE elements with our segment data.
1593 		 *
1594 		 * If we're the last element in this chain list, set the last
1595 		 * element flag. If we're the completely last element period,
1596 		 * set the end of list and end of buffer flags.
1597 		 */
1598 		while (seg < this_seg_lim) {
1599 			memset(se, 0, sizeof (*se));
1600 			se->Address = dm_segs->ds_addr;
1601 
1602 
1603 
1604 
1605 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1606 			tf = flags;
1607 			if (seg ==  this_seg_lim - 1) {
1608 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1609 			}
1610 			if (seg == nseg - 1) {
1611 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1612 					MPI_SGE_FLAGS_END_OF_BUFFER;
1613 			}
1614 			MPI_pSGE_SET_FLAGS(se, tf);
1615 			se++;
1616 			seg++;
1617 			dm_segs++;
1618 		}
1619 
1620     next_chain:
1621 		/*
1622 		 * If we have more segments to do and we've used up all of
1623 		 * the space in a request area, go allocate another one
1624 		 * and chain to that.
1625 		 */
1626 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1627 			request_t *nrq;
1628 
1629 			CAMLOCK_2_MPTLOCK(mpt);
1630 			nrq = mpt_get_request(mpt, FALSE);
1631 			MPTLOCK_2_CAMLOCK(mpt);
1632 
1633 			if (nrq == NULL) {
1634 				error = ENOMEM;
1635 				goto bad;
1636 			}
1637 
1638 			/*
1639 			 * Append the new request area on the tail of our list.
1640 			 */
1641 			if ((trq = req->chain) == NULL) {
1642 				req->chain = nrq;
1643 			} else {
1644 				while (trq->chain != NULL) {
1645 					trq = trq->chain;
1646 				}
1647 				trq->chain = nrq;
1648 			}
1649 			trq = nrq;
1650 			mpt_off = trq->req_vbuf;
1651 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1652 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1653 			}
1654 			nxt_off = 0;
1655 		}
1656 	}
1657 out:
1658 
1659 	/*
1660 	 * Last time we need to check if this CCB needs to be aborted.
1661 	 */
1662 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1663 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1664 			request_t *cmd_req =
1665 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1666 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1667 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1668 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1669 		}
1670 		mpt_prt(mpt,
1671 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1672 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1673 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1674 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1675 		}
1676 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1677 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1678 		xpt_done(ccb);
1679 		CAMLOCK_2_MPTLOCK(mpt);
1680 		mpt_free_request(mpt, req);
1681 		MPTLOCK_2_CAMLOCK(mpt);
1682 		return;
1683 	}
1684 
1685 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1686 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1687 		ccb->ccb_h.timeout_ch =
1688 			timeout(mpt_timeout, (caddr_t)ccb,
1689 				(ccb->ccb_h.timeout * hz) / 1000);
1690 	} else {
1691 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1692 	}
1693 	if (mpt->verbose > MPT_PRT_DEBUG) {
1694 		int nc = 0;
1695 		mpt_print_request(req->req_vbuf);
1696 		for (trq = req->chain; trq; trq = trq->chain) {
1697 			printf("  Additional Chain Area %d\n", nc++);
1698 			mpt_dump_sgl(trq->req_vbuf, 0);
1699 		}
1700 	}
1701 
1702 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1703 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1704 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1705 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1706 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1707 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1708 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1709 		} else {
1710 			tgt->state = TGT_STATE_MOVING_DATA;
1711 		}
1712 #else
1713 		tgt->state = TGT_STATE_MOVING_DATA;
1714 #endif
1715 	}
1716 	CAMLOCK_2_MPTLOCK(mpt);
1717 	mpt_send_cmd(mpt, req);
1718 	MPTLOCK_2_CAMLOCK(mpt);
1719 }
1720 
1721 static void
1722 mpt_start(struct cam_sim *sim, union ccb *ccb)
1723 {
1724 	request_t *req;
1725 	struct mpt_softc *mpt;
1726 	MSG_SCSI_IO_REQUEST *mpt_req;
1727 	struct ccb_scsiio *csio = &ccb->csio;
1728 	struct ccb_hdr *ccbh = &ccb->ccb_h;
1729 	bus_dmamap_callback_t *cb;
1730 	target_id_t tgt;
1731 	int raid_passthru;
1732 
1733 	/* Get the pointer for the physical addapter */
1734 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1735 	raid_passthru = (sim == mpt->phydisk_sim);
1736 
1737 	CAMLOCK_2_MPTLOCK(mpt);
1738 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
1739 		if (mpt->outofbeer == 0) {
1740 			mpt->outofbeer = 1;
1741 			xpt_freeze_simq(mpt->sim, 1);
1742 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
1743 		}
1744 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1745 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
1746 		MPTLOCK_2_CAMLOCK(mpt);
1747 		xpt_done(ccb);
1748 		return;
1749 	}
1750 #ifdef	INVARIANTS
1751 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
1752 #endif
1753 	MPTLOCK_2_CAMLOCK(mpt);
1754 
1755 	if (sizeof (bus_addr_t) > 4) {
1756 		cb = mpt_execute_req_a64;
1757 	} else {
1758 		cb = mpt_execute_req;
1759 	}
1760 
1761 	/*
1762 	 * Link the ccb and the request structure so we can find
1763 	 * the other knowing either the request or the ccb
1764 	 */
1765 	req->ccb = ccb;
1766 	ccb->ccb_h.ccb_req_ptr = req;
1767 
1768 	/* Now we build the command for the IOC */
1769 	mpt_req = req->req_vbuf;
1770 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
1771 
1772 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1773 	if (raid_passthru) {
1774 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
1775 		CAMLOCK_2_MPTLOCK(mpt);
1776 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
1777 			MPTLOCK_2_CAMLOCK(mpt);
1778 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1779 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
1780 			xpt_done(ccb);
1781 			return;
1782 		}
1783 		MPTLOCK_2_CAMLOCK(mpt);
1784 		mpt_req->Bus = 0;	/* we never set bus here */
1785 	} else {
1786 		tgt = ccb->ccb_h.target_id;
1787 		mpt_req->Bus = 0;	/* XXX */
1788 
1789 	}
1790 	mpt_req->SenseBufferLength =
1791 		(csio->sense_len < MPT_SENSE_SIZE) ?
1792 		 csio->sense_len : MPT_SENSE_SIZE;
1793 
1794 	/*
1795 	 * We use the message context to find the request structure when we
1796 	 * Get the command completion interrupt from the IOC.
1797 	 */
1798 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
1799 
1800 	/* Which physical device to do the I/O on */
1801 	mpt_req->TargetID = tgt;
1802 
1803 	/* We assume a single level LUN type */
1804 	if (ccb->ccb_h.target_lun >= 256) {
1805 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
1806 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
1807 	} else {
1808 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
1809 	}
1810 
1811 	/* Set the direction of the transfer */
1812 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1813 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
1814 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1815 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
1816 	} else {
1817 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
1818 	}
1819 
1820 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1821 		switch(ccb->csio.tag_action) {
1822 		case MSG_HEAD_OF_Q_TAG:
1823 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
1824 			break;
1825 		case MSG_ACA_TASK:
1826 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
1827 			break;
1828 		case MSG_ORDERED_Q_TAG:
1829 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
1830 			break;
1831 		case MSG_SIMPLE_Q_TAG:
1832 		default:
1833 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1834 			break;
1835 		}
1836 	} else {
1837 		if (mpt->is_fc || mpt->is_sas) {
1838 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1839 		} else {
1840 			/* XXX No such thing for a target doing packetized. */
1841 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
1842 		}
1843 	}
1844 
1845 	if (mpt->is_spi) {
1846 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1847 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
1848 		}
1849 	}
1850 
1851 	/* Copy the scsi command block into place */
1852 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1853 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
1854 	} else {
1855 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
1856 	}
1857 
1858 	mpt_req->CDBLength = csio->cdb_len;
1859 	mpt_req->DataLength = csio->dxfer_len;
1860 	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
1861 
1862 	/*
1863 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
1864 	 */
1865 	if (mpt->verbose == MPT_PRT_DEBUG) {
1866 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
1867 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
1868 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
1869 		if (mpt_req->Control != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
1870 			mpt_prtc(mpt, "(%s %u byte%s ",
1871 			    (mpt_req->Control == MPI_SCSIIO_CONTROL_READ)?
1872 			    "read" : "write",  csio->dxfer_len,
1873 			    (csio->dxfer_len == 1)? ")" : "s)");
1874 		}
1875 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
1876 		    ccb->ccb_h.target_lun, req, req->serno);
1877 	}
1878 
1879 	/*
1880 	 * If we have any data to send with this command map it into bus space.
1881 	 */
1882 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1883 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
1884 			/*
1885 			 * We've been given a pointer to a single buffer.
1886 			 */
1887 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
1888 				/*
1889 				 * Virtual address that needs to translated into
1890 				 * one or more physical address ranges.
1891 				 */
1892 				int error;
1893 				int s = splsoftvm();
1894 				error = bus_dmamap_load(mpt->buffer_dmat,
1895 				    req->dmap, csio->data_ptr, csio->dxfer_len,
1896 				    cb, req, 0);
1897 				splx(s);
1898 				if (error == EINPROGRESS) {
1899 					/*
1900 					 * So as to maintain ordering,
1901 					 * freeze the controller queue
1902 					 * until our mapping is
1903 					 * returned.
1904 					 */
1905 					xpt_freeze_simq(mpt->sim, 1);
1906 					ccbh->status |= CAM_RELEASE_SIMQ;
1907 				}
1908 			} else {
1909 				/*
1910 				 * We have been given a pointer to single
1911 				 * physical buffer.
1912 				 */
1913 				struct bus_dma_segment seg;
1914 				seg.ds_addr =
1915 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1916 				seg.ds_len = csio->dxfer_len;
1917 				(*cb)(req, &seg, 1, 0);
1918 			}
1919 		} else {
1920 			/*
1921 			 * We have been given a list of addresses.
1922 			 * This case could be easily supported but they are not
1923 			 * currently generated by the CAM subsystem so there
1924 			 * is no point in wasting the time right now.
1925 			 */
1926 			struct bus_dma_segment *segs;
1927 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
1928 				(*cb)(req, NULL, 0, EFAULT);
1929 			} else {
1930 				/* Just use the segments provided */
1931 				segs = (struct bus_dma_segment *)csio->data_ptr;
1932 				(*cb)(req, segs, csio->sglist_cnt, 0);
1933 			}
1934 		}
1935 	} else {
1936 		(*cb)(req, NULL, 0, 0);
1937 	}
1938 }
1939 
1940 static int
1941 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
1942     int sleep_ok)
1943 {
1944 	int   error;
1945 	uint16_t status;
1946 	uint8_t response;
1947 
1948 	error = mpt_scsi_send_tmf(mpt,
1949 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
1950 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
1951 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1952 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
1953 	    0,	/* XXX How do I get the channel ID? */
1954 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
1955 	    lun != CAM_LUN_WILDCARD ? lun : 0,
1956 	    0, sleep_ok);
1957 
1958 	if (error != 0) {
1959 		/*
1960 		 * mpt_scsi_send_tmf hard resets on failure, so no
1961 		 * need to do so here.
1962 		 */
1963 		mpt_prt(mpt,
1964 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
1965 		return (EIO);
1966 	}
1967 
1968 	/* Wait for bus reset to be processed by the IOC. */
1969 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
1970 	    REQ_STATE_DONE, sleep_ok, 5000);
1971 
1972 	status = mpt->tmf_req->IOCStatus;
1973 	response = mpt->tmf_req->ResponseCode;
1974 	mpt->tmf_req->state = REQ_STATE_FREE;
1975 
1976 	if (error) {
1977 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
1978 		    "Resetting controller.\n");
1979 		mpt_reset(mpt, TRUE);
1980 		return (ETIMEDOUT);
1981 	}
1982 
1983 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1984 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
1985 		    "Resetting controller.\n", status);
1986 		mpt_reset(mpt, TRUE);
1987 		return (EIO);
1988 	}
1989 
1990 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
1991 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
1992 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
1993 		    "Resetting controller.\n", response);
1994 		mpt_reset(mpt, TRUE);
1995 		return (EIO);
1996 	}
1997 	return (0);
1998 }
1999 
2000 static int
2001 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2002 {
2003 	int r = 0;
2004 	request_t *req;
2005 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2006 
2007  	req = mpt_get_request(mpt, FALSE);
2008 	if (req == NULL) {
2009 		return (ENOMEM);
2010 	}
2011 	fc = req->req_vbuf;
2012 	memset(fc, 0, sizeof(*fc));
2013 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2014 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2015 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2016 	mpt_send_cmd(mpt, req);
2017 	if (dowait) {
2018 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2019 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2020 		if (r == 0) {
2021 			mpt_free_request(mpt, req);
2022 		}
2023 	}
2024 	return (r);
2025 }
2026 
2027 static int
2028 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2029 	      MSG_EVENT_NOTIFY_REPLY *msg)
2030 {
2031 	switch(msg->Event & 0xFF) {
2032 	case MPI_EVENT_UNIT_ATTENTION:
2033 		mpt_prt(mpt, "Bus: 0x%02x TargetID: 0x%02x\n",
2034 		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
2035 		break;
2036 
2037 	case MPI_EVENT_IOC_BUS_RESET:
2038 		/* We generated a bus reset */
2039 		mpt_prt(mpt, "IOC Bus Reset Port: %d\n",
2040 		    (msg->Data[0] >> 8) & 0xff);
2041 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2042 		break;
2043 
2044 	case MPI_EVENT_EXT_BUS_RESET:
2045 		/* Someone else generated a bus reset */
2046 		mpt_prt(mpt, "External Bus Reset Detected\n");
2047 		/*
2048 		 * These replies don't return EventData like the MPI
2049 		 * spec says they do
2050 		 */
2051 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2052 		break;
2053 
2054 	case MPI_EVENT_RESCAN:
2055 		/*
2056 		 * In general this means a device has been added to the loop.
2057 		 */
2058 		mpt_prt(mpt, "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
2059 /*		xpt_async(AC_FOUND_DEVICE, path, NULL);  */
2060 		break;
2061 
2062 	case MPI_EVENT_LINK_STATUS_CHANGE:
2063 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2064 		    (msg->Data[1] >> 8) & 0xff,
2065 		    ((msg->Data[0] & 0xff) == 0)?  "Failed" : "Active");
2066 		break;
2067 
2068 	case MPI_EVENT_LOOP_STATE_CHANGE:
2069 		switch ((msg->Data[0] >> 16) & 0xff) {
2070 		case 0x01:
2071 			mpt_prt(mpt,
2072 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2073 			    "(Loop Initialization)\n",
2074 			    (msg->Data[1] >> 8) & 0xff,
2075 			    (msg->Data[0] >> 8) & 0xff,
2076 			    (msg->Data[0]     ) & 0xff);
2077 			switch ((msg->Data[0] >> 8) & 0xff) {
2078 			case 0xF7:
2079 				if ((msg->Data[0] & 0xff) == 0xF7) {
2080 					mpt_prt(mpt, "Device needs AL_PA\n");
2081 				} else {
2082 					mpt_prt(mpt, "Device %02x doesn't like "
2083 					    "FC performance\n",
2084 					    msg->Data[0] & 0xFF);
2085 				}
2086 				break;
2087 			case 0xF8:
2088 				if ((msg->Data[0] & 0xff) == 0xF7) {
2089 					mpt_prt(mpt, "Device had loop failure "
2090 					    "at its receiver prior to acquiring"
2091 					    " AL_PA\n");
2092 				} else {
2093 					mpt_prt(mpt, "Device %02x detected loop"
2094 					    " failure at its receiver\n",
2095 					    msg->Data[0] & 0xFF);
2096 				}
2097 				break;
2098 			default:
2099 				mpt_prt(mpt, "Device %02x requests that device "
2100 				    "%02x reset itself\n",
2101 				    msg->Data[0] & 0xFF,
2102 				    (msg->Data[0] >> 8) & 0xFF);
2103 				break;
2104 			}
2105 			break;
2106 		case 0x02:
2107 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2108 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2109 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2110 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2111 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2112 			break;
2113 		case 0x03:
2114 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2115 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2116 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2117 			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
2118 			    (msg->Data[0]     ) & 0xff  /* Character 4 */);
2119 			break;
2120 		default:
2121 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2122 			    "FC event (%02x %02x %02x)\n",
2123 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2124 			    (msg->Data[0] >> 16) & 0xff, /* Event */
2125 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2126 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2127 		}
2128 		break;
2129 
2130 	case MPI_EVENT_LOGOUT:
2131 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2132 		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
2133 		break;
2134 	case MPI_EVENT_EVENT_CHANGE:
2135 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2136 		    "mpt_cam_event: MPI_EVENT_EVENT_CHANGE\n");
2137 		break;
2138 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2139 		/*
2140 		 * Devices are attachin'.....
2141 		 */
2142 		mpt_prt(mpt,
2143 		    "mpt_cam_event: MPI_EVENT_SAS_DEVICE_STATUS_CHANGE\n");
2144 		break;
2145 	default:
2146 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2147 		    msg->Event & 0xFF);
2148 		return (0);
2149 	}
2150 	return (1);
2151 }
2152 
2153 /*
2154  * Reply path for all SCSI I/O requests, called from our
2155  * interrupt handler by extracting our handler index from
2156  * the MsgContext field of the reply from the IOC.
2157  *
2158  * This routine is optimized for the common case of a
2159  * completion without error.  All exception handling is
2160  * offloaded to non-inlined helper routines to minimize
2161  * cache footprint.
2162  */
2163 static int
2164 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2165     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2166 {
2167 	MSG_SCSI_IO_REQUEST *scsi_req;
2168 	union ccb *ccb;
2169 	target_id_t tgt;
2170 
2171 	if (req->state == REQ_STATE_FREE) {
2172 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2173 		return (TRUE);
2174 	}
2175 
2176 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2177 	ccb = req->ccb;
2178 	if (ccb == NULL) {
2179 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2180 		    req, req->serno);
2181 		return (TRUE);
2182 	}
2183 
2184 	tgt = scsi_req->TargetID;
2185 	untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
2186 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2187 
2188 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2189 		bus_dmasync_op_t op;
2190 
2191 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2192 			op = BUS_DMASYNC_POSTREAD;
2193 		else
2194 			op = BUS_DMASYNC_POSTWRITE;
2195 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2196 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2197 	}
2198 
2199 	if (reply_frame == NULL) {
2200 		/*
2201 		 * Context only reply, completion without error status.
2202 		 */
2203 		ccb->csio.resid = 0;
2204 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2205 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2206 	} else {
2207 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2208 	}
2209 
2210 	if (mpt->outofbeer) {
2211 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2212 		mpt->outofbeer = 0;
2213 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2214 	}
2215 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2216 		struct scsi_inquiry_data *iq =
2217 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2218 		if (scsi_req->Function ==
2219 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2220 			/*
2221 			 * Fake out the device type so that only the
2222 			 * pass-thru device will attach.
2223 			 */
2224 			iq->device &= ~0x1F;
2225 			iq->device |= T_NODEVICE;
2226 		}
2227 	}
2228 	if (mpt->verbose == MPT_PRT_DEBUG) {
2229 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2230 		    req, req->serno);
2231 	}
2232 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2233 	MPTLOCK_2_CAMLOCK(mpt);
2234 	xpt_done(ccb);
2235 	CAMLOCK_2_MPTLOCK(mpt);
2236 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2237 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2238 	} else {
2239 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2240 		    req, req->serno);
2241 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2242 	}
2243 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2244 	    ("CCB req needed wakeup"));
2245 #ifdef	INVARIANTS
2246 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2247 #endif
2248 	mpt_free_request(mpt, req);
2249 	return (TRUE);
2250 }
2251 
2252 static int
2253 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2254     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2255 {
2256 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2257 
2258 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2259 #ifdef	INVARIANTS
2260 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2261 #endif
2262 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2263 	/* Record IOC Status and Response Code of TMF for any waiters. */
2264 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2265 	req->ResponseCode = tmf_reply->ResponseCode;
2266 
2267 	mpt_lprt(mpt, MPT_PRT_INFO, "TMF complete: req %p:%u status 0x%x\n",
2268 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2269 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2270 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2271 		req->state |= REQ_STATE_DONE;
2272 		wakeup(req);
2273 	} else {
2274 		mpt->tmf_req->state = REQ_STATE_FREE;
2275 	}
2276 	return (TRUE);
2277 }
2278 
2279 /*
2280  * XXX: Move to definitions file
2281  */
2282 #define	ELS	0x22
2283 #define	FC4LS	0x32
2284 #define	ABTS	0x81
2285 #define	BA_ACC	0x84
2286 
2287 #define	LS_RJT	0x01
2288 #define	LS_ACC	0x02
2289 #define	PLOGI	0x03
2290 #define	LOGO	0x05
2291 #define SRR	0x14
2292 #define PRLI	0x20
2293 #define PRLO	0x21
2294 #define ADISC	0x52
2295 #define RSCN	0x61
2296 
2297 static void
2298 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2299     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2300 {
2301 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2302 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2303 
2304 	/*
2305 	 * We are going to reuse the ELS request to send this response back.
2306 	 */
2307 	rsp = &tmp;
2308 	memset(rsp, 0, sizeof(*rsp));
2309 
2310 #ifdef	USE_IMMEDIATE_LINK_DATA
2311 	/*
2312 	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2313 	 */
2314 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2315 #endif
2316 	rsp->RspLength = length;
2317 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2318 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2319 
2320 	/*
2321 	 * Copy over information from the original reply frame to
2322 	 * it's correct place in the response.
2323 	 */
2324 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2325 
2326 	/*
2327 	 * And now copy back the temporary area to the original frame.
2328 	 */
2329 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2330 	rsp = req->req_vbuf;
2331 
2332 #ifdef	USE_IMMEDIATE_LINK_DATA
2333 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2334 #else
2335 {
2336 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2337 	bus_addr_t paddr = req->req_pbuf;
2338 	paddr += MPT_RQSL(mpt);
2339 
2340 	se->FlagsLength =
2341 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2342 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2343 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2344 		MPI_SGE_FLAGS_END_OF_LIST	|
2345 		MPI_SGE_FLAGS_END_OF_BUFFER;
2346 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
2347 	se->FlagsLength |= (length);
2348 	se->Address = (uint32_t) paddr;
2349 }
2350 #endif
2351 
2352 	/*
2353 	 * Send it on...
2354 	 */
2355 	mpt_send_cmd(mpt, req);
2356 }
2357 
2358 static int
2359 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2360     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2361 {
2362 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2363 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2364 	U8 rctl;
2365 	U8 type;
2366 	U8 cmd;
2367 	U16 status = le16toh(reply_frame->IOCStatus);
2368 	U32 *elsbuf;
2369 	int ioindex;
2370 	int do_refresh = TRUE;
2371 
2372 #ifdef	INVARIANTS
2373 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2374 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2375 	    req, req->serno, rp->Function));
2376 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2377 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2378 	} else {
2379 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2380 	}
2381 #endif
2382 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2383 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2384 	    req, req->serno, reply_frame, reply_frame->Function);
2385 
2386 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2387 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2388 		    status, reply_frame->Function);
2389 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2390 			/*
2391 			 * XXX: to get around shutdown issue
2392 			 */
2393 			mpt->disabled = 1;
2394 			return (TRUE);
2395 		}
2396 		return (TRUE);
2397 	}
2398 
2399 	/*
2400 	 * If the function of a link service response, we recycle the
2401 	 * response to be a refresh for a new link service request.
2402 	 *
2403 	 * The request pointer is bogus in this case and we have to fetch
2404 	 * it based upon the TransactionContext.
2405 	 */
2406 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2407 		/* Freddie Uncle Charlie Katie */
2408 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2409 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2410 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2411 				break;
2412 			}
2413 
2414 		KASSERT(ioindex < mpt->els_cmds_allocated,
2415 		    ("can't find my mommie!"));
2416 
2417 		/* remove from active list as we're going to re-post it */
2418 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2419 		req->state &= ~REQ_STATE_QUEUED;
2420 		req->state |= REQ_STATE_DONE;
2421 		mpt_fc_post_els(mpt, req, ioindex);
2422 		return (TRUE);
2423 	}
2424 
2425 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2426 		/* remove from active list as we're done */
2427 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2428 		req->state &= ~REQ_STATE_QUEUED;
2429 		req->state |= REQ_STATE_DONE;
2430 		if (req->state & REQ_STATE_TIMEDOUT) {
2431 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2432 			    "Sync Primitive Send Completed After Timeout\n");
2433 			mpt_free_request(mpt, req);
2434 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2435 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2436 			    "Async Primitive Send Complete\n");
2437 			mpt_free_request(mpt, req);
2438 		} else {
2439 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2440 			    "Sync Primitive Send Complete- Waking Waiter\n");
2441 			wakeup(req);
2442 		}
2443 		return (TRUE);
2444 	}
2445 
2446 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2447 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2448 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2449 		    rp->MsgLength, rp->MsgFlags);
2450 		return (TRUE);
2451 	}
2452 
2453 	if (rp->MsgLength <= 5) {
2454 		/*
2455 		 * This is just a ack of an original ELS buffer post
2456 		 */
2457 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2458 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2459 		return (TRUE);
2460 	}
2461 
2462 
2463 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2464 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2465 
2466 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2467 	cmd = be32toh(elsbuf[0]) >> 24;
2468 
2469 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2470 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2471 		return (TRUE);
2472 	}
2473 
2474 	ioindex = le32toh(rp->TransactionContext);
2475 	req = mpt->els_cmd_ptrs[ioindex];
2476 
2477 	if (rctl == ELS && type == 1) {
2478 		switch (cmd) {
2479 		case PRLI:
2480 			/*
2481 			 * Send back a PRLI ACC
2482 			 */
2483 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2484 			    le32toh(rp->Wwn.PortNameHigh),
2485 			    le32toh(rp->Wwn.PortNameLow));
2486 			elsbuf[0] = htobe32(0x02100014);
2487 			elsbuf[1] |= htobe32(0x00000100);
2488 			elsbuf[4] = htobe32(0x00000002);
2489 			if (mpt->role & MPT_ROLE_TARGET)
2490 				elsbuf[4] |= htobe32(0x00000010);
2491 			if (mpt->role & MPT_ROLE_INITIATOR)
2492 				elsbuf[4] |= htobe32(0x00000020);
2493 			/* remove from active list as we're done */
2494 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2495 			req->state &= ~REQ_STATE_QUEUED;
2496 			req->state |= REQ_STATE_DONE;
2497 			mpt_fc_els_send_response(mpt, req, rp, 20);
2498 			do_refresh = FALSE;
2499 			break;
2500 		case PRLO:
2501 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2502 			elsbuf[0] = htobe32(0x02100014);
2503 			elsbuf[1] = htobe32(0x08000100);
2504 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2505 			    le32toh(rp->Wwn.PortNameHigh),
2506 			    le32toh(rp->Wwn.PortNameLow));
2507 			/* remove from active list as we're done */
2508 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2509 			req->state &= ~REQ_STATE_QUEUED;
2510 			req->state |= REQ_STATE_DONE;
2511 			mpt_fc_els_send_response(mpt, req, rp, 20);
2512 			do_refresh = FALSE;
2513 			break;
2514 		default:
2515 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2516 			break;
2517 		}
2518 	} else if (rctl == ABTS && type == 0) {
2519 		uint16_t rx_id = le16toh(rp->Rxid);
2520 		uint16_t ox_id = le16toh(rp->Oxid);
2521 		request_t *tgt_req = NULL;
2522 
2523 		mpt_prt(mpt,
2524 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2525 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2526 		    le32toh(rp->Wwn.PortNameLow));
2527 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2528 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2529 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2530 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2531 		} else {
2532 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2533 		}
2534 		if (tgt_req) {
2535 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2536 			uint8_t *vbuf;
2537 			union ccb *ccb = tgt->ccb;
2538 			uint32_t ct_id;
2539 
2540 			vbuf = tgt_req->req_vbuf;
2541 			vbuf += MPT_RQSL(mpt);
2542 
2543 			/*
2544 			 * Check to make sure we have the correct command
2545 			 * The reply descriptor in the target state should
2546 			 * should contain an IoIndex that should match the
2547 			 * RX_ID.
2548 			 *
2549 			 * It'd be nice to have OX_ID to crosscheck with
2550 			 * as well.
2551 			 */
2552 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2553 
2554 			if (ct_id != rx_id) {
2555 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2556 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2557 				    rx_id, ct_id);
2558 				goto skip;
2559 			}
2560 
2561 			ccb = tgt->ccb;
2562 			if (ccb) {
2563 				mpt_prt(mpt,
2564 				    "CCB (%p): lun %u flags %x status %x\n",
2565 				    ccb, ccb->ccb_h.target_lun,
2566 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2567 			}
2568 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2569 			    "%x nxfers %x\n", tgt->state,
2570 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2571 			    tgt->nxfers);
2572   skip:
2573 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2574 				mpt_prt(mpt, "unable to start TargetAbort\n");
2575 			}
2576 		} else {
2577 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2578 		}
2579 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2580 		elsbuf[0] = htobe32(0);
2581 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2582 		elsbuf[2] = htobe32(0x000ffff);
2583 		/*
2584 		 * Dork with the reply frame so that the reponse to it
2585 		 * will be correct.
2586 		 */
2587 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2588 		/* remove from active list as we're done */
2589 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2590 		req->state &= ~REQ_STATE_QUEUED;
2591 		req->state |= REQ_STATE_DONE;
2592 		mpt_fc_els_send_response(mpt, req, rp, 12);
2593 		do_refresh = FALSE;
2594 	} else {
2595 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2596 	}
2597 	if (do_refresh == TRUE) {
2598 		/* remove from active list as we're done */
2599 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2600 		req->state &= ~REQ_STATE_QUEUED;
2601 		req->state |= REQ_STATE_DONE;
2602 		mpt_fc_post_els(mpt, req, ioindex);
2603 	}
2604 	return (TRUE);
2605 }
2606 
2607 /*
2608  * Clean up all SCSI Initiator personality state in response
2609  * to a controller reset.
2610  */
2611 static void
2612 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2613 {
2614 	/*
2615 	 * The pending list is already run down by
2616 	 * the generic handler.  Perform the same
2617 	 * operation on the timed out request list.
2618 	 */
2619 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
2620 				   MPI_IOCSTATUS_INVALID_STATE);
2621 
2622 	/*
2623 	 * XXX: We need to repost ELS and Target Command Buffers?
2624 	 */
2625 
2626 	/*
2627 	 * Inform the XPT that a bus reset has occurred.
2628 	 */
2629 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
2630 }
2631 
2632 /*
2633  * Parse additional completion information in the reply
2634  * frame for SCSI I/O requests.
2635  */
2636 static int
2637 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
2638 			     MSG_DEFAULT_REPLY *reply_frame)
2639 {
2640 	union ccb *ccb;
2641 	MSG_SCSI_IO_REPLY *scsi_io_reply;
2642 	u_int ioc_status;
2643 	u_int sstate;
2644 	u_int loginfo;
2645 
2646 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
2647 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
2648 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
2649 		("MPT SCSI I/O Handler called with incorrect reply type"));
2650 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
2651 		("MPT SCSI I/O Handler called with continuation reply"));
2652 
2653 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
2654 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
2655 	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
2656 	ioc_status &= MPI_IOCSTATUS_MASK;
2657 	sstate = scsi_io_reply->SCSIState;
2658 
2659 	ccb = req->ccb;
2660 	ccb->csio.resid =
2661 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
2662 
2663 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
2664 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
2665 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2666 		ccb->csio.sense_resid =
2667 		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
2668 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
2669 		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
2670 	}
2671 
2672 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
2673 		/*
2674 		 * Tag messages rejected, but non-tagged retry
2675 		 * was successful.
2676 XXXX
2677 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
2678 		 */
2679 	}
2680 
2681 	switch(ioc_status) {
2682 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2683 		/*
2684 		 * XXX
2685 		 * Linux driver indicates that a zero
2686 		 * transfer length with this error code
2687 		 * indicates a CRC error.
2688 		 *
2689 		 * No need to swap the bytes for checking
2690 		 * against zero.
2691 		 */
2692 		if (scsi_io_reply->TransferCount == 0) {
2693 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2694 			break;
2695 		}
2696 		/* FALLTHROUGH */
2697 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
2698 	case MPI_IOCSTATUS_SUCCESS:
2699 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
2700 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
2701 			/*
2702 			 * Status was never returned for this transaction.
2703 			 */
2704 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
2705 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
2706 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
2707 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
2708 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
2709 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
2710 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
2711 
2712 			/* XXX Handle SPI-Packet and FCP-2 reponse info. */
2713 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2714 		} else
2715 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2716 		break;
2717 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
2718 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
2719 		break;
2720 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
2721 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2722 		break;
2723 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2724 		/*
2725 		 * Since selection timeouts and "device really not
2726 		 * there" are grouped into this error code, report
2727 		 * selection timeout.  Selection timeouts are
2728 		 * typically retried before giving up on the device
2729 		 * whereas "device not there" errors are considered
2730 		 * unretryable.
2731 		 */
2732 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
2733 		break;
2734 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2735 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
2736 		break;
2737 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
2738 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
2739 		break;
2740 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
2741 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
2742 		break;
2743 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2744 		ccb->ccb_h.status = CAM_UA_TERMIO;
2745 		break;
2746 	case MPI_IOCSTATUS_INVALID_STATE:
2747 		/*
2748 		 * The IOC has been reset.  Emulate a bus reset.
2749 		 */
2750 		/* FALLTHROUGH */
2751 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
2752 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2753 		break;
2754 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
2755 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
2756 		/*
2757 		 * Don't clobber any timeout status that has
2758 		 * already been set for this transaction.  We
2759 		 * want the SCSI layer to be able to differentiate
2760 		 * between the command we aborted due to timeout
2761 		 * and any innocent bystanders.
2762 		 */
2763 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
2764 			break;
2765 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
2766 		break;
2767 
2768 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2769 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
2770 		break;
2771 	case MPI_IOCSTATUS_BUSY:
2772 		mpt_set_ccb_status(ccb, CAM_BUSY);
2773 		break;
2774 	case MPI_IOCSTATUS_INVALID_FUNCTION:
2775 	case MPI_IOCSTATUS_INVALID_SGL:
2776 	case MPI_IOCSTATUS_INTERNAL_ERROR:
2777 	case MPI_IOCSTATUS_INVALID_FIELD:
2778 	default:
2779 		/* XXX
2780 		 * Some of the above may need to kick
2781 		 * of a recovery action!!!!
2782 		 */
2783 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2784 		break;
2785 	}
2786 
2787 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2788 		mpt_freeze_ccb(ccb);
2789 	}
2790 
2791 	return (TRUE);
2792 }
2793 
2794 static void
2795 mpt_action(struct cam_sim *sim, union ccb *ccb)
2796 {
2797 	struct mpt_softc *mpt;
2798 	struct ccb_trans_settings *cts;
2799 	target_id_t tgt;
2800 	lun_id_t lun;
2801 	int raid_passthru;
2802 
2803 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
2804 
2805 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
2806 	KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action"));
2807 	raid_passthru = (sim == mpt->phydisk_sim);
2808 
2809 	tgt = ccb->ccb_h.target_id;
2810 	lun = ccb->ccb_h.target_lun;
2811 	if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ &&
2812 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
2813 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
2814 		CAMLOCK_2_MPTLOCK(mpt);
2815 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2816 			MPTLOCK_2_CAMLOCK(mpt);
2817 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2818 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2819 			xpt_done(ccb);
2820 			return;
2821 		}
2822 		MPTLOCK_2_CAMLOCK(mpt);
2823 	}
2824 	ccb->ccb_h.ccb_mpt_ptr = mpt;
2825 
2826 	switch (ccb->ccb_h.func_code) {
2827 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2828 		/*
2829 		 * Do a couple of preliminary checks...
2830 		 */
2831 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2832 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2833 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2834 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2835 				break;
2836 			}
2837 		}
2838 		/* Max supported CDB length is 16 bytes */
2839 		/* XXX Unless we implement the new 32byte message type */
2840 		if (ccb->csio.cdb_len >
2841 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
2842 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2843 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2844 			break;
2845 		}
2846 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2847 		mpt_start(sim, ccb);
2848 		return;
2849 
2850 	case XPT_RESET_BUS:
2851 	case XPT_RESET_DEV:
2852 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2853 			ccb->ccb_h.func_code == XPT_RESET_BUS ?
2854 			"XPT_RESET_BUS\n" : "XPT_RESET_DEV\n");
2855 
2856 		CAMLOCK_2_MPTLOCK(mpt);
2857 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
2858 		MPTLOCK_2_CAMLOCK(mpt);
2859 
2860 		/*
2861 		 * mpt_bus_reset is always successful in that it
2862 		 * will fall back to a hard reset should a bus
2863 		 * reset attempt fail.
2864 		 */
2865 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2866 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2867 		break;
2868 
2869 	case XPT_ABORT:
2870 	{
2871 		union ccb *accb = ccb->cab.abort_ccb;
2872 		CAMLOCK_2_MPTLOCK(mpt);
2873 		switch (accb->ccb_h.func_code) {
2874 		case XPT_ACCEPT_TARGET_IO:
2875 		case XPT_IMMED_NOTIFY:
2876 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
2877 			break;
2878 		case XPT_CONT_TARGET_IO:
2879 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
2880 			ccb->ccb_h.status = CAM_UA_ABORT;
2881 			break;
2882 		case XPT_SCSI_IO:
2883 			ccb->ccb_h.status = CAM_UA_ABORT;
2884 			break;
2885 		default:
2886 			ccb->ccb_h.status = CAM_REQ_INVALID;
2887 			break;
2888 		}
2889 		MPTLOCK_2_CAMLOCK(mpt);
2890 		break;
2891 	}
2892 
2893 #ifdef	CAM_NEW_TRAN_CODE
2894 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
2895 #else
2896 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
2897 #endif
2898 #define	DP_DISC_ENABLE	0x1
2899 #define	DP_DISC_DISABL	0x2
2900 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
2901 
2902 #define	DP_TQING_ENABLE	0x4
2903 #define	DP_TQING_DISABL	0x8
2904 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
2905 
2906 #define	DP_WIDE		0x10
2907 #define	DP_NARROW	0x20
2908 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
2909 
2910 #define	DP_SYNC		0x40
2911 
2912 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2913 	{
2914 #ifdef	CAM_NEW_TRAN_CODE
2915 		struct ccb_trans_settings_scsi *scsi;
2916 		struct ccb_trans_settings_spi *spi;
2917 #endif
2918 		uint8_t dval;
2919 		u_int period;
2920 		u_int offset;
2921 		int i, j;
2922 
2923 		cts = &ccb->cts;
2924 
2925 		if (mpt->is_fc || mpt->is_sas) {
2926 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2927 			break;
2928 		}
2929 
2930 		/*
2931 		 * Skip attempting settings on RAID volume disks.
2932 		 * Other devices on the bus get the normal treatment.
2933 		 */
2934 		if (mpt->phydisk_sim && raid_passthru == 0 &&
2935 		    mpt_is_raid_volume(mpt, tgt) != 0) {
2936 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
2937 			    "skipping transfer settings for RAID volumes\n");
2938 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2939 			break;
2940 		}
2941 
2942 		i = mpt->mpt_port_page2.PortSettings &
2943 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
2944 		j = mpt->mpt_port_page2.PortFlags &
2945 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
2946 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
2947 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
2948 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
2949 			    "honoring BIOS transfer negotiations\n");
2950 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2951 			break;
2952 		}
2953 
2954 		dval = 0;
2955 		period = 0;
2956 		offset = 0;
2957 
2958 #ifndef	CAM_NEW_TRAN_CODE
2959 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
2960 			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
2961 			    DP_DISC_ENABLE : DP_DISC_DISABL;
2962 		}
2963 
2964 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
2965 			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
2966 			    DP_TQING_ENABLE : DP_TQING_DISABL;
2967 		}
2968 
2969 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
2970 			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
2971 		}
2972 
2973 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2974 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
2975 			dval |= DP_SYNC;
2976 			period = cts->sync_period;
2977 			offset = cts->sync_offset;
2978 		}
2979 #else
2980 		scsi = &cts->proto_specific.scsi;
2981 		spi = &cts->xport_specific.spi;
2982 
2983 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2984 			dval |= (spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
2985 			    DP_DISC_ENABLE : DP_DISC_DISABL;
2986 		}
2987 
2988 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2989 			dval |= (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
2990 			    DP_TQING_ENABLE : DP_TQING_DISABL;
2991 		}
2992 
2993 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2994 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
2995 			    DP_WIDE : DP_NARROW;
2996 		}
2997 
2998 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2999 		    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
3000 		    (spi->sync_period && spi->sync_offset)) {
3001 			dval |= DP_SYNC;
3002 			period = spi->sync_period;
3003 			offset = spi->sync_offset;
3004 		}
3005 #endif
3006 		CAMLOCK_2_MPTLOCK(mpt);
3007 		if (dval & DP_DISC_ENABLE) {
3008 			mpt->mpt_disc_enable |= (1 << tgt);
3009 		} else if (dval & DP_DISC_DISABL) {
3010 			mpt->mpt_disc_enable &= ~(1 << tgt);
3011 		}
3012 		if (dval & DP_TQING_ENABLE) {
3013 			mpt->mpt_tag_enable |= (1 << tgt);
3014 		} else if (dval & DP_TQING_DISABL) {
3015 			mpt->mpt_tag_enable &= ~(1 << tgt);
3016 		}
3017 		if (dval & DP_WIDTH) {
3018 			mpt_setwidth(mpt, tgt, 1);
3019 		}
3020 		if (dval & DP_SYNC) {
3021 			mpt_setsync(mpt, tgt, period, offset);
3022 		}
3023 
3024 		if (mpt_update_spi_config(mpt, tgt)) {
3025 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3026 		} else {
3027 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3028 		}
3029 		MPTLOCK_2_CAMLOCK(mpt);
3030 		break;
3031 	}
3032 	case XPT_GET_TRAN_SETTINGS:
3033 		cts = &ccb->cts;
3034 		if (mpt->is_fc) {
3035 #ifndef	CAM_NEW_TRAN_CODE
3036 			/*
3037 			 * a lot of normal SCSI things don't make sense.
3038 			 */
3039 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3040 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3041 			/*
3042 			 * How do you measure the width of a high
3043 			 * speed serial bus? Well, in bytes.
3044 			 *
3045 			 * Offset and period make no sense, though, so we set
3046 			 * (above) a 'base' transfer speed to be gigabit.
3047 			 */
3048 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3049 #else
3050 			struct ccb_trans_settings_fc *fc =
3051 			    &cts->xport_specific.fc;
3052 
3053 			cts->protocol = PROTO_SCSI;
3054 			cts->protocol_version = SCSI_REV_2;
3055 			cts->transport = XPORT_FC;
3056 			cts->transport_version = 0;
3057 
3058 			fc->valid = CTS_FC_VALID_SPEED;
3059 			fc->bitrate = 100000;	/* XXX: Need for 2Gb/s */
3060 			/* XXX: need a port database for each target */
3061 #endif
3062 		} else if (mpt->is_sas) {
3063 #ifndef	CAM_NEW_TRAN_CODE
3064 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3065 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3066 			/*
3067 			 * How do you measure the width of a high
3068 			 * speed serial bus? Well, in bytes.
3069 			 *
3070 			 * Offset and period make no sense, though, so we set
3071 			 * (above) a 'base' transfer speed to be gigabit.
3072 			 */
3073 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3074 #else
3075 			struct ccb_trans_settings_sas *sas =
3076 			    &cts->xport_specific.sas;
3077 
3078 			cts->protocol = PROTO_SCSI;
3079 			cts->protocol_version = SCSI_REV_3;
3080 			cts->transport = XPORT_SAS;
3081 			cts->transport_version = 0;
3082 
3083 			sas->valid = CTS_SAS_VALID_SPEED;
3084 			sas->bitrate = 300000;	/* XXX: Default 3Gbps */
3085 #endif
3086 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
3087 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3088 			break;
3089 		}
3090 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3091 		break;
3092 
3093 	case XPT_CALC_GEOMETRY:
3094 	{
3095 		struct ccb_calc_geometry *ccg;
3096 
3097 		ccg = &ccb->ccg;
3098 		if (ccg->block_size == 0) {
3099 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3100 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3101 			break;
3102 		}
3103 		mpt_calc_geometry(ccg, /*extended*/1);
3104 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3105 		break;
3106 	}
3107 	case XPT_PATH_INQ:		/* Path routing inquiry */
3108 	{
3109 		struct ccb_pathinq *cpi = &ccb->cpi;
3110 
3111 		cpi->version_num = 1;
3112 		cpi->target_sprt = 0;
3113 		cpi->hba_eng_cnt = 0;
3114 		cpi->max_target = mpt->mpt_max_devices - 1;
3115 		/*
3116 		 * XXX: FC cards report MAX_DEVICES of 512- but we
3117 		 * XXX: seem to hang when going higher than 255.
3118 		 */
3119 		if (cpi->max_target > 255)
3120 			cpi->max_target = 255;
3121 		/*
3122 		 * XXX: VMware ESX reports > 16 devices and then dies
3123 		 * XXX: when we probe.
3124 		 */
3125 		if (mpt->is_spi && cpi->max_target > 15)
3126 			cpi->max_target = 15;
3127 		cpi->max_lun = 7;
3128 		cpi->initiator_id = mpt->mpt_ini_id;
3129 
3130 		cpi->bus_id = cam_sim_bus(sim);
3131 		/*
3132 		 * Actual speed for each device varies.
3133 		 *
3134 		 * The base speed is the speed of the underlying connection.
3135 		 * This is strictly determined for SPI (async, narrow). If
3136 		 * link is up for Fibre Channel, then speed can be gotten
3137 		 * from that.
3138 		 */
3139 		if (mpt->is_fc) {
3140 			cpi->hba_misc = PIM_NOBUSRESET;
3141 			cpi->base_transfer_speed =
3142 			    mpt->mpt_fcport_speed * 100000;
3143 			cpi->hba_inquiry = PI_TAG_ABLE;
3144 		} else if (mpt->is_sas) {
3145 			cpi->hba_misc = PIM_NOBUSRESET;
3146 			cpi->base_transfer_speed = 300000;
3147 			cpi->hba_inquiry = PI_TAG_ABLE;
3148 		} else {
3149 			cpi->hba_misc = PIM_SEQSCAN;
3150 			cpi->base_transfer_speed = 3300;
3151 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3152 		}
3153 
3154 		/*
3155 		 * We give our fake RAID passhtru bus a width that is MaxVolumes
3156 		 * wide, restrict it to one lun and have it *not* be a bus
3157 		 * that can have a SCSI bus reset.
3158 		 */
3159 		if (raid_passthru) {
3160 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3161 			cpi->initiator_id = cpi->max_target + 1;
3162 			cpi->max_lun = 0;
3163 			cpi->hba_misc |= PIM_NOBUSRESET;
3164 		}
3165 
3166 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3167 			cpi->hba_misc |= PIM_NOINITIATOR;
3168 		}
3169 		if ((mpt->role & MPT_ROLE_TARGET) != 0) {
3170 			cpi->target_sprt =
3171 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3172 		} else {
3173 			cpi->target_sprt = 0;
3174 		}
3175 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3176 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3177 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3178 		cpi->unit_number = cam_sim_unit(sim);
3179 		cpi->ccb_h.status = CAM_REQ_CMP;
3180 		break;
3181 	}
3182 	case XPT_EN_LUN:		/* Enable LUN as a target */
3183 	{
3184 		int result;
3185 
3186 		CAMLOCK_2_MPTLOCK(mpt);
3187 		if (ccb->cel.enable)
3188 			result = mpt_enable_lun(mpt,
3189 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3190 		else
3191 			result = mpt_disable_lun(mpt,
3192 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3193 		MPTLOCK_2_CAMLOCK(mpt);
3194 		if (result == 0) {
3195 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3196 		} else {
3197 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3198 		}
3199 		break;
3200 	}
3201 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3202 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3203 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3204 	{
3205 		tgt_resource_t *trtp;
3206 		lun_id_t lun = ccb->ccb_h.target_lun;
3207 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3208 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3209 		ccb->ccb_h.flags = 0;
3210 
3211 		if (lun == CAM_LUN_WILDCARD) {
3212 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3213 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3214 				break;
3215 			}
3216 			trtp = &mpt->trt_wildcard;
3217 		} else if (lun >= MPT_MAX_LUNS) {
3218 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3219 			break;
3220 		} else {
3221 			trtp = &mpt->trt[lun];
3222 		}
3223 		CAMLOCK_2_MPTLOCK(mpt);
3224 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3225 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3226 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3227 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3228 			    sim_links.stqe);
3229 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3230 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3231 			    "Put FREE INOT lun %d\n", lun);
3232 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3233 			    sim_links.stqe);
3234 		} else {
3235 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3236 		}
3237 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3238 		MPTLOCK_2_CAMLOCK(mpt);
3239 		return;
3240 	}
3241 	case XPT_CONT_TARGET_IO:
3242 		CAMLOCK_2_MPTLOCK(mpt);
3243 		mpt_target_start_io(mpt, ccb);
3244 		MPTLOCK_2_CAMLOCK(mpt);
3245 		return;
3246 
3247 	default:
3248 		ccb->ccb_h.status = CAM_REQ_INVALID;
3249 		break;
3250 	}
3251 	xpt_done(ccb);
3252 }
3253 
3254 static int
3255 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3256 {
3257 #ifdef	CAM_NEW_TRAN_CODE
3258 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3259 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3260 #endif
3261 	target_id_t tgt;
3262 	uint8_t dval, pval, oval;
3263 	int rv;
3264 
3265 	if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3266 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3267 			return (-1);
3268 		}
3269 	} else {
3270 		tgt = cts->ccb_h.target_id;
3271 	}
3272 
3273 	/*
3274 	 * XXX: We aren't looking Port Page 2 BIOS settings here.
3275 	 * XXX: For goal settings, we pick the max from port page 0
3276 	 *
3277 	 * For current settings we read the current settings out from
3278 	 * device page 0 for that target.
3279 	 */
3280 	if (IS_CURRENT_SETTINGS(cts)) {
3281 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3282 		dval = 0;
3283 
3284 		CAMLOCK_2_MPTLOCK(mpt);
3285 		tmp = mpt->mpt_dev_page0[tgt];
3286 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3287 		    sizeof(tmp), FALSE, 5000);
3288 		if (rv) {
3289 			MPTLOCK_2_CAMLOCK(mpt);
3290 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3291 			return (rv);
3292 		}
3293 		MPTLOCK_2_CAMLOCK(mpt);
3294 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3295 		    DP_WIDE : DP_NARROW;
3296 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3297 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3298 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3299 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3300 		oval = (tmp.NegotiatedParameters >> 16) & 0xff;
3301 		pval = (tmp.NegotiatedParameters >>  8) & 0xff;
3302 		mpt->mpt_dev_page0[tgt] = tmp;
3303 	} else {
3304 		/*
3305 		 * XXX: Just make theoretical maximum.
3306 		 */
3307 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE;
3308 		oval = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
3309 		pval = (mpt->mpt_port_page0.Capabilities >>  8) & 0xff;
3310 	}
3311 #ifndef	CAM_NEW_TRAN_CODE
3312 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3313 	if (dval & DP_DISC_ENABLE) {
3314 		cts->flags |= CCB_TRANS_DISC_ENB;
3315 	}
3316 	if (dval & DP_TQING_ENABLE) {
3317 		cts->flags |= CCB_TRANS_TAG_ENB;
3318 	}
3319 	if (dval & DP_WIDE) {
3320 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3321 	} else {
3322 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3323 	}
3324 	cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
3325 	    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3326 	if (oval) {
3327 		cts->sync_period = pval;
3328 		cts->sync_offset = oval;
3329 		cts->valid |=
3330 		    CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID;
3331 	}
3332 #else
3333 	cts->protocol = PROTO_SCSI;
3334 	cts->protocol_version = SCSI_REV_2;
3335 	cts->transport = XPORT_SPI;
3336 	cts->transport_version = 2;
3337 
3338 	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
3339 	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
3340 	if (dval & DP_DISC_ENABLE) {
3341 		spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3342 	}
3343 	if (dval & DP_TQING_ENABLE) {
3344 		scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3345 	}
3346 	if (oval && pval) {
3347 		spi->sync_offset = oval;
3348 		spi->sync_period = pval;
3349 		spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3350 		spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3351 	}
3352 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3353 	if (dval & DP_WIDE) {
3354 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3355 	} else {
3356 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3357 	}
3358 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3359 		scsi->valid = CTS_SCSI_VALID_TQ;
3360 		spi->valid |= CTS_SPI_VALID_DISC;
3361 	} else {
3362 		scsi->valid = 0;
3363 	}
3364 #endif
3365 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3366 	    "mpt_get_spi_settings[%d]: %s 0x%x period 0x%x offset %d\n", tgt,
3367 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3368 	return (0);
3369 }
3370 
3371 static void
3372 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3373 {
3374 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3375 
3376 	ptr = &mpt->mpt_dev_page1[tgt];
3377 	if (onoff) {
3378 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3379 	} else {
3380 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3381 	}
3382 }
3383 
3384 static void
3385 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3386 {
3387 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3388 
3389 	ptr = &mpt->mpt_dev_page1[tgt];
3390 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3391 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3392 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3393 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3394 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3395 	ptr->RequestedParameters |= (period << 8) | (offset << 16);
3396 	if (period < 0xa) {
3397 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3398 	}
3399 	if (period < 0x9) {
3400 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3401 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3402 	}
3403 }
3404 
3405 static int
3406 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3407 {
3408 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3409 	int rv;
3410 
3411 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3412 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3413 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3414 	tmp = mpt->mpt_dev_page1[tgt];
3415 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3416 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3417 	if (rv) {
3418 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3419 		return (-1);
3420 	}
3421 	return (0);
3422 }
3423 
3424 static void
3425 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3426 {
3427 #if __FreeBSD_version >= 500000
3428 	cam_calc_geometry(ccg, extended);
3429 #else
3430 	uint32_t size_mb;
3431 	uint32_t secs_per_cylinder;
3432 
3433 	if (ccg->block_size == 0) {
3434 		ccg->ccb_h.status = CAM_REQ_INVALID;
3435 		return;
3436 	}
3437 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3438 	if (size_mb > 1024 && extended) {
3439 		ccg->heads = 255;
3440 		ccg->secs_per_track = 63;
3441 	} else {
3442 		ccg->heads = 64;
3443 		ccg->secs_per_track = 32;
3444 	}
3445 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3446 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3447 	ccg->ccb_h.status = CAM_REQ_CMP;
3448 #endif
3449 }
3450 
3451 /****************************** Timeout Recovery ******************************/
3452 static int
3453 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3454 {
3455 	int error;
3456 
3457 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3458 	    &mpt->recovery_thread, /*flags*/0,
3459 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3460 	return (error);
3461 }
3462 
3463 static void
3464 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3465 {
3466 	if (mpt->recovery_thread == NULL) {
3467 		return;
3468 	}
3469 	mpt->shutdwn_recovery = 1;
3470 	wakeup(mpt);
3471 	/*
3472 	 * Sleep on a slightly different location
3473 	 * for this interlock just for added safety.
3474 	 */
3475 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3476 }
3477 
3478 static void
3479 mpt_recovery_thread(void *arg)
3480 {
3481 	struct mpt_softc *mpt;
3482 
3483 #if __FreeBSD_version >= 500000
3484 	mtx_lock(&Giant);
3485 #endif
3486 	mpt = (struct mpt_softc *)arg;
3487 	MPT_LOCK(mpt);
3488 	for (;;) {
3489 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3490 			if (mpt->shutdwn_recovery == 0) {
3491 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3492 			}
3493 		}
3494 		if (mpt->shutdwn_recovery != 0) {
3495 			break;
3496 		}
3497 		mpt_recover_commands(mpt);
3498 	}
3499 	mpt->recovery_thread = NULL;
3500 	wakeup(&mpt->recovery_thread);
3501 	MPT_UNLOCK(mpt);
3502 #if __FreeBSD_version >= 500000
3503 	mtx_unlock(&Giant);
3504 #endif
3505 	kthread_exit(0);
3506 }
3507 
3508 static int
3509 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3510     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3511 {
3512 	MSG_SCSI_TASK_MGMT *tmf_req;
3513 	int		    error;
3514 
3515 	/*
3516 	 * Wait for any current TMF request to complete.
3517 	 * We're only allowed to issue one TMF at a time.
3518 	 */
3519 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3520 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3521 	if (error != 0) {
3522 		mpt_reset(mpt, TRUE);
3523 		return (ETIMEDOUT);
3524 	}
3525 
3526 	mpt_assign_serno(mpt, mpt->tmf_req);
3527 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3528 
3529 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3530 	memset(tmf_req, 0, sizeof(*tmf_req));
3531 	tmf_req->TargetID = target;
3532 	tmf_req->Bus = channel;
3533 	tmf_req->ChainOffset = 0;
3534 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3535 	tmf_req->Reserved = 0;
3536 	tmf_req->TaskType = type;
3537 	tmf_req->Reserved1 = 0;
3538 	tmf_req->MsgFlags = flags;
3539 	tmf_req->MsgContext =
3540 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3541 	memset(&tmf_req->LUN, 0,
3542 	    sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
3543 	if (lun > 256) {
3544 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3545 		tmf_req->LUN[1] = lun & 0xff;
3546 	} else {
3547 		tmf_req->LUN[1] = lun;
3548 	}
3549 	tmf_req->TaskMsgContext = abort_ctx;
3550 
3551 	mpt_lprt(mpt, MPT_PRT_INFO,
3552 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3553 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3554 	if (mpt->verbose > MPT_PRT_DEBUG) {
3555 		mpt_print_request(tmf_req);
3556 	}
3557 
3558 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3559 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3560 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3561 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3562 	if (error != MPT_OK) {
3563 		mpt_reset(mpt, TRUE);
3564 	}
3565 	return (error);
3566 }
3567 
3568 /*
3569  * When a command times out, it is placed on the requeust_timeout_list
3570  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3571  * only a single TMF operation at a time, so we serially abort/bdr, etc,
3572  * the timedout transactions.  The next TMF is issued either by the
3573  * completion handler of the current TMF waking our recovery thread,
3574  * or the TMF timeout handler causing a hard reset sequence.
3575  */
3576 static void
3577 mpt_recover_commands(struct mpt_softc *mpt)
3578 {
3579 	request_t	   *req;
3580 	union ccb	   *ccb;
3581 	int		    error;
3582 
3583 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3584 		/*
3585 		 * No work to do- leave.
3586 		 */
3587 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3588 		return;
3589 	}
3590 
3591 	/*
3592 	 * Flush any commands whose completion coincides with their timeout.
3593 	 */
3594 	mpt_intr(mpt);
3595 
3596 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3597 		/*
3598 		 * The timedout commands have already
3599 		 * completed.  This typically means
3600 		 * that either the timeout value was on
3601 		 * the hairy edge of what the device
3602 		 * requires or - more likely - interrupts
3603 		 * are not happening.
3604 		 */
3605 		mpt_prt(mpt, "Timedout requests already complete. "
3606 		    "Interrupts may not be functioning.\n");
3607 		mpt_enable_ints(mpt);
3608 		return;
3609 	}
3610 
3611 	/*
3612 	 * We have no visibility into the current state of the
3613 	 * controller, so attempt to abort the commands in the
3614 	 * order they timed-out. For initiator commands, we
3615 	 * depend on the reply handler pulling requests off
3616 	 * the timeout list.
3617 	 */
3618 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3619 		uint16_t status;
3620 		uint8_t response;
3621 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3622 
3623 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
3624 		    req, req->serno, hdrp->Function);
3625 		ccb = req->ccb;
3626 		if (ccb == NULL) {
3627 			mpt_prt(mpt, "null ccb in timed out request. "
3628 			    "Resetting Controller.\n");
3629 			mpt_reset(mpt, TRUE);
3630 			continue;
3631 		}
3632 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
3633 
3634 		/*
3635 		 * Check to see if this is not an initiator command and
3636 		 * deal with it differently if it is.
3637 		 */
3638 		switch (hdrp->Function) {
3639 		case MPI_FUNCTION_SCSI_IO_REQUEST:
3640 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
3641 			break;
3642 		default:
3643 			/*
3644 			 * XXX: FIX ME: need to abort target assists...
3645 			 */
3646 			mpt_prt(mpt, "just putting it back on the pend q\n");
3647 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
3648 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
3649 			    links);
3650 			continue;
3651 		}
3652 
3653 		error = mpt_scsi_send_tmf(mpt,
3654 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3655 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
3656 		    htole32(req->index | scsi_io_handler_id), TRUE);
3657 
3658 		if (error != 0) {
3659 			/*
3660 			 * mpt_scsi_send_tmf hard resets on failure, so no
3661 			 * need to do so here.  Our queue should be emptied
3662 			 * by the hard reset.
3663 			 */
3664 			continue;
3665 		}
3666 
3667 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
3668 		    REQ_STATE_DONE, TRUE, 500);
3669 
3670 		status = mpt->tmf_req->IOCStatus;
3671 		response = mpt->tmf_req->ResponseCode;
3672 		mpt->tmf_req->state = REQ_STATE_FREE;
3673 
3674 		if (error != 0) {
3675 			/*
3676 			 * If we've errored out,, reset the controller.
3677 			 */
3678 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
3679 			    "Resetting controller\n");
3680 			mpt_reset(mpt, TRUE);
3681 			continue;
3682 		}
3683 
3684 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
3685 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
3686 			    "Resetting controller.\n", status);
3687 			mpt_reset(mpt, TRUE);
3688 			continue;
3689 		}
3690 
3691 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
3692 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
3693 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
3694 			    "Resetting controller.\n", response);
3695 			mpt_reset(mpt, TRUE);
3696 			continue;
3697 		}
3698 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
3699 	}
3700 }
3701 
3702 /************************ Target Mode Support ****************************/
3703 static void
3704 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
3705 {
3706 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
3707 	PTR_SGE_TRANSACTION32 tep;
3708 	PTR_SGE_SIMPLE32 se;
3709 	bus_addr_t paddr;
3710 
3711 	paddr = req->req_pbuf;
3712 	paddr += MPT_RQSL(mpt);
3713 
3714 	fc = req->req_vbuf;
3715 	memset(fc, 0, MPT_REQUEST_AREA);
3716 	fc->BufferCount = 1;
3717 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
3718 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
3719 
3720 	/*
3721 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
3722 	 * consist of a TE SGL element (with details length of zero)
3723 	 * followe by a SIMPLE SGL element which holds the address
3724 	 * of the buffer.
3725 	 */
3726 
3727 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
3728 
3729 	tep->ContextSize = 4;
3730 	tep->Flags = 0;
3731 	tep->TransactionContext[0] = htole32(ioindex);
3732 
3733 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
3734 	se->FlagsLength =
3735 		MPI_SGE_FLAGS_HOST_TO_IOC	|
3736 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
3737 		MPI_SGE_FLAGS_LAST_ELEMENT	|
3738 		MPI_SGE_FLAGS_END_OF_LIST	|
3739 		MPI_SGE_FLAGS_END_OF_BUFFER;
3740 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
3741 	se->FlagsLength |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
3742 	se->Address = (uint32_t) paddr;
3743 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3744 	    "add ELS index %d ioindex %d for %p:%u\n",
3745 	    req->index, ioindex, req, req->serno);
3746 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
3747 	    ("mpt_fc_post_els: request not locked"));
3748 	mpt_send_cmd(mpt, req);
3749 }
3750 
3751 static void
3752 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
3753 {
3754 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
3755 	PTR_CMD_BUFFER_DESCRIPTOR cb;
3756 	bus_addr_t paddr;
3757 
3758 	paddr = req->req_pbuf;
3759 	paddr += MPT_RQSL(mpt);
3760 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
3761 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
3762 
3763 	fc = req->req_vbuf;
3764 	fc->BufferCount = 1;
3765 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
3766 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3767 
3768 	cb = &fc->Buffer[0];
3769 	cb->IoIndex = htole16(ioindex);
3770 	cb->u.PhysicalAddress32 = (U32) paddr;
3771 
3772 	mpt_check_doorbell(mpt);
3773 	mpt_send_cmd(mpt, req);
3774 }
3775 
3776 static int
3777 mpt_add_els_buffers(struct mpt_softc *mpt)
3778 {
3779 	int i;
3780 
3781 	if (mpt->is_fc == 0) {
3782 		return (TRUE);
3783 	}
3784 
3785 	if (mpt->els_cmds_allocated) {
3786 		return (TRUE);
3787 	}
3788 
3789 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
3790 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3791 
3792 	if (mpt->els_cmd_ptrs == NULL) {
3793 		return (FALSE);
3794 	}
3795 
3796 	/*
3797 	 * Feed the chip some ELS buffer resources
3798 	 */
3799 	for (i = 0; i < MPT_MAX_ELS; i++) {
3800 		request_t *req = mpt_get_request(mpt, FALSE);
3801 		if (req == NULL) {
3802 			break;
3803 		}
3804 		req->state |= REQ_STATE_LOCKED;
3805 		mpt->els_cmd_ptrs[i] = req;
3806 		mpt_fc_post_els(mpt, req, i);
3807 	}
3808 
3809 	if (i == 0) {
3810 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
3811 		free(mpt->els_cmd_ptrs, M_DEVBUF);
3812 		mpt->els_cmd_ptrs = NULL;
3813 		return (FALSE);
3814 	}
3815 	if (i != MPT_MAX_ELS) {
3816 		mpt_lprt(mpt, MPT_PRT_INFO,
3817 		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
3818 	}
3819 	mpt->els_cmds_allocated = i;
3820 	return(TRUE);
3821 }
3822 
3823 static int
3824 mpt_add_target_commands(struct mpt_softc *mpt)
3825 {
3826 	int i, max;
3827 
3828 	if (mpt->tgt_cmd_ptrs) {
3829 		return (TRUE);
3830 	}
3831 
3832 	max = MPT_MAX_REQUESTS(mpt) >> 1;
3833 	if (max > mpt->mpt_max_tgtcmds) {
3834 		max = mpt->mpt_max_tgtcmds;
3835 	}
3836 	mpt->tgt_cmd_ptrs =
3837 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
3838 	if (mpt->tgt_cmd_ptrs == NULL) {
3839 		mpt_prt(mpt,
3840 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
3841 		return (FALSE);
3842 	}
3843 
3844 	for (i = 0; i < max; i++) {
3845 		request_t *req;
3846 
3847 		req = mpt_get_request(mpt, FALSE);
3848 		if (req == NULL) {
3849 			break;
3850 		}
3851 		req->state |= REQ_STATE_LOCKED;
3852 		mpt->tgt_cmd_ptrs[i] = req;
3853 		mpt_post_target_command(mpt, req, i);
3854 	}
3855 
3856 
3857 	if (i == 0) {
3858 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
3859 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
3860 		mpt->tgt_cmd_ptrs = NULL;
3861 		return (FALSE);
3862 	}
3863 
3864 	mpt->tgt_cmds_allocated = i;
3865 
3866 	if (i < max) {
3867 		mpt_lprt(mpt, MPT_PRT_INFO,
3868 		    "added %d of %d target bufs\n", i, max);
3869 	}
3870 	return (i);
3871 }
3872 
3873 static int
3874 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3875 {
3876 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3877 		mpt->twildcard = 1;
3878 	} else if (lun >= MPT_MAX_LUNS) {
3879 		return (EINVAL);
3880 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3881 		return (EINVAL);
3882 	}
3883 	if (mpt->tenabled == 0) {
3884 		if (mpt->is_fc) {
3885 			(void) mpt_fc_reset_link(mpt, 0);
3886 		}
3887 		mpt->tenabled = 1;
3888 	}
3889 	if (lun == CAM_LUN_WILDCARD) {
3890 		mpt->trt_wildcard.enabled = 1;
3891 	} else {
3892 		mpt->trt[lun].enabled = 1;
3893 	}
3894 	return (0);
3895 }
3896 
3897 static int
3898 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3899 {
3900 	int i;
3901 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3902 		mpt->twildcard = 0;
3903 	} else if (lun >= MPT_MAX_LUNS) {
3904 		return (EINVAL);
3905 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3906 		return (EINVAL);
3907 	}
3908 	if (lun == CAM_LUN_WILDCARD) {
3909 		mpt->trt_wildcard.enabled = 0;
3910 	} else {
3911 		mpt->trt[lun].enabled = 0;
3912 	}
3913 	for (i = 0; i < MPT_MAX_LUNS; i++) {
3914 		if (mpt->trt[lun].enabled) {
3915 			break;
3916 		}
3917 	}
3918 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
3919 		if (mpt->is_fc) {
3920 			(void) mpt_fc_reset_link(mpt, 0);
3921 		}
3922 		mpt->tenabled = 0;
3923 	}
3924 	return (0);
3925 }
3926 
3927 /*
3928  * Called with MPT lock held
3929  */
3930 static void
3931 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
3932 {
3933 	struct ccb_scsiio *csio = &ccb->csio;
3934 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
3935 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
3936 
3937 	switch (tgt->state) {
3938 	case TGT_STATE_IN_CAM:
3939 		break;
3940 	case TGT_STATE_MOVING_DATA:
3941 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3942 		xpt_freeze_simq(mpt->sim, 1);
3943 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3944 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3945 		MPTLOCK_2_CAMLOCK(mpt);
3946 		xpt_done(ccb);
3947 		CAMLOCK_2_MPTLOCK(mpt);
3948 		return;
3949 	default:
3950 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
3951 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
3952 		mpt_tgt_dump_req_state(mpt, cmd_req);
3953 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3954 		MPTLOCK_2_CAMLOCK(mpt);
3955 		xpt_done(ccb);
3956 		CAMLOCK_2_MPTLOCK(mpt);
3957 		return;
3958 	}
3959 
3960 	if (csio->dxfer_len) {
3961 		bus_dmamap_callback_t *cb;
3962 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
3963 		request_t *req;
3964 
3965 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
3966 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
3967 
3968 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
3969 			if (mpt->outofbeer == 0) {
3970 				mpt->outofbeer = 1;
3971 				xpt_freeze_simq(mpt->sim, 1);
3972 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
3973 			}
3974 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3975 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3976 			MPTLOCK_2_CAMLOCK(mpt);
3977 			xpt_done(ccb);
3978 			CAMLOCK_2_MPTLOCK(mpt);
3979 			return;
3980 		}
3981 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
3982 		if (sizeof (bus_addr_t) > 4) {
3983 			cb = mpt_execute_req_a64;
3984 		} else {
3985 			cb = mpt_execute_req;
3986 		}
3987 
3988 		req->ccb = ccb;
3989 		ccb->ccb_h.ccb_req_ptr = req;
3990 
3991 		/*
3992 		 * Record the currently active ccb and the
3993 		 * request for it in our target state area.
3994 		 */
3995 		tgt->ccb = ccb;
3996 		tgt->req = req;
3997 
3998 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
3999 		ta = req->req_vbuf;
4000 
4001 		if (mpt->is_sas) {
4002 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4003 			     cmd_req->req_vbuf;
4004 			ta->QueueTag = ssp->InitiatorTag;
4005 		} else if (mpt->is_spi) {
4006 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4007 			     cmd_req->req_vbuf;
4008 			ta->QueueTag = sp->Tag;
4009 		}
4010 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4011 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4012 		ta->ReplyWord = htole32(tgt->reply_desc);
4013 		if (csio->ccb_h.target_lun > 256) {
4014 			ta->LUN[0] =
4015 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4016 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4017 		} else {
4018 			ta->LUN[1] = csio->ccb_h.target_lun;
4019 		}
4020 
4021 		ta->RelativeOffset = tgt->bytes_xfered;
4022 		ta->DataLength = ccb->csio.dxfer_len;
4023 		if (ta->DataLength > tgt->resid) {
4024 			ta->DataLength = tgt->resid;
4025 		}
4026 
4027 		/*
4028 		 * XXX Should be done after data transfer completes?
4029 		 */
4030 		tgt->resid -= csio->dxfer_len;
4031 		tgt->bytes_xfered += csio->dxfer_len;
4032 
4033 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4034 			ta->TargetAssistFlags |=
4035 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4036 		}
4037 
4038 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4039 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4040 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4041 			ta->TargetAssistFlags |=
4042 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4043 		}
4044 #endif
4045 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4046 
4047 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4048 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4049 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4050 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4051 
4052 		MPTLOCK_2_CAMLOCK(mpt);
4053 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4054 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4055 				int error;
4056 				int s = splsoftvm();
4057 				error = bus_dmamap_load(mpt->buffer_dmat,
4058 				    req->dmap, csio->data_ptr, csio->dxfer_len,
4059 				    cb, req, 0);
4060 				splx(s);
4061 				if (error == EINPROGRESS) {
4062 					xpt_freeze_simq(mpt->sim, 1);
4063 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4064 				}
4065 			} else {
4066 				/*
4067 				 * We have been given a pointer to single
4068 				 * physical buffer.
4069 				 */
4070 				struct bus_dma_segment seg;
4071 				seg.ds_addr = (bus_addr_t)
4072 				    (vm_offset_t)csio->data_ptr;
4073 				seg.ds_len = csio->dxfer_len;
4074 				(*cb)(req, &seg, 1, 0);
4075 			}
4076 		} else {
4077 			/*
4078 			 * We have been given a list of addresses.
4079 			 * This case could be easily supported but they are not
4080 			 * currently generated by the CAM subsystem so there
4081 			 * is no point in wasting the time right now.
4082 			 */
4083 			struct bus_dma_segment *sgs;
4084 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4085 				(*cb)(req, NULL, 0, EFAULT);
4086 			} else {
4087 				/* Just use the segments provided */
4088 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4089 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4090 			}
4091 		}
4092 		CAMLOCK_2_MPTLOCK(mpt);
4093 	} else {
4094 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4095 
4096 		/*
4097 		 * XXX: I don't know why this seems to happen, but
4098 		 * XXX: completing the CCB seems to make things happy.
4099 		 * XXX: This seems to happen if the initiator requests
4100 		 * XXX: enough data that we have to do multiple CTIOs.
4101 		 */
4102 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4103 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4104 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4105 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4106 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4107 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4108 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4109 			MPTLOCK_2_CAMLOCK(mpt);
4110 			xpt_done(ccb);
4111 			CAMLOCK_2_MPTLOCK(mpt);
4112 			return;
4113 		}
4114 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4115 			sp = sense;
4116 			memcpy(sp, &csio->sense_data,
4117 			   min(csio->sense_len, MPT_SENSE_SIZE));
4118 		}
4119 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4120 	}
4121 }
4122 
4123 static void
4124 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4125     uint32_t lun, int send, uint8_t *data, size_t length)
4126 {
4127 	mpt_tgt_state_t *tgt;
4128 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4129 	SGE_SIMPLE32 *se;
4130 	uint32_t flags;
4131 	uint8_t *dptr;
4132 	bus_addr_t pptr;
4133 	request_t *req;
4134 
4135 	if (length == 0) {
4136 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4137 		return;
4138 	}
4139 
4140 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4141 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4142 		mpt_prt(mpt, "out of resources- dropping local response\n");
4143 		return;
4144 	}
4145 	tgt->is_local = 1;
4146 
4147 
4148 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4149 	ta = req->req_vbuf;
4150 
4151 	if (mpt->is_sas) {
4152 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4153 		ta->QueueTag = ssp->InitiatorTag;
4154 	} else if (mpt->is_spi) {
4155 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4156 		ta->QueueTag = sp->Tag;
4157 	}
4158 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4159 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4160 	ta->ReplyWord = htole32(tgt->reply_desc);
4161 	if (lun > 256) {
4162 		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4163 		ta->LUN[1] = lun & 0xff;
4164 	} else {
4165 		ta->LUN[1] = lun;
4166 	}
4167 	ta->RelativeOffset = 0;
4168 	ta->DataLength = length;
4169 
4170 	dptr = req->req_vbuf;
4171 	dptr += MPT_RQSL(mpt);
4172 	pptr = req->req_pbuf;
4173 	pptr += MPT_RQSL(mpt);
4174 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4175 
4176 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4177 	memset(se, 0,sizeof (*se));
4178 
4179 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4180 	if (send) {
4181 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4182 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4183 	}
4184 	se->Address = pptr;
4185 	MPI_pSGE_SET_LENGTH(se, length);
4186 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4187 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4188 	MPI_pSGE_SET_FLAGS(se, flags);
4189 
4190 	tgt->ccb = NULL;
4191 	tgt->req = req;
4192 	tgt->resid = 0;
4193 	tgt->bytes_xfered = length;
4194 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4195 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4196 #else
4197 	tgt->state = TGT_STATE_MOVING_DATA;
4198 #endif
4199 	mpt_send_cmd(mpt, req);
4200 }
4201 
4202 /*
4203  * Abort queued up CCBs
4204  */
4205 static cam_status
4206 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4207 {
4208 	struct mpt_hdr_stailq *lp;
4209 	struct ccb_hdr *srch;
4210 	int found = 0;
4211 	union ccb *accb = ccb->cab.abort_ccb;
4212 	tgt_resource_t *trtp;
4213 
4214 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4215 
4216 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4217 		trtp = &mpt->trt_wildcard;
4218 	} else {
4219 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4220 	}
4221 
4222 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4223 		lp = &trtp->atios;
4224 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4225 		lp = &trtp->inots;
4226 	} else {
4227 		return (CAM_REQ_INVALID);
4228 	}
4229 
4230 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4231 		if (srch == &accb->ccb_h) {
4232 			found = 1;
4233 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4234 			break;
4235 		}
4236 	}
4237 	if (found) {
4238 		accb->ccb_h.status = CAM_REQ_ABORTED;
4239 		xpt_done(accb);
4240 		return (CAM_REQ_CMP);
4241 	}
4242 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
4243 	return (CAM_PATH_INVALID);
4244 }
4245 
4246 /*
4247  * Ask the MPT to abort the current target command
4248  */
4249 static int
4250 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4251 {
4252 	int error;
4253 	request_t *req;
4254 	PTR_MSG_TARGET_MODE_ABORT abtp;
4255 
4256 	req = mpt_get_request(mpt, FALSE);
4257 	if (req == NULL) {
4258 		return (-1);
4259 	}
4260 	abtp = req->req_vbuf;
4261 	memset(abtp, 0, sizeof (*abtp));
4262 
4263 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4264 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4265 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4266 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4267 	error = 0;
4268 	if (mpt->is_fc || mpt->is_sas) {
4269 		mpt_send_cmd(mpt, req);
4270 	} else {
4271 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4272 	}
4273 	return (error);
4274 }
4275 
4276 /*
4277  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4278  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4279  * FC929 to set bogus FC_RSP fields (nonzero residuals
4280  * but w/o RESID fields set). This causes QLogic initiators
4281  * to think maybe that a frame was lost.
4282  *
4283  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4284  * we use allocated requests to do TARGET_ASSIST and we
4285  * need to know when to release them.
4286  */
4287 
4288 static void
4289 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4290     uint8_t status, uint8_t const *sense_data)
4291 {
4292 	uint8_t *cmd_vbuf;
4293 	mpt_tgt_state_t *tgt;
4294 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4295 	request_t *req;
4296 	bus_addr_t paddr;
4297 	int resplen = 0;
4298 
4299 	cmd_vbuf = cmd_req->req_vbuf;
4300 	cmd_vbuf += MPT_RQSL(mpt);
4301 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4302 
4303 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4304 		if (mpt->outofbeer == 0) {
4305 			mpt->outofbeer = 1;
4306 			xpt_freeze_simq(mpt->sim, 1);
4307 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4308 		}
4309 		if (ccb) {
4310 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4311 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4312 			MPTLOCK_2_CAMLOCK(mpt);
4313 			xpt_done(ccb);
4314 			CAMLOCK_2_MPTLOCK(mpt);
4315 		} else {
4316 			mpt_prt(mpt,
4317 			    "could not allocate status request- dropping\n");
4318 		}
4319 		return;
4320 	}
4321 	req->ccb = ccb;
4322 	if (ccb) {
4323 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4324 		ccb->ccb_h.ccb_req_ptr = req;
4325 	}
4326 
4327 	/*
4328 	 * Record the currently active ccb, if any, and the
4329 	 * request for it in our target state area.
4330 	 */
4331 	tgt->ccb = ccb;
4332 	tgt->req = req;
4333 	tgt->state = TGT_STATE_SENDING_STATUS;
4334 
4335 	tp = req->req_vbuf;
4336 	paddr = req->req_pbuf;
4337 	paddr += MPT_RQSL(mpt);
4338 
4339 	memset(tp, 0, sizeof (*tp));
4340 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4341 	if (mpt->is_fc) {
4342 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4343 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4344 		uint8_t *sts_vbuf;
4345 		uint32_t *rsp;
4346 
4347 		sts_vbuf = req->req_vbuf;
4348 		sts_vbuf += MPT_RQSL(mpt);
4349 		rsp = (uint32_t *) sts_vbuf;
4350 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4351 
4352 		/*
4353 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4354 		 * It has to be big-endian in memory and is organized
4355 		 * in 32 bit words, which are much easier to deal with
4356 		 * as words which are swizzled as needed.
4357 		 *
4358 		 * All we're filling here is the FC_RSP payload.
4359 		 * We may just have the chip synthesize it if
4360 		 * we have no residual and an OK status.
4361 		 *
4362 		 */
4363 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4364 
4365 		rsp[2] = status;
4366 		if (tgt->resid) {
4367 			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
4368 			rsp[3] = htobe32(tgt->resid);
4369 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4370 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4371 #endif
4372 		}
4373 		if (status == SCSI_STATUS_CHECK_COND) {
4374 			int i;
4375 
4376 			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
4377 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4378 			if (sense_data) {
4379 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4380 			} else {
4381 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4382 				    "TION but no sense data?\n");
4383 				memset(&rsp, 0, MPT_SENSE_SIZE);
4384 			}
4385 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4386 				rsp[i] = htobe32(rsp[i]);
4387 			}
4388 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4389 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4390 #endif
4391 		}
4392 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4393 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4394 #endif
4395 		rsp[2] = htobe32(rsp[2]);
4396 	} else if (mpt->is_sas) {
4397 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4398 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4399 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4400 	} else {
4401 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4402 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4403 		tp->StatusCode = status;
4404 		tp->QueueTag = htole16(sp->Tag);
4405 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4406 	}
4407 
4408 	tp->ReplyWord = htole32(tgt->reply_desc);
4409 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4410 
4411 #ifdef	WE_CAN_USE_AUTO_REPOST
4412 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4413 #endif
4414 	if (status == SCSI_STATUS_OK && resplen == 0) {
4415 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4416 	} else {
4417 		tp->StatusDataSGE.u.Address32 = (uint32_t) paddr;
4418 		tp->StatusDataSGE.FlagsLength =
4419 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4420 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4421 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4422 			MPI_SGE_FLAGS_END_OF_LIST	|
4423 			MPI_SGE_FLAGS_END_OF_BUFFER;
4424 		tp->StatusDataSGE.FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
4425 		tp->StatusDataSGE.FlagsLength |= resplen;
4426 	}
4427 
4428 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4429 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4430 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4431 	    req->serno, tgt->resid);
4432 	if (ccb) {
4433 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4434 		ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz);
4435 	}
4436 	mpt_send_cmd(mpt, req);
4437 }
4438 
4439 static void
4440 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4441     tgt_resource_t *trtp, int init_id)
4442 {
4443 	struct ccb_immed_notify *inot;
4444 	mpt_tgt_state_t *tgt;
4445 
4446 	tgt = MPT_TGT_STATE(mpt, req);
4447 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4448 	if (inot == NULL) {
4449 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4450 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4451 		return;
4452 	}
4453 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4454 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4455 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4456 
4457 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4458 	inot->sense_len = 0;
4459 	memset(inot->message_args, 0, sizeof (inot->message_args));
4460 	inot->initiator_id = init_id;	/* XXX */
4461 
4462 	/*
4463 	 * This is a somewhat grotesque attempt to map from task management
4464 	 * to old style SCSI messages. God help us all.
4465 	 */
4466 	switch (fc) {
4467 	case MPT_ABORT_TASK_SET:
4468 		inot->message_args[0] = MSG_ABORT_TAG;
4469 		break;
4470 	case MPT_CLEAR_TASK_SET:
4471 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4472 		break;
4473 	case MPT_TARGET_RESET:
4474 		inot->message_args[0] = MSG_TARGET_RESET;
4475 		break;
4476 	case MPT_CLEAR_ACA:
4477 		inot->message_args[0] = MSG_CLEAR_ACA;
4478 		break;
4479 	case MPT_TERMINATE_TASK:
4480 		inot->message_args[0] = MSG_ABORT_TAG;
4481 		break;
4482 	default:
4483 		inot->message_args[0] = MSG_NOOP;
4484 		break;
4485 	}
4486 	tgt->ccb = (union ccb *) inot;
4487 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4488 	MPTLOCK_2_CAMLOCK(mpt);
4489 	xpt_done((union ccb *)inot);
4490 	CAMLOCK_2_MPTLOCK(mpt);
4491 }
4492 
4493 static void
4494 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4495 {
4496 	struct ccb_accept_tio *atiop;
4497 	lun_id_t lun;
4498 	int tag_action = 0;
4499 	mpt_tgt_state_t *tgt;
4500 	tgt_resource_t *trtp = NULL;
4501 	U8 *lunptr;
4502 	U8 *vbuf;
4503 	U16 itag;
4504 	U16 ioindex;
4505 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4506 	uint8_t *cdbp;
4507 
4508 	/*
4509 	 * First, DMA sync the received command-
4510 	 * which is in the *request* * phys area.
4511 	 *
4512 	 * XXX: We could optimize this for a range
4513 	 */
4514 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
4515 	    BUS_DMASYNC_POSTREAD);
4516 
4517 	/*
4518 	 * Stash info for the current command where we can get at it later.
4519 	 */
4520 	vbuf = req->req_vbuf;
4521 	vbuf += MPT_RQSL(mpt);
4522 
4523 	/*
4524 	 * Get our state pointer set up.
4525 	 */
4526 	tgt = MPT_TGT_STATE(mpt, req);
4527 	if (tgt->state != TGT_STATE_LOADED) {
4528 		mpt_tgt_dump_req_state(mpt, req);
4529 		panic("bad target state in mpt_scsi_tgt_atio");
4530 	}
4531 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4532 	tgt->state = TGT_STATE_IN_CAM;
4533 	tgt->reply_desc = reply_desc;
4534 	ioindex = GET_IO_INDEX(reply_desc);
4535 
4536 	if (mpt->is_fc) {
4537 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4538 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4539 		if (fc->FcpCntl[2]) {
4540 			/*
4541 			 * Task Management Request
4542 			 */
4543 			switch (fc->FcpCntl[2]) {
4544 			case 0x2:
4545 				fct = MPT_ABORT_TASK_SET;
4546 				break;
4547 			case 0x4:
4548 				fct = MPT_CLEAR_TASK_SET;
4549 				break;
4550 			case 0x20:
4551 				fct = MPT_TARGET_RESET;
4552 				break;
4553 			case 0x40:
4554 				fct = MPT_CLEAR_ACA;
4555 				break;
4556 			case 0x80:
4557 				fct = MPT_TERMINATE_TASK;
4558 				break;
4559 			default:
4560 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4561 				    fc->FcpCntl[2]);
4562 				mpt_scsi_tgt_status(mpt, 0, req,
4563 				    SCSI_STATUS_OK, 0);
4564 				return;
4565 			}
4566 		} else {
4567 			switch (fc->FcpCntl[1]) {
4568 			case 0:
4569 				tag_action = MSG_SIMPLE_Q_TAG;
4570 				break;
4571 			case 1:
4572 				tag_action = MSG_HEAD_OF_Q_TAG;
4573 				break;
4574 			case 2:
4575 				tag_action = MSG_ORDERED_Q_TAG;
4576 				break;
4577 			default:
4578 				/*
4579 				 * Bah. Ignore Untagged Queing and ACA
4580 				 */
4581 				tag_action = MSG_SIMPLE_Q_TAG;
4582 				break;
4583 			}
4584 		}
4585 		tgt->resid = be32toh(fc->FcpDl);
4586 		cdbp = fc->FcpCdb;
4587 		lunptr = fc->FcpLun;
4588 		itag = be16toh(fc->OptionalOxid);
4589 	} else if (mpt->is_sas) {
4590 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4591 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4592 		cdbp = ssp->CDB;
4593 		lunptr = ssp->LogicalUnitNumber;
4594 		itag = ssp->InitiatorTag;
4595 	} else {
4596 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4597 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4598 		cdbp = sp->CDB;
4599 		lunptr = sp->LogicalUnitNumber;
4600 		itag = sp->Tag;
4601 	}
4602 
4603 	/*
4604 	 * Generate a simple lun
4605 	 */
4606 	switch (lunptr[0] & 0xc0) {
4607 	case 0x40:
4608 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
4609 		break;
4610 	case 0:
4611 		lun = lunptr[1];
4612 		break;
4613 	default:
4614 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
4615 		lun = 0xffff;
4616 		break;
4617 	}
4618 
4619 	/*
4620 	 * Deal with non-enabled or bad luns here.
4621 	 */
4622 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4623 	    mpt->trt[lun].enabled == 0) {
4624 		if (mpt->twildcard) {
4625 			trtp = &mpt->trt_wildcard;
4626 		} else if (fct == MPT_NIL_TMT_VALUE) {
4627 			/*
4628 			 * In this case, we haven't got an upstream listener
4629 			 * for either a specific lun or wildcard luns. We
4630 			 * have to make some sensible response. For regular
4631 			 * inquiry, just return some NOT HERE inquiry data.
4632 			 * For VPD inquiry, report illegal field in cdb.
4633 			 * For REQUEST SENSE, just return NO SENSE data.
4634 			 * REPORT LUNS gets illegal command.
4635 			 * All other commands get 'no such device'.
4636 			 */
4637 
4638 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
4639 
4640 			mpt_prt(mpt, "CMD 0x%x to unmanaged lun %u\n",
4641 			    cdbp[0], lun);
4642 
4643 			memset(buf, 0, MPT_SENSE_SIZE);
4644 			cond = SCSI_STATUS_CHECK_COND;
4645 			buf[0] = 0xf0;
4646 			buf[2] = 0x5;
4647 			buf[7] = 0x8;
4648 			sp = buf;
4649 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4650 
4651 			switch (cdbp[0]) {
4652 			case INQUIRY:
4653 			{
4654 				static uint8_t iqd[8] = {
4655 				    0x7f, 0x0, 0x4, 0x12, 0x0
4656 				};
4657 				if (cdbp[1] != 0) {
4658 					buf[12] = 0x26;
4659 					buf[13] = 0x01;
4660 					break;
4661 				}
4662 				mpt_prt(mpt, "local inquiry\n");
4663 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4664 				    iqd, sizeof (iqd));
4665 				return;
4666 			}
4667 			case REQUEST_SENSE:
4668 			{
4669 				buf[2] = 0x0;
4670 				mpt_prt(mpt, "local request sense\n");
4671 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4672 				    buf, sizeof (buf));
4673 				return;
4674 			}
4675 			case REPORT_LUNS:
4676 				buf[12] = 0x26;
4677 				break;
4678 			default:
4679 				buf[12] = 0x25;
4680 				break;
4681 			}
4682 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
4683 			return;
4684 		}
4685 		/* otherwise, leave trtp NULL */
4686 	} else {
4687 		trtp = &mpt->trt[lun];
4688 	}
4689 
4690 	/*
4691 	 * Deal with any task management
4692 	 */
4693 	if (fct != MPT_NIL_TMT_VALUE) {
4694 		if (trtp == NULL) {
4695 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
4696 			    fct);
4697 			mpt_scsi_tgt_status(mpt, 0, req,
4698 			    SCSI_STATUS_OK, 0);
4699 		} else {
4700 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
4701 			    GET_INITIATOR_INDEX(reply_desc));
4702 		}
4703 		return;
4704 	}
4705 
4706 
4707 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
4708 	if (atiop == NULL) {
4709 		mpt_lprt(mpt, MPT_PRT_WARN,
4710 		    "no ATIOs for lun %u- sending back %s\n", lun,
4711 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
4712 		mpt_scsi_tgt_status(mpt, NULL, req,
4713 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
4714 		    NULL);
4715 		return;
4716 	}
4717 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
4718 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4719 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
4720 	atiop->ccb_h.ccb_mpt_ptr = mpt;
4721 	atiop->ccb_h.status = CAM_CDB_RECVD;
4722 	atiop->ccb_h.target_lun = lun;
4723 	atiop->sense_len = 0;
4724 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
4725 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
4726 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
4727 
4728 	/*
4729 	 * The tag we construct here allows us to find the
4730 	 * original request that the command came in with.
4731 	 *
4732 	 * This way we don't have to depend on anything but the
4733 	 * tag to find things when CCBs show back up from CAM.
4734 	 */
4735 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4736 	tgt->tag_id = atiop->tag_id;
4737 	if (tag_action) {
4738 		atiop->tag_action = tag_action;
4739 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
4740 	}
4741 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4742 		int i;
4743 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
4744 		    atiop->ccb_h.target_lun);
4745 		for (i = 0; i < atiop->cdb_len; i++) {
4746 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
4747 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
4748 		}
4749 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
4750 	    	    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
4751 	}
4752 
4753 	MPTLOCK_2_CAMLOCK(mpt);
4754 	xpt_done((union ccb *)atiop);
4755 	CAMLOCK_2_MPTLOCK(mpt);
4756 }
4757 
4758 static void
4759 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
4760 {
4761 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4762 
4763 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
4764 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
4765 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
4766 	    tgt->tag_id, tgt->state);
4767 }
4768 
4769 static void
4770 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
4771 {
4772 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
4773 	    req->index, req->index, req->state);
4774 	mpt_tgt_dump_tgt_state(mpt, req);
4775 }
4776 
4777 static int
4778 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
4779     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
4780 {
4781 	int dbg;
4782 	union ccb *ccb;
4783 	U16 status;
4784 
4785 	if (reply_frame == NULL) {
4786 		/*
4787 		 * Figure out what the state of the command is.
4788 		 */
4789 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4790 
4791 #ifdef	INVARIANTS
4792 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
4793 		if (tgt->req) {
4794 			mpt_req_not_spcl(mpt, tgt->req,
4795 			    "turbo scsi_tgt_reply associated req", __LINE__);
4796 		}
4797 #endif
4798 		switch(tgt->state) {
4799 		case TGT_STATE_LOADED:
4800 			/*
4801 			 * This is a new command starting.
4802 			 */
4803 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
4804 			break;
4805 		case TGT_STATE_MOVING_DATA:
4806 		{
4807 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4808 
4809 			ccb = tgt->ccb;
4810 			if (tgt->req == NULL) {
4811 				panic("mpt: turbo target reply with null "
4812 				    "associated request moving data");
4813 				/* NOTREACHED */
4814 			}
4815 			if (ccb == NULL) {
4816 				if (tgt->is_local == 0) {
4817 					panic("mpt: turbo target reply with "
4818 					    "null associated ccb moving data");
4819 					/* NOTREACHED */
4820 				}
4821 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4822 				    "TARGET_ASSIST local done\n");
4823 				TAILQ_REMOVE(&mpt->request_pending_list,
4824 				    tgt->req, links);
4825 				mpt_free_request(mpt, tgt->req);
4826 				tgt->req = NULL;
4827 				mpt_scsi_tgt_status(mpt, NULL, req,
4828 				    0, NULL);
4829 				return (TRUE);
4830 			}
4831 			tgt->ccb = NULL;
4832 			tgt->nxfers++;
4833 			untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
4834 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4835 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
4836 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
4837 			/*
4838 			 * Free the Target Assist Request
4839 			 */
4840 			KASSERT(tgt->req->ccb == ccb,
4841 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
4842 			    tgt->req->serno, tgt->req->ccb));
4843 			TAILQ_REMOVE(&mpt->request_pending_list,
4844 			    tgt->req, links);
4845 			mpt_free_request(mpt, tgt->req);
4846 			tgt->req = NULL;
4847 
4848 			/*
4849 			 * Do we need to send status now? That is, are
4850 			 * we done with all our data transfers?
4851 			 */
4852 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4853 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4854 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4855 				KASSERT(ccb->ccb_h.status,
4856 				    ("zero ccb sts at %d\n", __LINE__));
4857 				tgt->state = TGT_STATE_IN_CAM;
4858 				if (mpt->outofbeer) {
4859 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4860 					mpt->outofbeer = 0;
4861 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4862 				}
4863 				MPTLOCK_2_CAMLOCK(mpt);
4864 				xpt_done(ccb);
4865 				CAMLOCK_2_MPTLOCK(mpt);
4866 				break;
4867 			}
4868 			/*
4869 			 * Otherwise, send status (and sense)
4870 			 */
4871 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4872 				sp = sense;
4873 				memcpy(sp, &ccb->csio.sense_data,
4874 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
4875 			}
4876 			mpt_scsi_tgt_status(mpt, ccb, req,
4877 			    ccb->csio.scsi_status, sp);
4878 			break;
4879 		}
4880 		case TGT_STATE_SENDING_STATUS:
4881 		case TGT_STATE_MOVING_DATA_AND_STATUS:
4882 		{
4883 			int ioindex;
4884 			ccb = tgt->ccb;
4885 
4886 			if (tgt->req == NULL) {
4887 				panic("mpt: turbo target reply with null "
4888 				    "associated request sending status");
4889 				/* NOTREACHED */
4890 			}
4891 
4892 			if (ccb) {
4893 				tgt->ccb = NULL;
4894 				if (tgt->state ==
4895 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
4896 					tgt->nxfers++;
4897 				}
4898 				untimeout(mpt_timeout, ccb,
4899 				    ccb->ccb_h.timeout_ch);
4900 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4901 					ccb->ccb_h.status |= CAM_SENT_SENSE;
4902 				}
4903 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4904 				    "TARGET_STATUS tag %x sts %x flgs %x req "
4905 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
4906 				    ccb->ccb_h.flags, tgt->req);
4907 				/*
4908 				 * Free the Target Send Status Request
4909 				 */
4910 				KASSERT(tgt->req->ccb == ccb,
4911 				    ("tgt->req %p:%u tgt->req->ccb %p",
4912 				    tgt->req, tgt->req->serno, tgt->req->ccb));
4913 				/*
4914 				 * Notify CAM that we're done
4915 				 */
4916 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4917 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4918 				KASSERT(ccb->ccb_h.status,
4919 				    ("ZERO ccb sts at %d\n", __LINE__));
4920 				tgt->ccb = NULL;
4921 			} else {
4922 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4923 				    "TARGET_STATUS non-CAM for  req %p:%u\n",
4924 				    tgt->req, tgt->req->serno);
4925 			}
4926 			TAILQ_REMOVE(&mpt->request_pending_list,
4927 			    tgt->req, links);
4928 			mpt_free_request(mpt, tgt->req);
4929 			tgt->req = NULL;
4930 
4931 			/*
4932 			 * And re-post the Command Buffer.
4933 			 * This will reset the state.
4934 			 */
4935 			ioindex = GET_IO_INDEX(reply_desc);
4936 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4937 			tgt->is_local = 0;
4938 			mpt_post_target_command(mpt, req, ioindex);
4939 
4940 			/*
4941 			 * And post a done for anyone who cares
4942 			 */
4943 			if (ccb) {
4944 				if (mpt->outofbeer) {
4945 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4946 					mpt->outofbeer = 0;
4947 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4948 				}
4949 				MPTLOCK_2_CAMLOCK(mpt);
4950 				xpt_done(ccb);
4951 				CAMLOCK_2_MPTLOCK(mpt);
4952 			}
4953 			break;
4954 		}
4955 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
4956 			tgt->state = TGT_STATE_LOADED;
4957 			break;
4958 		default:
4959 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
4960 			    "Reply Function\n", tgt->state);
4961 		}
4962 		return (TRUE);
4963 	}
4964 
4965 	status = le16toh(reply_frame->IOCStatus);
4966 	if (status != MPI_IOCSTATUS_SUCCESS) {
4967 		dbg = MPT_PRT_ERROR;
4968 	} else {
4969 		dbg = MPT_PRT_DEBUG1;
4970 	}
4971 
4972 	mpt_lprt(mpt, dbg,
4973 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
4974 	     req, req->serno, reply_frame, reply_frame->Function, status);
4975 
4976 	switch (reply_frame->Function) {
4977 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
4978 	{
4979 		mpt_tgt_state_t *tgt;
4980 #ifdef	INVARIANTS
4981 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
4982 #endif
4983 		if (status != MPI_IOCSTATUS_SUCCESS) {
4984 			/*
4985 			 * XXX What to do?
4986 			 */
4987 			break;
4988 		}
4989 		tgt = MPT_TGT_STATE(mpt, req);
4990 		KASSERT(tgt->state == TGT_STATE_LOADING,
4991 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
4992 		mpt_assign_serno(mpt, req);
4993 		tgt->state = TGT_STATE_LOADED;
4994 		break;
4995 	}
4996 	case MPI_FUNCTION_TARGET_ASSIST:
4997 #ifdef	INVARIANTS
4998 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
4999 #endif
5000 		mpt_prt(mpt, "target assist completion\n");
5001 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5002 		mpt_free_request(mpt, req);
5003 		break;
5004 	case MPI_FUNCTION_TARGET_STATUS_SEND:
5005 #ifdef	INVARIANTS
5006 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5007 #endif
5008 		mpt_prt(mpt, "status send completion\n");
5009 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5010 		mpt_free_request(mpt, req);
5011 		break;
5012 	case MPI_FUNCTION_TARGET_MODE_ABORT:
5013 	{
5014 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5015 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5016 		PTR_MSG_TARGET_MODE_ABORT abtp =
5017 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5018 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5019 #ifdef	INVARIANTS
5020 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5021 #endif
5022 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5023 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5024 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5025 		mpt_free_request(mpt, req);
5026 		break;
5027 	}
5028 	default:
5029 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5030 		    "0x%x\n", reply_frame->Function);
5031 		break;
5032 	}
5033 	return (TRUE);
5034 }
5035