xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision 3fe92528afe8313fecf48822dde74bad5e380f48)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  *
61  * Support from LSI-Logic has also gone a great deal toward making this a
62  * workable subsystem and is gratefully acknowledged.
63  */
64 /*-
65  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66  * Copyright (c) 2005, WHEEL Sp. z o.o.
67  * Copyright (c) 2004, 2005 Justin T. Gibbs
68  * All rights reserved.
69  *
70  * Redistribution and use in source and binary forms, with or without
71  * modification, are permitted provided that the following conditions are
72  * met:
73  * 1. Redistributions of source code must retain the above copyright
74  *    notice, this list of conditions and the following disclaimer.
75  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76  *    substantially similar to the "NO WARRANTY" disclaimer below
77  *    ("Disclaimer") and any redistribution must be conditioned upon including
78  *    a substantially similar Disclaimer requirement for further binary
79  *    redistribution.
80  * 3. Neither the names of the above listed copyright holders nor the names
81  *    of any contributors may be used to endorse or promote products derived
82  *    from this software without specific prior written permission.
83  *
84  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95  */
96 #include <sys/cdefs.h>
97 __FBSDID("$FreeBSD$");
98 
99 #include <dev/mpt/mpt.h>
100 #include <dev/mpt/mpt_cam.h>
101 #include <dev/mpt/mpt_raid.h>
102 
103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/mpt/mpilib/mpi_init.h"
105 #include "dev/mpt/mpilib/mpi_targ.h"
106 #include "dev/mpt/mpilib/mpi_fc.h"
107 #if __FreeBSD_version >= 500000
108 #include <sys/sysctl.h>
109 #endif
110 #include <sys/callout.h>
111 #include <sys/kthread.h>
112 
113 static void mpt_poll(struct cam_sim *);
114 static timeout_t mpt_timeout;
115 static void mpt_action(struct cam_sim *, union ccb *);
116 static int
117 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
118 static void mpt_setwidth(struct mpt_softc *, int, int);
119 static void mpt_setsync(struct mpt_softc *, int, int, int);
120 static int mpt_update_spi_config(struct mpt_softc *, int);
121 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
122 
123 static mpt_reply_handler_t mpt_scsi_reply_handler;
124 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
125 static mpt_reply_handler_t mpt_fc_els_reply_handler;
126 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
127 					MSG_DEFAULT_REPLY *);
128 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
129 static int mpt_fc_reset_link(struct mpt_softc *, int);
130 
131 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
132 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
133 static void mpt_recovery_thread(void *arg);
134 static void mpt_recover_commands(struct mpt_softc *mpt);
135 
136 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
137     u_int, u_int, u_int, int);
138 
139 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
140 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
141 static int mpt_add_els_buffers(struct mpt_softc *mpt);
142 static int mpt_add_target_commands(struct mpt_softc *mpt);
143 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
144 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
145 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
146 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
147 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
148 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
149     uint8_t, uint8_t const *);
150 static void
151 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
152     tgt_resource_t *, int);
153 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
154 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
155 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
156 
157 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
158 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
159 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
160 
161 static mpt_probe_handler_t	mpt_cam_probe;
162 static mpt_attach_handler_t	mpt_cam_attach;
163 static mpt_enable_handler_t	mpt_cam_enable;
164 static mpt_ready_handler_t	mpt_cam_ready;
165 static mpt_event_handler_t	mpt_cam_event;
166 static mpt_reset_handler_t	mpt_cam_ioc_reset;
167 static mpt_detach_handler_t	mpt_cam_detach;
168 
169 static struct mpt_personality mpt_cam_personality =
170 {
171 	.name		= "mpt_cam",
172 	.probe		= mpt_cam_probe,
173 	.attach		= mpt_cam_attach,
174 	.enable		= mpt_cam_enable,
175 	.ready		= mpt_cam_ready,
176 	.event		= mpt_cam_event,
177 	.reset		= mpt_cam_ioc_reset,
178 	.detach		= mpt_cam_detach,
179 };
180 
181 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
182 
183 int
184 mpt_cam_probe(struct mpt_softc *mpt)
185 {
186 	int role;
187 
188 	/*
189 	 * Only attach to nodes that support the initiator or target role
190 	 * (or want to) or have RAID physical devices that need CAM pass-thru
191 	 * support.
192 	 */
193 	if (mpt->do_cfg_role) {
194 		role = mpt->cfg_role;
195 	} else {
196 		role = mpt->role;
197 	}
198 	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
199 	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
200 		return (0);
201 	}
202 	return (ENODEV);
203 }
204 
205 int
206 mpt_cam_attach(struct mpt_softc *mpt)
207 {
208 	struct cam_devq *devq;
209 	mpt_handler_t	 handler;
210 	int		 maxq;
211 	int		 error;
212 
213 	TAILQ_INIT(&mpt->request_timeout_list);
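	/*
	 * Size the SIM queue depth to the smaller of the IOC's advertised
	 * global credits and our own request pool; requests reserved below
	 * (ELS buffers, the dedicated TMF request) are then subtracted
	 * from this depth.
	 */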
214 	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
215 	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
216 
217 	handler.reply_handler = mpt_scsi_reply_handler;
218 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
219 				     &scsi_io_handler_id);
220 	if (error != 0) {
221 		goto cleanup0;
222 	}
223 
224 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
225 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
226 				     &scsi_tmf_handler_id);
227 	if (error != 0) {
228 		goto cleanup0;
229 	}
230 
231 	/*
232 	 * If we're fibre channel and could support target mode, we register
233 	 * an ELS reply handler and give it resources.
234 	 */
235 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
236 		handler.reply_handler = mpt_fc_els_reply_handler;
237 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
238 		    &fc_els_handler_id);
239 		if (error != 0) {
240 			goto cleanup0;
241 		}
242 		if (mpt_add_els_buffers(mpt) == FALSE) {
243 			error = ENOMEM;
244 			goto cleanup0;
245 		}
246 		maxq -= mpt->els_cmds_allocated;
247 	}
248 
249 	/*
250 	 * If we support target mode, we register a reply handler for it,
251 	 * but don't add command resources until we actually enable target
252 	 * mode.
253 	 */
254 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
255 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
256 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
257 		    &mpt->scsi_tgt_handler_id);
258 		if (error != 0) {
259 			goto cleanup0;
260 		}
261 	}
262 
263 	/*
264 	 * We keep one request reserved for timeout TMF requests.
265 	 */
266 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
267 	if (mpt->tmf_req == NULL) {
268 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
269 		error = ENOMEM;
270 		goto cleanup0;
271 	}
272 
273 	/*
274 	 * Mark the request as free even though not on the free list.
275 	 * There is only one TMF request allowed to be outstanding at
276 	 * a time and the TMF routines perform their own allocation
277 	 * tracking using the standard state flags.
278 	 */
279 	mpt->tmf_req->state = REQ_STATE_FREE;
280 	maxq--;
281 
282 	if (mpt_spawn_recovery_thread(mpt) != 0) {
283 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
284 		error = ENOMEM;
285 		goto cleanup0;
286 	}
287 
288 	/*
289 	 * The rest of this is CAM foo, for which we need to drop our lock
290 	 */
291 	MPTLOCK_2_CAMLOCK(mpt);
292 
293 	/*
294 	 * Create the device queue for our SIM(s).
295 	 */
296 	devq = cam_simq_alloc(maxq);
297 	if (devq == NULL) {
298 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
299 		error = ENOMEM;
300 		goto cleanup;
301 	}
302 
303 	/*
304 	 * Construct our SIM entry.
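	 * The 1 and maxq arguments are cam_sim_alloc()'s untagged and
	 * tagged per-device transaction limits, respectively.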
305 	 */
306 	mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
307 	    mpt->unit, 1, maxq, devq);
308 	if (mpt->sim == NULL) {
309 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
310 		cam_simq_free(devq);
311 		error = ENOMEM;
312 		goto cleanup;
313 	}
314 
315 	/*
316 	 * Register exactly this bus.
317 	 */
318 	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
319 		mpt_prt(mpt, "Bus registration Failed!\n");
320 		error = ENOMEM;
321 		goto cleanup;
322 	}
323 
324 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
325 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
326 		mpt_prt(mpt, "Unable to allocate Path!\n");
327 		error = ENOMEM;
328 		goto cleanup;
329 	}
330 
331 	/*
332 	 * Only register a second bus for RAID physical
333 	 * devices if the controller supports RAID.
334 	 */
335 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
336 		CAMLOCK_2_MPTLOCK(mpt);
337 		return (0);
338 	}
339 
340 	/*
341 	 * Create a "bus" to export all hidden disks to CAM.
342 	 */
343 	mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
344 	    mpt->unit, 1, maxq, devq);
345 	if (mpt->phydisk_sim == NULL) {
346 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
347 		error = ENOMEM;
348 		goto cleanup;
349 	}
350 
351 	/*
352 	 * Register this bus.
353 	 */
354 	if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
355 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
356 		error = ENOMEM;
357 		goto cleanup;
358 	}
359 
360 	if (xpt_create_path(&mpt->phydisk_path, NULL,
361 	    cam_sim_path(mpt->phydisk_sim),
362 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
363 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
364 		error = ENOMEM;
365 		goto cleanup;
366 	}
367 	CAMLOCK_2_MPTLOCK(mpt);
368 	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
369 	return (0);
370 
371 cleanup:
372 	CAMLOCK_2_MPTLOCK(mpt);
373 cleanup0:
374 	mpt_cam_detach(mpt);
375 	return (error);
376 }
377 
378 /*
379  * Read FC configuration information
380  */
381 static int
382 mpt_read_config_info_fc(struct mpt_softc *mpt)
383 {
384 	char *topology = NULL;
385 	int rv;
386 
387 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
388 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
389 	if (rv) {
390 		return (-1);
391 	}
392 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
393 		 mpt->mpt_fcport_page0.Header.PageVersion,
394 		 mpt->mpt_fcport_page0.Header.PageLength,
395 		 mpt->mpt_fcport_page0.Header.PageNumber,
396 		 mpt->mpt_fcport_page0.Header.PageType);
397 
398 
399 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
400 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
401 	if (rv) {
402 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
403 		return (-1);
404 	}
405 
406 	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
407 
408 	switch (mpt->mpt_fcport_page0.Flags &
409 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
410 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
411 		mpt->mpt_fcport_speed = 0;
412 		topology = "<NO LOOP>";
413 		break;
414 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
415 		topology = "N-Port";
416 		break;
417 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
418 		topology = "NL-Port";
419 		break;
420 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
421 		topology = "F-Port";
422 		break;
423 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
424 		topology = "FL-Port";
425 		break;
426 	default:
427 		mpt->mpt_fcport_speed = 0;
428 		topology = "?";
429 		break;
430 	}
431 
432 	mpt_lprt(mpt, MPT_PRT_INFO,
433 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
434 	    "Speed %u-Gbit\n", topology,
435 	    mpt->mpt_fcport_page0.WWNN.High,
436 	    mpt->mpt_fcport_page0.WWNN.Low,
437 	    mpt->mpt_fcport_page0.WWPN.High,
438 	    mpt->mpt_fcport_page0.WWPN.Low,
439 	    mpt->mpt_fcport_speed);
440 #if __FreeBSD_version >= 500000
441 	{
442 		struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
443 		struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
444 
445 		snprintf(mpt->scinfo.fc.wwnn,
446 		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
447 		    mpt->mpt_fcport_page0.WWNN.High,
448 		    mpt->mpt_fcport_page0.WWNN.Low);
449 
450 		snprintf(mpt->scinfo.fc.wwpn,
451 		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
452 		    mpt->mpt_fcport_page0.WWPN.High,
453 		    mpt->mpt_fcport_page0.WWPN.Low);
454 
455 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
456 		       "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
457 		       "World Wide Node Name");
458 
459 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
460 		       "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
461 		       "World Wide Port Name");
462 
463 	}
464 #endif
465 	return (0);
466 }
467 
468 /*
469  * Set FC configuration information.
470  */
471 static int
472 mpt_set_initial_config_fc(struct mpt_softc *mpt)
473 {
474 
475 	CONFIG_PAGE_FC_PORT_1 fc;
476 	U32 fl;
477 	int r, doit = 0;
478 	int role;
479 
480 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
481 	    &fc.Header, FALSE, 5000);
482 	if (r) {
483 		mpt_prt(mpt, "failed to read FC page 1 header\n");
484 		return (mpt_fc_reset_link(mpt, 1));
485 	}
486 
487 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
488 	    &fc.Header, sizeof (fc), FALSE, 5000);
489 	if (r) {
490 		mpt_prt(mpt, "failed to read FC page 1\n");
491 		return (mpt_fc_reset_link(mpt, 1));
492 	}
493 
494 	/*
495 	 * Check our flags to make sure we support the role we want.
496 	 */
497 	doit = 0;
498 	role = 0;
499 	fl = le32toh(fc.Flags);
500 
501 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
502 		role |= MPT_ROLE_INITIATOR;
503 	}
504 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
505 		role |= MPT_ROLE_TARGET;
506 	}
507 
508 	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
509 
510 	if (mpt->do_cfg_role == 0) {
511 		role = mpt->cfg_role;
512 	} else {
513 		mpt->do_cfg_role = 0;
514 	}
515 
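	/*
	 * At this point 'role' holds what the NVRAM page allows; if we
	 * were not asked to reconfigure, it was forced equal to cfg_role
	 * above so the block below becomes a no-op. Reconcile it with
	 * the configured role, noting any protocol flag changes that
	 * must be written back.
	 */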
516 	if (role != mpt->cfg_role) {
517 		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
518 			if ((role & MPT_ROLE_INITIATOR) == 0) {
519 				mpt_prt(mpt, "adding initiator role\n");
520 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
521 				doit++;
522 			} else {
523 				mpt_prt(mpt, "keeping initiator role\n");
524 			}
525 		} else if (role & MPT_ROLE_INITIATOR) {
526 			mpt_prt(mpt, "removing initiator role\n");
527 			doit++;
528 		}
529 		if (mpt->cfg_role & MPT_ROLE_TARGET) {
530 			if ((role & MPT_ROLE_TARGET) == 0) {
531 				mpt_prt(mpt, "adding target role\n");
532 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
533 				doit++;
534 			} else {
535 				mpt_prt(mpt, "keeping target role\n");
536 			}
537 		} else if (role & MPT_ROLE_TARGET) {
538 			mpt_prt(mpt, "removing target role\n");
539 			doit++;
540 		}
541 		mpt->role = mpt->cfg_role;
542 	}
543 
544 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
545 		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
546 			mpt_prt(mpt, "adding OXID option\n");
547 			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
548 			doit++;
549 		}
550 	}
551 
552 	if (doit) {
553 		fc.Flags = htole32(fl);
554 		r = mpt_write_cfg_page(mpt,
555 		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
556 		    sizeof(fc), FALSE, 5000);
557 		if (r != 0) {
558 			mpt_prt(mpt, "failed to update NVRAM with changes\n");
559 			return (0);
560 		}
561 		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
562 		    "effect until next reboot or IOC reset\n");
563 	}
564 	return (0);
565 }
566 
567 /*
568  * Read SAS configuration information. Nothing to do yet.
569  */
570 static int
571 mpt_read_config_info_sas(struct mpt_softc *mpt)
572 {
573 	return (0);
574 }
575 
576 /*
577  * Set SAS configuration information. Nothing to do yet.
578  */
579 static int
580 mpt_set_initial_config_sas(struct mpt_softc *mpt)
581 {
582 	return (0);
583 }
584 
585 /*
586  * Read SCSI configuration information
587  */
588 static int
589 mpt_read_config_info_spi(struct mpt_softc *mpt)
590 {
591 	int rv, i;
592 
593 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
594 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
595 	if (rv) {
596 		return (-1);
597 	}
598 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
599 	    mpt->mpt_port_page0.Header.PageVersion,
600 	    mpt->mpt_port_page0.Header.PageLength,
601 	    mpt->mpt_port_page0.Header.PageNumber,
602 	    mpt->mpt_port_page0.Header.PageType);
603 
604 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
605 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
606 	if (rv) {
607 		return (-1);
608 	}
609 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
610 	    mpt->mpt_port_page1.Header.PageVersion,
611 	    mpt->mpt_port_page1.Header.PageLength,
612 	    mpt->mpt_port_page1.Header.PageNumber,
613 	    mpt->mpt_port_page1.Header.PageType);
614 
615 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
616 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
617 	if (rv) {
618 		return (-1);
619 	}
620 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
621 	    mpt->mpt_port_page2.Header.PageVersion,
622 	    mpt->mpt_port_page2.Header.PageLength,
623 	    mpt->mpt_port_page2.Header.PageNumber,
624 	    mpt->mpt_port_page2.Header.PageType);
625 
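	/*
	 * Cache the config page headers for Device Page 0 and Device
	 * Page 1 of each of the 16 possible SPI targets; the page
	 * contents themselves are fetched further below.
	 */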
626 	for (i = 0; i < 16; i++) {
627 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
628 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
629 		if (rv) {
630 			return (-1);
631 		}
632 		mpt_lprt(mpt, MPT_PRT_DEBUG,
633 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
634 		    mpt->mpt_dev_page0[i].Header.PageVersion,
635 		    mpt->mpt_dev_page0[i].Header.PageLength,
636 		    mpt->mpt_dev_page0[i].Header.PageNumber,
637 		    mpt->mpt_dev_page0[i].Header.PageType);
638 
639 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
640 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
641 		if (rv) {
642 			return (-1);
643 		}
644 		mpt_lprt(mpt, MPT_PRT_DEBUG,
645 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
646 		    mpt->mpt_dev_page1[i].Header.PageVersion,
647 		    mpt->mpt_dev_page1[i].Header.PageLength,
648 		    mpt->mpt_dev_page1[i].Header.PageNumber,
649 		    mpt->mpt_dev_page1[i].Header.PageType);
650 	}
651 
652 	/*
653 	 * At this point, we don't *have* to fail. As long as we have
654 	 * valid config header information, we can (barely) lurch
655 	 * along.
656 	 */
657 
658 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
659 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
660 	if (rv) {
661 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
662 	} else {
663 		mpt_lprt(mpt, MPT_PRT_DEBUG,
664 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
665 		    mpt->mpt_port_page0.Capabilities,
666 		    mpt->mpt_port_page0.PhysicalInterface);
667 	}
668 
669 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
670 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
671 	if (rv) {
672 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
673 	} else {
674 		mpt_lprt(mpt, MPT_PRT_DEBUG,
675 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
676 		    mpt->mpt_port_page1.Configuration,
677 		    mpt->mpt_port_page1.OnBusTimerValue);
678 	}
679 
680 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
681 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
682 	if (rv) {
683 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
684 	} else {
685 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
686 		    "Port Page 2: Flags %x Settings %x\n",
687 		    mpt->mpt_port_page2.PortFlags,
688 		    mpt->mpt_port_page2.PortSettings);
689 		for (i = 0; i < 16; i++) {
690 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
691 		  	    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
692 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
693 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
694 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
695 		}
696 	}
697 
698 	for (i = 0; i < 16; i++) {
699 		rv = mpt_read_cur_cfg_page(mpt, i,
700 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
701 		    FALSE, 5000);
702 		if (rv) {
703 			mpt_prt(mpt,
704 			    "cannot read SPI Target %d Device Page 0\n", i);
705 			continue;
706 		}
707 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
708 		    "target %d page 0: Negotiated Params %x Information %x\n",
709 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
710 		    mpt->mpt_dev_page0[i].Information);
711 
712 		rv = mpt_read_cur_cfg_page(mpt, i,
713 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
714 		    FALSE, 5000);
715 		if (rv) {
716 			mpt_prt(mpt,
717 			    "cannot read SPI Target %d Device Page 1\n", i);
718 			continue;
719 		}
720 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
721 		    "target %d page 1: Requested Params %x Configuration %x\n",
722 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
723 		    mpt->mpt_dev_page1[i].Configuration);
724 	}
725 	return (0);
726 }
727 
728 /*
729  * Validate SPI configuration information.
730  *
731  * In particular, validate SPI Port Page 1.
732  */
733 static int
734 mpt_set_initial_config_spi(struct mpt_softc *mpt)
735 {
736 	int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
737 	int error;
738 
739 	mpt->mpt_disc_enable = 0xff;
740 	mpt->mpt_tag_enable = 0;
741 
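	/*
	 * SPI Port Page 1's Configuration field carries our initiator
	 * SCSI ID in the low byte and, in the upper 16 bits, a bitmask
	 * of the IDs this port responds to; pp1val, computed above,
	 * sets just our own ID in both.
	 */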
742 	if (mpt->mpt_port_page1.Configuration != pp1val) {
743 		CONFIG_PAGE_SCSI_PORT_1 tmp;
744 
745 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x) - should "
746 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
747 		tmp = mpt->mpt_port_page1;
748 		tmp.Configuration = pp1val;
749 		error = mpt_write_cur_cfg_page(mpt, 0,
750 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
751 		if (error) {
752 			return (-1);
753 		}
754 		error = mpt_read_cur_cfg_page(mpt, 0,
755 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
756 		if (error) {
757 			return (-1);
758 		}
759 		if (tmp.Configuration != pp1val) {
760 			mpt_prt(mpt,
761 			    "failed to reset SPI Port Page 1 Config value\n");
762 			return (-1);
763 		}
764 		mpt->mpt_port_page1 = tmp;
765 	}
766 
767 	/*
768 	 * The purpose of this exercise is to get
769 	 * all targets back to async/narrow.
770 	 *
771 	 * We skip this step if the BIOS has already negotiated
772 	 * speeds with the targets and does not require us to
773 	 * do Domain Validation.
774 	 */
775 	i = mpt->mpt_port_page2.PortSettings &
776 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
777 	j = mpt->mpt_port_page2.PortFlags &
778 	    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
779 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* &&
780 	    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) {
781 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
782 		    "honoring BIOS transfer negotiations\n");
783 	} else {
784 		for (i = 0; i < 16; i++) {
785 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
786 			mpt->mpt_dev_page1[i].Configuration = 0;
787 			(void) mpt_update_spi_config(mpt, i);
788 		}
789 	}
790 	return (0);
791 }
792 
793 int
794 mpt_cam_enable(struct mpt_softc *mpt)
795 {
796 	if (mpt->is_fc) {
797 		if (mpt_read_config_info_fc(mpt)) {
798 			return (EIO);
799 		}
800 		if (mpt_set_initial_config_fc(mpt)) {
801 			return (EIO);
802 		}
803 	} else if (mpt->is_sas) {
804 		if (mpt_read_config_info_sas(mpt)) {
805 			return (EIO);
806 		}
807 		if (mpt_set_initial_config_sas(mpt)) {
808 			return (EIO);
809 		}
810 	} else if (mpt->is_spi) {
811 		if (mpt_read_config_info_spi(mpt)) {
812 			return (EIO);
813 		}
814 		if (mpt_set_initial_config_spi(mpt)) {
815 			return (EIO);
816 		}
817 	}
818 	return (0);
819 }
820 
821 void
822 mpt_cam_ready(struct mpt_softc *mpt)
823 {
824 	/*
825 	 * If we're in target mode, hang out resources now
826 	 * so we don't cause the world to hang talking to us.
827 	 */
828 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
829 		/*
830 		 * Try to add some target command resources
831 		 */
832 		MPT_LOCK(mpt);
833 		if (mpt_add_target_commands(mpt) == FALSE) {
834 			mpt_prt(mpt, "failed to add target commands\n");
835 		}
836 		MPT_UNLOCK(mpt);
837 	}
838 }
839 
840 void
841 mpt_cam_detach(struct mpt_softc *mpt)
842 {
843 	mpt_handler_t handler;
844 
845 	mpt_terminate_recovery_thread(mpt);
846 
847 	handler.reply_handler = mpt_scsi_reply_handler;
848 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
849 			       scsi_io_handler_id);
850 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
851 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
852 			       scsi_tmf_handler_id);
853 	handler.reply_handler = mpt_fc_els_reply_handler;
854 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
855 			       fc_els_handler_id);
856 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
857 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
858 			       mpt->scsi_tgt_handler_id);
859 
860 	if (mpt->tmf_req != NULL) {
861 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
862 		mpt_free_request(mpt, mpt->tmf_req);
863 		mpt->tmf_req = NULL;
864 	}
865 
866 	if (mpt->sim != NULL) {
867 		MPTLOCK_2_CAMLOCK(mpt);
868 		xpt_free_path(mpt->path);
869 		xpt_bus_deregister(cam_sim_path(mpt->sim));
870 		cam_sim_free(mpt->sim, TRUE);
871 		mpt->sim = NULL;
872 		CAMLOCK_2_MPTLOCK(mpt);
873 	}
874 
875 	if (mpt->phydisk_sim != NULL) {
876 		MPTLOCK_2_CAMLOCK(mpt);
877 		xpt_free_path(mpt->phydisk_path);
878 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
879 		cam_sim_free(mpt->phydisk_sim, TRUE);
880 		mpt->phydisk_sim = NULL;
881 		CAMLOCK_2_MPTLOCK(mpt);
882 	}
883 }
884 
885 /* This routine is used after a system crash to dump core onto the swap
886  * device. */
887 static void
888 mpt_poll(struct cam_sim *sim)
889 {
890 	struct mpt_softc *mpt;
891 
892 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
893 	MPT_LOCK(mpt);
894 	mpt_intr(mpt);
895 	MPT_UNLOCK(mpt);
896 }
897 
898 /*
899  * Watchdog timeout routine for SCSI requests.
900  */
901 static void
902 mpt_timeout(void *arg)
903 {
904 	union ccb	 *ccb;
905 	struct mpt_softc *mpt;
906 	request_t	 *req;
907 
908 	ccb = (union ccb *)arg;
909 	mpt = ccb->ccb_h.ccb_mpt_ptr;
910 
911 	MPT_LOCK(mpt);
912 	req = ccb->ccb_h.ccb_req_ptr;
913 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
914 	    req->serno, ccb, req->ccb);
915 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
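	/*
	 * Move the timed-out request from the pending list to the
	 * timeout list and wake the recovery thread, which will attempt
	 * to recover the command (see mpt_recover_commands()).
	 */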
916 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
917 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
918 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
919 		req->state |= REQ_STATE_TIMEDOUT;
920 		mpt_wakeup_recovery_thread(mpt);
921 	}
922 	MPT_UNLOCK(mpt);
923 }
924 
925 /*
926  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
927  *
928  * Takes a list of physical segments and builds the SGL for the SCSI I/O
929  * command, then forwards the command to the IOC after one last check that
930  * CAM has not aborted the transaction.
931  */
932 static void
933 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
934 {
935 	request_t *req, *trq;
936 	char *mpt_off;
937 	union ccb *ccb;
938 	struct mpt_softc *mpt;
939 	int seg, first_lim;
940 	uint32_t flags, nxt_off;
941 	void *sglp = NULL;
942 	MSG_REQUEST_HEADER *hdrp;
943 	SGE_SIMPLE64 *se;
944 	SGE_CHAIN64 *ce;
945 	int istgt = 0;
946 
947 	req = (request_t *)arg;
948 	ccb = req->ccb;
949 
950 	mpt = ccb->ccb_h.ccb_mpt_ptr;
951 	req = ccb->ccb_h.ccb_req_ptr;
952 
953 	hdrp = req->req_vbuf;
954 	mpt_off = req->req_vbuf;
955 
956 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
957 		error = EFBIG;
958 	}
959 
960 	if (error == 0) {
961 		switch (hdrp->Function) {
962 		case MPI_FUNCTION_SCSI_IO_REQUEST:
963 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
964 			istgt = 0;
965 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
966 			break;
967 		case MPI_FUNCTION_TARGET_ASSIST:
968 			istgt = 1;
969 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
970 			break;
971 		default:
972 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
973 			    hdrp->Function);
974 			error = EINVAL;
975 			break;
976 		}
977 	}
978 
979 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
980 		error = EFBIG;
981 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
982 		    nseg, mpt->max_seg_cnt);
983 	}
984 
985 bad:
986 	if (error != 0) {
987 		if (error != EFBIG && error != ENOMEM) {
988 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
989 		}
990 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
991 			cam_status status;
992 			mpt_freeze_ccb(ccb);
993 			if (error == EFBIG) {
994 				status = CAM_REQ_TOO_BIG;
995 			} else if (error == ENOMEM) {
996 				if (mpt->outofbeer == 0) {
997 					mpt->outofbeer = 1;
998 					xpt_freeze_simq(mpt->sim, 1);
999 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1000 					    "FREEZEQ\n");
1001 				}
1002 				status = CAM_REQUEUE_REQ;
1003 			} else {
1004 				status = CAM_REQ_CMP_ERR;
1005 			}
1006 			mpt_set_ccb_status(ccb, status);
1007 		}
1008 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1009 			request_t *cmd_req =
1010 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1011 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1012 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1013 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1014 		}
1015 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1016 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1017 		xpt_done(ccb);
1018 		CAMLOCK_2_MPTLOCK(mpt);
1019 		mpt_free_request(mpt, req);
1020 		MPTLOCK_2_CAMLOCK(mpt);
1021 		return;
1022 	}
1023 
1024 	/*
1025 	 * No data to transfer?
1026 	 * Just make a single simple SGL with zero length.
1027 	 */
1028 
1029 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1030 		int tidx = ((char *)sglp) - mpt_off;
1031 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1032 	}
1033 
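	/*
	 * A 32-bit simple element suffices below even on 64-bit systems,
	 * since no address is needed for a zero-length transfer.
	 */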
1034 	if (nseg == 0) {
1035 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1036 		MPI_pSGE_SET_FLAGS(se1,
1037 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1038 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1039 		goto out;
1040 	}
1041 
1042 
1043 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1044 	if (istgt == 0) {
1045 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1046 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1047 		}
1048 	} else {
1049 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1050 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1051 		}
1052 	}
1053 
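	/*
	 * Note the direction reversal for target mode below: a target
	 * CAM_DIR_IN transfer sends data to the initiator, so the buffer
	 * is read out of memory (BUS_DMASYNC_PREWRITE), and vice versa.
	 */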
1054 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1055 		bus_dmasync_op_t op;
1056 		if (istgt == 0) {
1057 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1058 				op = BUS_DMASYNC_PREREAD;
1059 			} else {
1060 				op = BUS_DMASYNC_PREWRITE;
1061 			}
1062 		} else {
1063 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1064 				op = BUS_DMASYNC_PREWRITE;
1065 			} else {
1066 				op = BUS_DMASYNC_PREREAD;
1067 			}
1068 		}
1069 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1070 	}
1071 
1072 	/*
1073 	 * Okay, fill in what we can at the end of the command frame.
1074 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1075 	 * the command frame.
1076 	 *
1077 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1078 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
1079 	 * that.
1080 	 */
1081 
1082 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1083 		first_lim = nseg;
1084 	} else {
1085 		/*
1086 		 * Leave room for CHAIN element
1087 		 */
1088 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1089 	}
1090 
1091 	se = (SGE_SIMPLE64 *) sglp;
1092 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1093 		uint32_t tf;
1094 
1095 		memset(se, 0, sizeof (*se));
1096 		se->Address.Low = dm_segs->ds_addr;
1097 		if (sizeof(bus_addr_t) > 4) {
1098 			se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
1099 		}
1100 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1101 		tf = flags;
1102 		if (seg == first_lim - 1) {
1103 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1104 		}
1105 		if (seg == nseg - 1) {
1106 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1107 				MPI_SGE_FLAGS_END_OF_BUFFER;
1108 		}
1109 		MPI_pSGE_SET_FLAGS(se, tf);
1110 	}
1111 
1112 	if (seg == nseg) {
1113 		goto out;
1114 	}
1115 
1116 	/*
1117 	 * Tell the IOC where to find the first chain element.
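	 * The offset is expressed in 32-bit words, hence the >> 2 below.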
1118 	 */
1119 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1120 	nxt_off = MPT_RQSL(mpt);
1121 	trq = req;
1122 
1123 	/*
1124 	 * Make up the rest of the data segments out of a chain element
1125 	 * (contained in the current request frame) which points to
1126 	 * SIMPLE64 elements in the next request frame, possibly ending
1127 	 * with *another* chain element (if there's more).
1128 	 */
1129 	while (seg < nseg) {
1130 		int this_seg_lim;
1131 		uint32_t tf, cur_off;
1132 		bus_addr_t chain_list_addr;
1133 
1134 		/*
1135 		 * Point to the chain descriptor. Note that the chain
1136 		 * descriptor is at the end of the *previous* list (whether
1137 		 * chain or simple).
1138 		 */
1139 		ce = (SGE_CHAIN64 *) se;
1140 
1141 		/*
1142 		 * Before we change our current pointer, make sure we won't
1143 		 * overflow the request area with this frame. Note that we
1144 		 * test against 'greater than' here as it's okay in this case
1145 		 * to have next offset be just outside the request area.
1146 		 */
1147 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1148 			nxt_off = MPT_REQUEST_AREA;
1149 			goto next_chain;
1150 		}
1151 
1152 		/*
1153 		 * Set our SGE element pointer to the beginning of the chain
1154 		 * list and update our next chain list offset.
1155 		 */
1156 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1157 		cur_off = nxt_off;
1158 		nxt_off += MPT_RQSL(mpt);
1159 
1160 		/*
1161 		 * Now initialize the chain descriptor.
1162 		 */
1163 		memset(ce, 0, sizeof (*ce));
1164 
1165 		/*
1166 		 * Get the physical address of the chain list.
1167 		 */
1168 		chain_list_addr = trq->req_pbuf;
1169 		chain_list_addr += cur_off;
1170 		if (sizeof (bus_addr_t) > 4) {
1171 			ce->Address.High =
1172 			    (uint32_t) ((uint64_t)chain_list_addr >> 32);
1173 		}
1174 		ce->Address.Low = (uint32_t) chain_list_addr;
1175 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1176 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1177 
1178 		/*
1179 		 * If we have more than a frame's worth of segments left,
1180 		 * set up the chain list to have the last element be another
1181 		 * chain descriptor.
1182 		 */
1183 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1184 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1185 			/*
1186 			 * The length of the chain is the size, in bytes, of the
1187 			 * segments it covers plus the next chain element.
1188 			 *
1189 			 * The next chain descriptor offset is that same segment
1190 			 * size expressed in 32-bit words.
1191 			 */
1192 			ce->Length = (this_seg_lim - seg) *
1193 			    sizeof (SGE_SIMPLE64);
1194 			ce->NextChainOffset = ce->Length >> 2;
1195 			ce->Length += sizeof (SGE_CHAIN64);
1196 		} else {
1197 			this_seg_lim = nseg;
1198 			ce->Length = (this_seg_lim - seg) *
1199 			    sizeof (SGE_SIMPLE64);
1200 		}
1201 
1202 		/*
1203 		 * Fill in the chain list SGE elements with our segment data.
1204 		 *
1205 		 * If we're the last element in this chain list, set the last
1206 		 * element flag. If we're the completely last element period,
1207 		 * set the end of list and end of buffer flags.
1208 		 */
1209 		while (seg < this_seg_lim) {
1210 			memset(se, 0, sizeof (*se));
1211 			se->Address.Low = dm_segs->ds_addr;
1212 			if (sizeof (bus_addr_t) > 4) {
1213 				se->Address.High =
1214 				    ((uint64_t)dm_segs->ds_addr) >> 32;
1215 			}
1216 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1217 			tf = flags;
1218 			if (seg == this_seg_lim - 1) {
1219 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1220 			}
1221 			if (seg == nseg - 1) {
1222 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1223 					MPI_SGE_FLAGS_END_OF_BUFFER;
1224 			}
1225 			MPI_pSGE_SET_FLAGS(se, tf);
1226 			se++;
1227 			seg++;
1228 			dm_segs++;
1229 		}
1230 
1231     next_chain:
1232 		/*
1233 		 * If we have more segments to do and we've used up all of
1234 		 * the space in a request area, go allocate another one
1235 		 * and chain to that.
1236 		 */
1237 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1238 			request_t *nrq;
1239 
1240 			CAMLOCK_2_MPTLOCK(mpt);
1241 			nrq = mpt_get_request(mpt, FALSE);
1242 			MPTLOCK_2_CAMLOCK(mpt);
1243 
1244 			if (nrq == NULL) {
1245 				error = ENOMEM;
1246 				goto bad;
1247 			}
1248 
1249 			/*
1250 			 * Append the new request area on the tail of our list.
1251 			 */
1252 			if ((trq = req->chain) == NULL) {
1253 				req->chain = nrq;
1254 			} else {
1255 				while (trq->chain != NULL) {
1256 					trq = trq->chain;
1257 				}
1258 				trq->chain = nrq;
1259 			}
1260 			trq = nrq;
1261 			mpt_off = trq->req_vbuf;
1262 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1263 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1264 			}
1265 			nxt_off = 0;
1266 		}
1267 	}
1268 out:
1269 
1270 	/*
1271 	 * This is the last point at which to check whether this CCB has been aborted.
1272 	 */
1273 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1274 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1275 			request_t *cmd_req =
1276 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1277 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1278 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1279 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1280 		}
1281 		mpt_prt(mpt,
1282 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1283 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1284 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1285 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1286 		}
1287 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1288 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1289 		xpt_done(ccb);
1290 		CAMLOCK_2_MPTLOCK(mpt);
1291 		mpt_free_request(mpt, req);
1292 		MPTLOCK_2_CAMLOCK(mpt);
1293 		return;
1294 	}
1295 
1296 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1297 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1298 		ccb->ccb_h.timeout_ch =
1299 			timeout(mpt_timeout, (caddr_t)ccb,
1300 				(ccb->ccb_h.timeout * hz) / 1000);
1301 	} else {
1302 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1303 	}
1304 	if (mpt->verbose > MPT_PRT_DEBUG) {
1305 		int nc = 0;
1306 		mpt_print_request(req->req_vbuf);
1307 		for (trq = req->chain; trq; trq = trq->chain) {
1308 			printf("  Additional Chain Area %d\n", nc++);
1309 			mpt_dump_sgl(trq->req_vbuf, 0);
1310 		}
1311 	}
1312 
1313 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1314 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1315 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1316 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1317 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1318 		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1319 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1320 		} else {
1321 			tgt->state = TGT_STATE_MOVING_DATA;
1322 		}
1323 #else
1324 		tgt->state = TGT_STATE_MOVING_DATA;
1325 #endif
1326 	}
1327 	CAMLOCK_2_MPTLOCK(mpt);
1328 	mpt_send_cmd(mpt, req);
1329 	MPTLOCK_2_CAMLOCK(mpt);
1330 }
1331 
1332 static void
1333 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1334 {
1335 	request_t *req, *trq;
1336 	char *mpt_off;
1337 	union ccb *ccb;
1338 	struct mpt_softc *mpt;
1339 	int seg, first_lim;
1340 	uint32_t flags, nxt_off;
1341 	void *sglp = NULL;
1342 	MSG_REQUEST_HEADER *hdrp;
1343 	SGE_SIMPLE32 *se;
1344 	SGE_CHAIN32 *ce;
1345 	int istgt = 0;
1346 
1347 	req = (request_t *)arg;
1348 	ccb = req->ccb;
1349 
1350 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1351 	req = ccb->ccb_h.ccb_req_ptr;
1352 
1353 	hdrp = req->req_vbuf;
1354 	mpt_off = req->req_vbuf;
1355 
1356 
1357 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1358 		error = EFBIG;
1359 	}
1360 
1361 	if (error == 0) {
1362 		switch (hdrp->Function) {
1363 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1364 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1365 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1366 			break;
1367 		case MPI_FUNCTION_TARGET_ASSIST:
1368 			istgt = 1;
1369 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1370 			break;
1371 		default:
1372 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1373 			    hdrp->Function);
1374 			error = EINVAL;
1375 			break;
1376 		}
1377 	}
1378 
1379 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1380 		error = EFBIG;
1381 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1382 		    nseg, mpt->max_seg_cnt);
1383 	}
1384 
1385 bad:
1386 	if (error != 0) {
1387 		if (error != EFBIG && error != ENOMEM) {
1388 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1389 		}
1390 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1391 			cam_status status;
1392 			mpt_freeze_ccb(ccb);
1393 			if (error == EFBIG) {
1394 				status = CAM_REQ_TOO_BIG;
1395 			} else if (error == ENOMEM) {
1396 				if (mpt->outofbeer == 0) {
1397 					mpt->outofbeer = 1;
1398 					xpt_freeze_simq(mpt->sim, 1);
1399 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1400 					    "FREEZEQ\n");
1401 				}
1402 				status = CAM_REQUEUE_REQ;
1403 			} else {
1404 				status = CAM_REQ_CMP_ERR;
1405 			}
1406 			mpt_set_ccb_status(ccb, status);
1407 		}
1408 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1409 			request_t *cmd_req =
1410 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1411 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1412 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1413 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1414 		}
1415 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1416 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1417 		xpt_done(ccb);
1418 		CAMLOCK_2_MPTLOCK(mpt);
1419 		mpt_free_request(mpt, req);
1420 		MPTLOCK_2_CAMLOCK(mpt);
1421 		return;
1422 	}
1423 
1424 	/*
1425 	 * No data to transfer?
1426 	 * Just make a single simple SGL with zero length.
1427 	 */
1428 
1429 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1430 		int tidx = ((char *)sglp) - mpt_off;
1431 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1432 	}
1433 
1434 	if (nseg == 0) {
1435 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1436 		MPI_pSGE_SET_FLAGS(se1,
1437 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1438 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1439 		goto out;
1440 	}
1441 
1442 
1443 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1444 	if (istgt == 0) {
1445 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1446 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1447 		}
1448 	} else {
1449 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1450 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1451 		}
1452 	}
1453 
1454 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1455 		bus_dmasync_op_t op;
1456 		if (istgt) {
1457 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1458 				op = BUS_DMASYNC_PREREAD;
1459 			} else {
1460 				op = BUS_DMASYNC_PREWRITE;
1461 			}
1462 		} else {
1463 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1464 				op = BUS_DMASYNC_PREWRITE;
1465 			} else {
1466 				op = BUS_DMASYNC_PREREAD;
1467 			}
1468 		}
1469 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1470 	}
1471 
1472 	/*
1473 	 * Okay, fill in what we can at the end of the command frame.
1474 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1475 	 * the command frame.
1476 	 *
1477 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1478 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1479 	 * that.
1480 	 */
1481 
1482 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1483 		first_lim = nseg;
1484 	} else {
1485 		/*
1486 		 * Leave room for CHAIN element
1487 		 */
1488 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1489 	}
1490 
1491 	se = (SGE_SIMPLE32 *) sglp;
1492 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1493 		uint32_t tf;
1494 
1495 		memset(se, 0, sizeof (*se));
1496 		se->Address = dm_segs->ds_addr;
1497 
1498 
1499 
1500 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1501 		tf = flags;
1502 		if (seg == first_lim - 1) {
1503 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1504 		}
1505 		if (seg == nseg - 1) {
1506 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1507 				MPI_SGE_FLAGS_END_OF_BUFFER;
1508 		}
1509 		MPI_pSGE_SET_FLAGS(se, tf);
1510 	}
1511 
1512 	if (seg == nseg) {
1513 		goto out;
1514 	}
1515 
1516 	/*
1517 	 * Tell the IOC where to find the first chain element.
1518 	 */
1519 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1520 	nxt_off = MPT_RQSL(mpt);
1521 	trq = req;
1522 
1523 	/*
1524 	 * Make up the rest of the data segments out of a chain element
1525 	 * (contained in the current request frame) which points to
1526 	 * SIMPLE32 elements in the next request frame, possibly ending
1527 	 * with *another* chain element (if there's more).
1528 	 */
1529 	while (seg < nseg) {
1530 		int this_seg_lim;
1531 		uint32_t tf, cur_off;
1532 		bus_addr_t chain_list_addr;
1533 
1534 		/*
1535 		 * Point to the chain descriptor. Note that the chain
1536 		 * descriptor is at the end of the *previous* list (whether
1537 		 * chain or simple).
1538 		 */
1539 		ce = (SGE_CHAIN32 *) se;
1540 
1541 		/*
1542 		 * Before we change our current pointer, make sure we won't
1543 		 * overflow the request area with this frame. Note that we
1544 		 * test against 'greater than' here as it's okay in this case
1545 		 * to have next offset be just outside the request area.
1546 		 */
1547 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1548 			nxt_off = MPT_REQUEST_AREA;
1549 			goto next_chain;
1550 		}
1551 
1552 		/*
1553 		 * Set our SGE element pointer to the beginning of the chain
1554 		 * list and update our next chain list offset.
1555 		 */
1556 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1557 		cur_off = nxt_off;
1558 		nxt_off += MPT_RQSL(mpt);
1559 
1560 		/*
1561 		 * Now initialize the chain descriptor.
1562 		 */
1563 		memset(ce, 0, sizeof (*ce));
1564 
1565 		/*
1566 		 * Get the physical address of the chain list.
1567 		 */
1568 		chain_list_addr = trq->req_pbuf;
1569 		chain_list_addr += cur_off;
1570 
1571 
1572 
1573 		ce->Address = chain_list_addr;
1574 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1575 
1576 
1577 		/*
1578 		 * If we have more than a frame's worth of segments left,
1579 		 * set up the chain list to have the last element be another
1580 		 * chain descriptor.
1581 		 */
1582 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1583 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1584 			/*
1585 			 * The length of the chain is the size, in bytes, of the
1586 			 * segments it covers plus the next chain element.
1587 			 *
1588 			 * The next chain descriptor offset is that same segment
1589 			 * size expressed in 32-bit words.
1590 			 */
1591 			ce->Length = (this_seg_lim - seg) *
1592 			    sizeof (SGE_SIMPLE32);
1593 			ce->NextChainOffset = ce->Length >> 2;
1594 			ce->Length += sizeof (SGE_CHAIN32);
1595 		} else {
1596 			this_seg_lim = nseg;
1597 			ce->Length = (this_seg_lim - seg) *
1598 			    sizeof (SGE_SIMPLE32);
1599 		}
1600 
1601 		/*
1602 		 * Fill in the chain list SGE elements with our segment data.
1603 		 *
1604 		 * If we're the last element in this chain list, set the last
1605 		 * element flag. If we're the completely last element period,
1606 		 * set the end of list and end of buffer flags.
1607 		 */
1608 		while (seg < this_seg_lim) {
1609 			memset(se, 0, sizeof (*se));
1610 			se->Address = dm_segs->ds_addr;
1611 
1612 
1613 
1614 
1615 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1616 			tf = flags;
1617 			if (seg == this_seg_lim - 1) {
1618 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1619 			}
1620 			if (seg == nseg - 1) {
1621 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1622 					MPI_SGE_FLAGS_END_OF_BUFFER;
1623 			}
1624 			MPI_pSGE_SET_FLAGS(se, tf);
1625 			se++;
1626 			seg++;
1627 			dm_segs++;
1628 		}
1629 
1630     next_chain:
1631 		/*
1632 		 * If we have more segments to do and we've used up all of
1633 		 * the space in a request area, go allocate another one
1634 		 * and chain to that.
1635 		 */
1636 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1637 			request_t *nrq;
1638 
1639 			CAMLOCK_2_MPTLOCK(mpt);
1640 			nrq = mpt_get_request(mpt, FALSE);
1641 			MPTLOCK_2_CAMLOCK(mpt);
1642 
1643 			if (nrq == NULL) {
1644 				error = ENOMEM;
1645 				goto bad;
1646 			}
1647 
1648 			/*
1649 			 * Append the new request area on the tail of our list.
1650 			 */
1651 			if ((trq = req->chain) == NULL) {
1652 				req->chain = nrq;
1653 			} else {
1654 				while (trq->chain != NULL) {
1655 					trq = trq->chain;
1656 				}
1657 				trq->chain = nrq;
1658 			}
1659 			trq = nrq;
1660 			mpt_off = trq->req_vbuf;
1661 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1662 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1663 			}
1664 			nxt_off = 0;
1665 		}
1666 	}
1667 out:
1668 
1669 	/*
1670 	 * Last time we need to check if this CCB needs to be aborted.
1671 	 */
1672 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1673 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1674 			request_t *cmd_req =
1675 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1676 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1677 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1678 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1679 		}
1680 		mpt_prt(mpt,
1681 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1682 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1683 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1684 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1685 		}
1686 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1687 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1688 		xpt_done(ccb);
1689 		CAMLOCK_2_MPTLOCK(mpt);
1690 		mpt_free_request(mpt, req);
1691 		MPTLOCK_2_CAMLOCK(mpt);
1692 		return;
1693 	}
1694 
1695 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1696 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1697 		ccb->ccb_h.timeout_ch =
1698 			timeout(mpt_timeout, (caddr_t)ccb,
1699 				(ccb->ccb_h.timeout * hz) / 1000);
1700 	} else {
1701 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1702 	}
1703 	if (mpt->verbose > MPT_PRT_DEBUG) {
1704 		int nc = 0;
1705 		mpt_print_request(req->req_vbuf);
1706 		for (trq = req->chain; trq; trq = trq->chain) {
1707 			printf("  Additional Chain Area %d\n", nc++);
1708 			mpt_dump_sgl(trq->req_vbuf, 0);
1709 		}
1710 	}
1711 
1712 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1713 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1714 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1715 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1716 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1717 		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1718 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1719 		} else {
1720 			tgt->state = TGT_STATE_MOVING_DATA;
1721 		}
1722 #else
1723 		tgt->state = TGT_STATE_MOVING_DATA;
1724 #endif
1725 	}
1726 	CAMLOCK_2_MPTLOCK(mpt);
1727 	mpt_send_cmd(mpt, req);
1728 	MPTLOCK_2_CAMLOCK(mpt);
1729 }
1730 
1731 static void
1732 mpt_start(struct cam_sim *sim, union ccb *ccb)
1733 {
1734 	request_t *req;
1735 	struct mpt_softc *mpt;
1736 	MSG_SCSI_IO_REQUEST *mpt_req;
1737 	struct ccb_scsiio *csio = &ccb->csio;
1738 	struct ccb_hdr *ccbh = &ccb->ccb_h;
1739 	bus_dmamap_callback_t *cb;
1740 	target_id_t tgt;
1741 	int raid_passthru;
1742 
1743 	/* Get the pointer for the physical adapter */
1744 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1745 	raid_passthru = (sim == mpt->phydisk_sim);
1746 
1747 	CAMLOCK_2_MPTLOCK(mpt);
1748 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
1749 		if (mpt->outofbeer == 0) {
1750 			mpt->outofbeer = 1;
1751 			xpt_freeze_simq(mpt->sim, 1);
1752 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
1753 		}
1754 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1755 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
1756 		MPTLOCK_2_CAMLOCK(mpt);
1757 		xpt_done(ccb);
1758 		return;
1759 	}
1760 #ifdef	INVARIANTS
1761 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
1762 #endif
1763 	MPTLOCK_2_CAMLOCK(mpt);
1764 
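	/*
	 * Pick the bus_dma callback that builds 64-bit SGEs when bus
	 * addresses are wider than 32 bits; otherwise use the 32-bit
	 * SGE variant.
	 */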
1765 	if (sizeof (bus_addr_t) > 4) {
1766 		cb = mpt_execute_req_a64;
1767 	} else {
1768 		cb = mpt_execute_req;
1769 	}
1770 
1771 	/*
1772 	 * Link the ccb and the request structure so we can find
1773 	 * the other knowing either the request or the ccb
1774 	 */
1775 	req->ccb = ccb;
1776 	ccb->ccb_h.ccb_req_ptr = req;
1777 
1778 	/* Now we build the command for the IOC */
1779 	mpt_req = req->req_vbuf;
1780 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
1781 
1782 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1783 	if (raid_passthru) {
1784 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
1785 		CAMLOCK_2_MPTLOCK(mpt);
1786 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
1787 			MPTLOCK_2_CAMLOCK(mpt);
1788 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1789 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
1790 			xpt_done(ccb);
1791 			return;
1792 		}
1793 		MPTLOCK_2_CAMLOCK(mpt);
1794 		mpt_req->Bus = 0;	/* we never set bus here */
1795 	} else {
1796 		tgt = ccb->ccb_h.target_id;
		mpt_req->Bus = 0;	/* XXX */
	}
1800 	mpt_req->SenseBufferLength =
1801 		(csio->sense_len < MPT_SENSE_SIZE) ?
1802 		 csio->sense_len : MPT_SENSE_SIZE;
1803 
1804 	/*
1805 	 * We use the message context to find the request structure when we
	 * get the command completion interrupt from the IOC.
1807 	 */
1808 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
1809 
1810 	/* Which physical device to do the I/O on */
1811 	mpt_req->TargetID = tgt;
1812 
1813 	/* We assume a single level LUN type */
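	/*
	 * LUNs of 256 and above are encoded with SAM flat space
	 * addressing (selector 0x40 in the first LUN byte); smaller
	 * LUNs use peripheral device addressing.
	 */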
1814 	if (ccb->ccb_h.target_lun >= 256) {
1815 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
1816 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
1817 	} else {
1818 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
1819 	}
1820 
1821 	/* Set the direction of the transfer */
1822 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1823 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
1824 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1825 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
1826 	} else {
1827 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
1828 	}
1829 
1830 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1831 		switch(ccb->csio.tag_action) {
1832 		case MSG_HEAD_OF_Q_TAG:
1833 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
1834 			break;
1835 		case MSG_ACA_TASK:
1836 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
1837 			break;
1838 		case MSG_ORDERED_Q_TAG:
1839 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
1840 			break;
1841 		case MSG_SIMPLE_Q_TAG:
1842 		default:
1843 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1844 			break;
1845 		}
1846 	} else {
1847 		if (mpt->is_fc || mpt->is_sas) {
1848 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1849 		} else {
1850 			/* XXX No such thing for a target doing packetized. */
1851 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
1852 		}
1853 	}
1854 
1855 	if (mpt->is_spi) {
1856 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1857 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
1858 		}
1859 	}
1860 
1861 	/* Copy the scsi command block into place */
1862 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1863 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
1864 	} else {
1865 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
1866 	}
1867 
1868 	mpt_req->CDBLength = csio->cdb_len;
1869 	mpt_req->DataLength = csio->dxfer_len;
1870 	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
1871 
1872 	/*
1873 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
1874 	 */
1875 	if (mpt->verbose == MPT_PRT_DEBUG) {
1876 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
1877 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
1878 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
1879 		if (mpt_req->Control != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
1880 			mpt_prtc(mpt, "(%s %u byte%s ",
1881 			    (mpt_req->Control == MPI_SCSIIO_CONTROL_READ)?
1882 			    "read" : "write",  csio->dxfer_len,
1883 			    (csio->dxfer_len == 1)? ")" : "s)");
1884 		}
1885 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
1886 		    ccb->ccb_h.target_lun, req, req->serno);
1887 	}
1888 
1889 	/*
	 * If we have any data to send with this command, map it into bus space.
1891 	 */
1892 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1893 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
1894 			/*
1895 			 * We've been given a pointer to a single buffer.
1896 			 */
1897 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
1898 				/*
				 * Virtual address that needs to be translated into
1900 				 * one or more physical address ranges.
1901 				 */
1902 				int error;
1903 				int s = splsoftvm();
1904 				error = bus_dmamap_load(mpt->buffer_dmat,
1905 				    req->dmap, csio->data_ptr, csio->dxfer_len,
1906 				    cb, req, 0);
1907 				splx(s);
1908 				if (error == EINPROGRESS) {
1909 					/*
1910 					 * So as to maintain ordering,
1911 					 * freeze the controller queue
1912 					 * until our mapping is
1913 					 * returned.
1914 					 */
1915 					xpt_freeze_simq(mpt->sim, 1);
1916 					ccbh->status |= CAM_RELEASE_SIMQ;
1917 				}
1918 			} else {
1919 				/*
				 * We have been given a pointer to a single
1921 				 * physical buffer.
1922 				 */
1923 				struct bus_dma_segment seg;
1924 				seg.ds_addr =
1925 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1926 				seg.ds_len = csio->dxfer_len;
1927 				(*cb)(req, &seg, 1, 0);
1928 			}
1929 		} else {
1930 			/*
			 * We have been given a list of addresses.
			 * This case could easily be supported, but such
			 * lists are not currently generated by the CAM
			 * subsystem, so there is no point in spending the
			 * time on it right now.
1935 			 */
1936 			struct bus_dma_segment *segs;
1937 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
1938 				(*cb)(req, NULL, 0, EFAULT);
1939 			} else {
1940 				/* Just use the segments provided */
1941 				segs = (struct bus_dma_segment *)csio->data_ptr;
1942 				(*cb)(req, segs, csio->sglist_cnt, 0);
1943 			}
1944 		}
1945 	} else {
1946 		(*cb)(req, NULL, 0, 0);
1947 	}
1948 }
1949 
1950 static int
1951 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
1952     int sleep_ok)
1953 {
1954 	int   error;
1955 	uint16_t status;
1956 	uint8_t response;
1957 
1958 	error = mpt_scsi_send_tmf(mpt,
1959 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
1960 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
1961 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1962 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
1963 	    0,	/* XXX How do I get the channel ID? */
1964 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
1965 	    lun != CAM_LUN_WILDCARD ? lun : 0,
1966 	    0, sleep_ok);
1967 
1968 	if (error != 0) {
1969 		/*
1970 		 * mpt_scsi_send_tmf hard resets on failure, so no
1971 		 * need to do so here.
1972 		 */
1973 		mpt_prt(mpt,
1974 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
1975 		return (EIO);
1976 	}
1977 
1978 	/* Wait for bus reset to be processed by the IOC. */
1979 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
1980 	    REQ_STATE_DONE, sleep_ok, 5000);
1981 
1982 	status = mpt->tmf_req->IOCStatus;
1983 	response = mpt->tmf_req->ResponseCode;
1984 	mpt->tmf_req->state = REQ_STATE_FREE;
1985 
1986 	if (error) {
1987 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
1988 		    "Resetting controller.\n");
1989 		mpt_reset(mpt, TRUE);
1990 		return (ETIMEDOUT);
1991 	}
1992 
1993 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1994 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
1995 		    "Resetting controller.\n", status);
1996 		mpt_reset(mpt, TRUE);
1997 		return (EIO);
1998 	}
1999 
2000 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2001 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2002 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2003 		    "Resetting controller.\n", response);
2004 		mpt_reset(mpt, TRUE);
2005 		return (EIO);
2006 	}
2007 	return (0);
2008 }
2009 
2010 static int
2011 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2012 {
2013 	int r = 0;
2014 	request_t *req;
2015 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2016 
	req = mpt_get_request(mpt, FALSE);
2018 	if (req == NULL) {
2019 		return (ENOMEM);
2020 	}
2021 	fc = req->req_vbuf;
2022 	memset(fc, 0, sizeof(*fc));
2023 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2024 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2025 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2026 	mpt_send_cmd(mpt, req);
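	/*
	 * If we aren't asked to wait, the request is freed later by
	 * the ELS reply handler when the primitive send completes.
	 */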
2027 	if (dowait) {
2028 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2029 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2030 		if (r == 0) {
2031 			mpt_free_request(mpt, req);
2032 		}
2033 	}
2034 	return (r);
2035 }
2036 
2037 static int
2038 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2039 	      MSG_EVENT_NOTIFY_REPLY *msg)
2040 {
2041 
2042 	switch(msg->Event & 0xFF) {
2043 	case MPI_EVENT_UNIT_ATTENTION:
2044 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2045 		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
2046 		break;
2047 
2048 	case MPI_EVENT_IOC_BUS_RESET:
2049 		/* We generated a bus reset */
2050 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2051 		    (msg->Data[0] >> 8) & 0xff);
2052 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2053 		break;
2054 
2055 	case MPI_EVENT_EXT_BUS_RESET:
2056 		/* Someone else generated a bus reset */
2057 		mpt_prt(mpt, "External Bus Reset Detected\n");
2058 		/*
2059 		 * These replies don't return EventData like the MPI
2060 		 * spec says they do
2061 		 */
2062 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2063 		break;
2064 
2065 	case MPI_EVENT_RESCAN:
2066 		/*
2067 		 * In general this means a device has been added to the loop.
2068 		 */
2069 		mpt_prt(mpt, "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
2070 /*		xpt_async(AC_FOUND_DEVICE, path, NULL);  */
2071 		break;
2072 
2073 	case MPI_EVENT_LINK_STATUS_CHANGE:
2074 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2075 		    (msg->Data[1] >> 8) & 0xff,
2076 		    ((msg->Data[0] & 0xff) == 0)?  "Failed" : "Active");
2077 		break;
2078 
2079 	case MPI_EVENT_LOOP_STATE_CHANGE:
2080 		switch ((msg->Data[0] >> 16) & 0xff) {
2081 		case 0x01:
2082 			mpt_prt(mpt,
2083 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2084 			    "(Loop Initialization)\n",
2085 			    (msg->Data[1] >> 8) & 0xff,
2086 			    (msg->Data[0] >> 8) & 0xff,
2087 			    (msg->Data[0]     ) & 0xff);
2088 			switch ((msg->Data[0] >> 8) & 0xff) {
2089 			case 0xF7:
2090 				if ((msg->Data[0] & 0xff) == 0xF7) {
2091 					mpt_prt(mpt, "Device needs AL_PA\n");
2092 				} else {
2093 					mpt_prt(mpt, "Device %02x doesn't like "
2094 					    "FC performance\n",
2095 					    msg->Data[0] & 0xFF);
2096 				}
2097 				break;
2098 			case 0xF8:
2099 				if ((msg->Data[0] & 0xff) == 0xF7) {
2100 					mpt_prt(mpt, "Device had loop failure "
2101 					    "at its receiver prior to acquiring"
2102 					    " AL_PA\n");
2103 				} else {
2104 					mpt_prt(mpt, "Device %02x detected loop"
2105 					    " failure at its receiver\n",
2106 					    msg->Data[0] & 0xFF);
2107 				}
2108 				break;
2109 			default:
2110 				mpt_prt(mpt, "Device %02x requests that device "
2111 				    "%02x reset itself\n",
2112 				    msg->Data[0] & 0xFF,
2113 				    (msg->Data[0] >> 8) & 0xFF);
2114 				break;
2115 			}
2116 			break;
2117 		case 0x02:
2118 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2119 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2120 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2121 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2122 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2123 			break;
2124 		case 0x03:
2125 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2126 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2127 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2128 			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
2129 			    (msg->Data[0]     ) & 0xff  /* Character 4 */);
2130 			break;
2131 		default:
2132 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2133 			    "FC event (%02x %02x %02x)\n",
2134 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2135 			    (msg->Data[0] >> 16) & 0xff, /* Event */
2136 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2137 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2138 		}
2139 		break;
2140 
2141 	case MPI_EVENT_LOGOUT:
2142 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2143 		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
2144 		break;
2145 	case MPI_EVENT_EVENT_CHANGE:
2146 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2147 		    "mpt_cam_event: MPI_EVENT_EVENT_CHANGE\n");
2148 		break;
2149 	case MPI_EVENT_QUEUE_FULL:
2150 	{
2151 		PTR_EVENT_DATA_QUEUE_FULL pqf =
2152 		    (PTR_EVENT_DATA_QUEUE_FULL) msg->Data;
2153 		mpt_prt(mpt, "QUEUE_FULL: Bus 0x%02x Target 0x%02x Depth %d\n",
2154 		    pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2155 		break;
2156 	}
2157 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2158 	{
2159 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2160 		    "mpt_cam_event: SAS_DEVICE_STATUS_CHANGE\n");
2161 		break;
2162 	}
2163 	case MPI_EVENT_SAS_SES:
2164 	{
2165 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2166 		    "mpt_cam_event: MPI_EVENT_SAS_SES\n");
2167 		break;
2168 	}
2169 	default:
2170 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2171 		    msg->Event & 0xFF);
2172 		return (0);
2173 	}
2174 	return (1);
2175 }
2176 
2177 /*
2178  * Reply path for all SCSI I/O requests, called from our
2179  * interrupt handler by extracting our handler index from
2180  * the MsgContext field of the reply from the IOC.
2181  *
2182  * This routine is optimized for the common case of a
2183  * completion without error.  All exception handling is
2184  * offloaded to non-inlined helper routines to minimize
2185  * cache footprint.
2186  */
2187 static int
2188 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2189     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2190 {
2191 	MSG_SCSI_IO_REQUEST *scsi_req;
2192 	union ccb *ccb;
2193 	target_id_t tgt;
2194 
2195 	if (req->state == REQ_STATE_FREE) {
2196 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2197 		return (TRUE);
2198 	}
2199 
2200 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2201 	ccb = req->ccb;
2202 	if (ccb == NULL) {
2203 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2204 		    req, req->serno);
2205 		return (TRUE);
2206 	}
2207 
2208 	tgt = scsi_req->TargetID;
2209 	untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
2210 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2211 
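	/*
	 * Now that the transfer is complete, synchronize and then
	 * unload the DMA map covering the data buffer.
	 */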
2212 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2213 		bus_dmasync_op_t op;
2214 
2215 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2216 			op = BUS_DMASYNC_POSTREAD;
2217 		else
2218 			op = BUS_DMASYNC_POSTWRITE;
2219 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2220 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2221 	}
2222 
2223 	if (reply_frame == NULL) {
2224 		/*
2225 		 * Context only reply, completion without error status.
2226 		 */
2227 		ccb->csio.resid = 0;
2228 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2229 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2230 	} else {
2231 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2232 	}
2233 
2234 	if (mpt->outofbeer) {
2235 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2236 		mpt->outofbeer = 0;
2237 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2238 	}
2239 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2240 		struct scsi_inquiry_data *iq =
2241 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2242 		if (scsi_req->Function ==
2243 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2244 			/*
2245 			 * Fake out the device type so that only the
2246 			 * pass-thru device will attach.
2247 			 */
2248 			iq->device &= ~0x1F;
2249 			iq->device |= T_NODEVICE;
2250 		}
2251 	}
2252 	if (mpt->verbose == MPT_PRT_DEBUG) {
2253 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2254 		    req, req->serno);
2255 	}
2256 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2257 	MPTLOCK_2_CAMLOCK(mpt);
2258 	xpt_done(ccb);
2259 	CAMLOCK_2_MPTLOCK(mpt);
2260 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2261 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2262 	} else {
2263 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2264 		    req, req->serno);
2265 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2266 	}
2267 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2268 	    ("CCB req needed wakeup"));
2269 #ifdef	INVARIANTS
2270 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2271 #endif
2272 	mpt_free_request(mpt, req);
2273 	return (TRUE);
2274 }
2275 
2276 static int
2277 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2278     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2279 {
2280 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2281 
2282 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2283 #ifdef	INVARIANTS
2284 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2285 #endif
2286 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2287 	/* Record IOC Status and Response Code of TMF for any waiters. */
2288 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2289 	req->ResponseCode = tmf_reply->ResponseCode;
2290 
2291 	mpt_lprt(mpt, MPT_PRT_INFO, "TMF complete: req %p:%u status 0x%x\n",
2292 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2293 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2294 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2295 		req->state |= REQ_STATE_DONE;
2296 		wakeup(req);
2297 	} else {
2298 		mpt->tmf_req->state = REQ_STATE_FREE;
2299 	}
2300 	return (TRUE);
2301 }
2302 
2303 /*
2304  * XXX: Move to definitions file
2305  */
2306 #define	ELS	0x22
2307 #define	FC4LS	0x32
2308 #define	ABTS	0x81
2309 #define	BA_ACC	0x84
2310 
2311 #define	LS_RJT	0x01
2312 #define	LS_ACC	0x02
2313 #define	PLOGI	0x03
2314 #define	LOGO	0x05
2315 #define SRR	0x14
2316 #define PRLI	0x20
2317 #define PRLO	0x21
2318 #define ADISC	0x52
2319 #define RSCN	0x61
2320 
2321 static void
2322 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2323     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2324 {
2325 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2326 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2327 
2328 	/*
2329 	 * We are going to reuse the ELS request to send this response back.
2330 	 */
2331 	rsp = &tmp;
2332 	memset(rsp, 0, sizeof(*rsp));
2333 
2334 #ifdef	USE_IMMEDIATE_LINK_DATA
2335 	/*
2336 	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2337 	 */
2338 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2339 #endif
2340 	rsp->RspLength = length;
2341 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2342 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2343 
2344 	/*
2345 	 * Copy over information from the original reply frame to
	 * its correct place in the response.
2347 	 */
2348 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2349 
2350 	/*
2351 	 * And now copy back the temporary area to the original frame.
2352 	 */
2353 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2354 	rsp = req->req_vbuf;
2355 
2356 #ifdef	USE_IMMEDIATE_LINK_DATA
2357 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2358 #else
2359 {
2360 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2361 	bus_addr_t paddr = req->req_pbuf;
2362 	paddr += MPT_RQSL(mpt);
2363 
2364 	se->FlagsLength =
2365 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2366 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2367 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2368 		MPI_SGE_FLAGS_END_OF_LIST	|
2369 		MPI_SGE_FLAGS_END_OF_BUFFER;
2370 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
2371 	se->FlagsLength |= (length);
2372 	se->Address = (uint32_t) paddr;
2373 }
2374 #endif
2375 
2376 	/*
2377 	 * Send it on...
2378 	 */
2379 	mpt_send_cmd(mpt, req);
2380 }
2381 
2382 static int
2383 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2384     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2385 {
2386 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2387 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2388 	U8 rctl;
2389 	U8 type;
2390 	U8 cmd;
2391 	U16 status = le16toh(reply_frame->IOCStatus);
2392 	U32 *elsbuf;
2393 	int ioindex;
2394 	int do_refresh = TRUE;
2395 
2396 #ifdef	INVARIANTS
2397 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2398 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2399 	    req, req->serno, rp->Function));
2400 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2401 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2402 	} else {
2403 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2404 	}
2405 #endif
2406 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2407 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2408 	    req, req->serno, reply_frame, reply_frame->Function);
2409 
2410 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2411 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2412 		    status, reply_frame->Function);
2413 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2414 			/*
2415 			 * XXX: to get around shutdown issue
2416 			 */
2417 			mpt->disabled = 1;
2418 			return (TRUE);
2419 		}
2420 		return (TRUE);
2421 	}
2422 
2423 	/*
2424 	 * If the function of a link service response, we recycle the
2425 	 * response to be a refresh for a new link service request.
2426 	 *
2427 	 * The request pointer is bogus in this case and we have to fetch
2428 	 * it based upon the TransactionContext.
2429 	 */
2430 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2431 		/* Freddie Uncle Charlie Katie */
2432 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2433 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2434 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2435 				break;
2436 			}
2437 
2438 		KASSERT(ioindex < mpt->els_cmds_allocated,
2439 		    ("can't find my mommie!"));
2440 
2441 		/* remove from active list as we're going to re-post it */
2442 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2443 		req->state &= ~REQ_STATE_QUEUED;
2444 		req->state |= REQ_STATE_DONE;
2445 		mpt_fc_post_els(mpt, req, ioindex);
2446 		return (TRUE);
2447 	}
2448 
2449 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2450 		/* remove from active list as we're done */
2451 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2452 		req->state &= ~REQ_STATE_QUEUED;
2453 		req->state |= REQ_STATE_DONE;
2454 		if (req->state & REQ_STATE_TIMEDOUT) {
2455 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2456 			    "Sync Primitive Send Completed After Timeout\n");
2457 			mpt_free_request(mpt, req);
2458 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2459 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2460 			    "Async Primitive Send Complete\n");
2461 			mpt_free_request(mpt, req);
2462 		} else {
2463 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2464 			    "Sync Primitive Send Complete- Waking Waiter\n");
2465 			wakeup(req);
2466 		}
2467 		return (TRUE);
2468 	}
2469 
2470 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2471 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2472 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2473 		    rp->MsgLength, rp->MsgFlags);
2474 		return (TRUE);
2475 	}
2476 
2477 	if (rp->MsgLength <= 5) {
2478 		/*
		 * This is just an ack of an original ELS buffer post.
2480 		 */
2481 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2482 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2483 		return (TRUE);
2484 	}
2485 
2486 
2487 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2488 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2489 
2490 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2491 	cmd = be32toh(elsbuf[0]) >> 24;
2492 
2493 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2494 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2495 		return (TRUE);
2496 	}
2497 
2498 	ioindex = le32toh(rp->TransactionContext);
2499 	req = mpt->els_cmd_ptrs[ioindex];
2500 
2501 	if (rctl == ELS && type == 1) {
2502 		switch (cmd) {
2503 		case PRLI:
2504 			/*
2505 			 * Send back a PRLI ACC
2506 			 */
2507 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2508 			    le32toh(rp->Wwn.PortNameHigh),
2509 			    le32toh(rp->Wwn.PortNameLow));
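			/*
			 * Build the LS_ACC payload in place.  In the
			 * layout assumed here, word 0 (0x02100014)
			 * carries the LS_ACC command code (0x02), the
			 * service parameter page length (0x10), and a
			 * 20-byte payload length; the bits set in word
			 * 4 advertise PRLI target and/or initiator
			 * function.
			 */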
2510 			elsbuf[0] = htobe32(0x02100014);
2511 			elsbuf[1] |= htobe32(0x00000100);
2512 			elsbuf[4] = htobe32(0x00000002);
2513 			if (mpt->role & MPT_ROLE_TARGET)
2514 				elsbuf[4] |= htobe32(0x00000010);
2515 			if (mpt->role & MPT_ROLE_INITIATOR)
2516 				elsbuf[4] |= htobe32(0x00000020);
2517 			/* remove from active list as we're done */
2518 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2519 			req->state &= ~REQ_STATE_QUEUED;
2520 			req->state |= REQ_STATE_DONE;
2521 			mpt_fc_els_send_response(mpt, req, rp, 20);
2522 			do_refresh = FALSE;
2523 			break;
2524 		case PRLO:
2525 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2526 			elsbuf[0] = htobe32(0x02100014);
2527 			elsbuf[1] = htobe32(0x08000100);
2528 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2529 			    le32toh(rp->Wwn.PortNameHigh),
2530 			    le32toh(rp->Wwn.PortNameLow));
2531 			/* remove from active list as we're done */
2532 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2533 			req->state &= ~REQ_STATE_QUEUED;
2534 			req->state |= REQ_STATE_DONE;
2535 			mpt_fc_els_send_response(mpt, req, rp, 20);
2536 			do_refresh = FALSE;
2537 			break;
2538 		default:
2539 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2540 			break;
2541 		}
2542 	} else if (rctl == ABTS && type == 0) {
2543 		uint16_t rx_id = le16toh(rp->Rxid);
2544 		uint16_t ox_id = le16toh(rp->Oxid);
2545 		request_t *tgt_req = NULL;
2546 
2547 		mpt_prt(mpt,
2548 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2549 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2550 		    le32toh(rp->Wwn.PortNameLow));
2551 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2552 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2553 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2554 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2555 		} else {
2556 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2557 		}
2558 		if (tgt_req) {
2559 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2560 			uint8_t *vbuf;
2561 			union ccb *ccb = tgt->ccb;
2562 			uint32_t ct_id;
2563 
2564 			vbuf = tgt_req->req_vbuf;
2565 			vbuf += MPT_RQSL(mpt);
2566 
2567 			/*
			 * Check to make sure we have the correct command.
			 * The reply descriptor in the target state should
			 * contain an IoIndex that matches the RX_ID.
2572 			 *
2573 			 * It'd be nice to have OX_ID to crosscheck with
2574 			 * as well.
2575 			 */
2576 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2577 
2578 			if (ct_id != rx_id) {
2579 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2580 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2581 				    rx_id, ct_id);
2582 				goto skip;
2583 			}
2584 
2585 			ccb = tgt->ccb;
2586 			if (ccb) {
2587 				mpt_prt(mpt,
2588 				    "CCB (%p): lun %u flags %x status %x\n",
2589 				    ccb, ccb->ccb_h.target_lun,
2590 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2591 			}
2592 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2593 			    "%x nxfers %x\n", tgt->state,
2594 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2595 			    tgt->nxfers);
2596   skip:
2597 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2598 				mpt_prt(mpt, "unable to start TargetAbort\n");
2599 			}
2600 		} else {
2601 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2602 		}
2603 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2604 		elsbuf[0] = htobe32(0);
2605 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
		elsbuf[2] = htobe32(0x0000ffff);
2607 		/*
		 * Dork with the reply frame so that the response to it
		 * will be correct: Rctl is bumped from ABTS to BA_ACC.
2610 		 */
2611 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2612 		/* remove from active list as we're done */
2613 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2614 		req->state &= ~REQ_STATE_QUEUED;
2615 		req->state |= REQ_STATE_DONE;
2616 		mpt_fc_els_send_response(mpt, req, rp, 12);
2617 		do_refresh = FALSE;
2618 	} else {
2619 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2620 	}
2621 	if (do_refresh == TRUE) {
2622 		/* remove from active list as we're done */
2623 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2624 		req->state &= ~REQ_STATE_QUEUED;
2625 		req->state |= REQ_STATE_DONE;
2626 		mpt_fc_post_els(mpt, req, ioindex);
2627 	}
2628 	return (TRUE);
2629 }
2630 
2631 /*
2632  * Clean up all SCSI Initiator personality state in response
2633  * to a controller reset.
2634  */
2635 static void
2636 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2637 {
2638 	/*
2639 	 * The pending list is already run down by
2640 	 * the generic handler.  Perform the same
2641 	 * operation on the timed out request list.
2642 	 */
2643 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
2644 				   MPI_IOCSTATUS_INVALID_STATE);
2645 
2646 	/*
2647 	 * XXX: We need to repost ELS and Target Command Buffers?
2648 	 */
2649 
2650 	/*
2651 	 * Inform the XPT that a bus reset has occurred.
2652 	 */
2653 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
2654 }
2655 
2656 /*
2657  * Parse additional completion information in the reply
2658  * frame for SCSI I/O requests.
2659  */
2660 static int
2661 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
2662 			     MSG_DEFAULT_REPLY *reply_frame)
2663 {
2664 	union ccb *ccb;
2665 	MSG_SCSI_IO_REPLY *scsi_io_reply;
2666 	u_int ioc_status;
2667 	u_int sstate;
2668 	u_int loginfo;
2669 
2670 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
2671 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
2672 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
2673 		("MPT SCSI I/O Handler called with incorrect reply type"));
2674 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
2675 		("MPT SCSI I/O Handler called with continuation reply"));
2676 
2677 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
2678 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
2679 	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
2680 	ioc_status &= MPI_IOCSTATUS_MASK;
2681 	sstate = scsi_io_reply->SCSIState;
2682 
2683 	ccb = req->ccb;
2684 	ccb->csio.resid =
2685 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
2686 
2687 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
2688 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
2689 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2690 		ccb->csio.sense_resid =
2691 		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
2692 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
2693 		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
2694 	}
2695 
2696 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
2697 		/*
2698 		 * Tag messages rejected, but non-tagged retry
2699 		 * was successful.
2700 XXXX
2701 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
2702 		 */
2703 	}
2704 
2705 	switch(ioc_status) {
2706 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2707 		/*
2708 		 * XXX
2709 		 * Linux driver indicates that a zero
2710 		 * transfer length with this error code
2711 		 * indicates a CRC error.
2712 		 *
2713 		 * No need to swap the bytes for checking
2714 		 * against zero.
2715 		 */
2716 		if (scsi_io_reply->TransferCount == 0) {
2717 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2718 			break;
2719 		}
2720 		/* FALLTHROUGH */
2721 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
2722 	case MPI_IOCSTATUS_SUCCESS:
2723 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
2724 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
2725 			/*
2726 			 * Status was never returned for this transaction.
2727 			 */
2728 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
2729 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
2730 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
2731 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
2732 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
2733 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
			/* XXX Handle SPI-Packet and FCP-2 response info. */
2737 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2738 		} else
2739 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2740 		break;
2741 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
2742 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
2743 		break;
2744 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
2745 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2746 		break;
2747 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2748 		/*
2749 		 * Since selection timeouts and "device really not
2750 		 * there" are grouped into this error code, report
2751 		 * selection timeout.  Selection timeouts are
2752 		 * typically retried before giving up on the device
2753 		 * whereas "device not there" errors are considered
2754 		 * unretryable.
2755 		 */
2756 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
2757 		break;
2758 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2759 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
2760 		break;
2761 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
2762 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
2763 		break;
2764 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
2765 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
2766 		break;
2767 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2768 		ccb->ccb_h.status = CAM_UA_TERMIO;
2769 		break;
2770 	case MPI_IOCSTATUS_INVALID_STATE:
2771 		/*
2772 		 * The IOC has been reset.  Emulate a bus reset.
2773 		 */
2774 		/* FALLTHROUGH */
2775 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
2776 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2777 		break;
2778 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
2779 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
2780 		/*
2781 		 * Don't clobber any timeout status that has
2782 		 * already been set for this transaction.  We
2783 		 * want the SCSI layer to be able to differentiate
2784 		 * between the command we aborted due to timeout
2785 		 * and any innocent bystanders.
2786 		 */
2787 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
2788 			break;
2789 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
2790 		break;
2791 
2792 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2793 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
2794 		break;
2795 	case MPI_IOCSTATUS_BUSY:
2796 		mpt_set_ccb_status(ccb, CAM_BUSY);
2797 		break;
2798 	case MPI_IOCSTATUS_INVALID_FUNCTION:
2799 	case MPI_IOCSTATUS_INVALID_SGL:
2800 	case MPI_IOCSTATUS_INTERNAL_ERROR:
2801 	case MPI_IOCSTATUS_INVALID_FIELD:
2802 	default:
2803 		/* XXX
2804 		 * Some of the above may need to kick
		 * off a recovery action!!!!
2806 		 */
2807 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2808 		break;
2809 	}
2810 
2811 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2812 		mpt_freeze_ccb(ccb);
2813 	}
2814 
2815 	return (TRUE);
2816 }
2817 
2818 static void
2819 mpt_action(struct cam_sim *sim, union ccb *ccb)
2820 {
2821 	struct mpt_softc *mpt;
2822 	struct ccb_trans_settings *cts;
2823 	target_id_t tgt;
2824 	lun_id_t lun;
2825 	int raid_passthru;
2826 
2827 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
2828 
2829 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
2830 	KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action"));
2831 	raid_passthru = (sim == mpt->phydisk_sim);
2832 
2833 	tgt = ccb->ccb_h.target_id;
2834 	lun = ccb->ccb_h.target_lun;
2835 	if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ &&
2836 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
2837 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
2838 		CAMLOCK_2_MPTLOCK(mpt);
2839 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2840 			MPTLOCK_2_CAMLOCK(mpt);
2841 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2842 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2843 			xpt_done(ccb);
2844 			return;
2845 		}
2846 		MPTLOCK_2_CAMLOCK(mpt);
2847 	}
2848 	ccb->ccb_h.ccb_mpt_ptr = mpt;
2849 
2850 	switch (ccb->ccb_h.func_code) {
2851 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2852 		/*
2853 		 * Do a couple of preliminary checks...
2854 		 */
2855 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2856 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2857 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2858 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2859 				break;
2860 			}
2861 		}
2862 		/* Max supported CDB length is 16 bytes */
2863 		/* XXX Unless we implement the new 32byte message type */
2864 		if (ccb->csio.cdb_len >
2865 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
2866 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2867 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2868 			break;
2869 		}
2870 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2871 		mpt_start(sim, ccb);
2872 		return;
2873 
2874 	case XPT_RESET_BUS:
2875 	case XPT_RESET_DEV:
2876 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2877 			ccb->ccb_h.func_code == XPT_RESET_BUS ?
2878 			"XPT_RESET_BUS\n" : "XPT_RESET_DEV\n");
2879 
2880 		CAMLOCK_2_MPTLOCK(mpt);
2881 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
2882 		MPTLOCK_2_CAMLOCK(mpt);
2883 
2884 		/*
2885 		 * mpt_bus_reset is always successful in that it
2886 		 * will fall back to a hard reset should a bus
2887 		 * reset attempt fail.
2888 		 */
2889 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2890 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2891 		break;
2892 
2893 	case XPT_ABORT:
2894 	{
2895 		union ccb *accb = ccb->cab.abort_ccb;
2896 		CAMLOCK_2_MPTLOCK(mpt);
2897 		switch (accb->ccb_h.func_code) {
2898 		case XPT_ACCEPT_TARGET_IO:
2899 		case XPT_IMMED_NOTIFY:
2900 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
2901 			break;
2902 		case XPT_CONT_TARGET_IO:
2903 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
2904 			ccb->ccb_h.status = CAM_UA_ABORT;
2905 			break;
2906 		case XPT_SCSI_IO:
2907 			ccb->ccb_h.status = CAM_UA_ABORT;
2908 			break;
2909 		default:
2910 			ccb->ccb_h.status = CAM_REQ_INVALID;
2911 			break;
2912 		}
2913 		MPTLOCK_2_CAMLOCK(mpt);
2914 		break;
2915 	}
2916 
2917 #ifdef	CAM_NEW_TRAN_CODE
2918 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
2919 #else
2920 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
2921 #endif
2922 #define	DP_DISC_ENABLE	0x1
2923 #define	DP_DISC_DISABL	0x2
2924 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
2925 
2926 #define	DP_TQING_ENABLE	0x4
2927 #define	DP_TQING_DISABL	0x8
2928 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
2929 
2930 #define	DP_WIDE		0x10
2931 #define	DP_NARROW	0x20
2932 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
2933 
2934 #define	DP_SYNC		0x40
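/*
 * The DP_* bits accumulate in dval which transfer-setting changes
 * (disconnect, tagged queueing, width, sync) the CCB requested.
 */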
2935 
2936 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2937 	{
2938 #ifdef	CAM_NEW_TRAN_CODE
2939 		struct ccb_trans_settings_scsi *scsi;
2940 		struct ccb_trans_settings_spi *spi;
2941 #endif
2942 		uint8_t dval;
2943 		u_int period;
2944 		u_int offset;
2945 		int i, j;
2946 
2947 		cts = &ccb->cts;
2948 
2949 		if (mpt->is_fc || mpt->is_sas) {
2950 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2951 			break;
2952 		}
2953 
2954 		/*
2955 		 * Skip attempting settings on RAID volume disks.
2956 		 * Other devices on the bus get the normal treatment.
2957 		 */
2958 		if (mpt->phydisk_sim && raid_passthru == 0 &&
2959 		    mpt_is_raid_volume(mpt, tgt) != 0) {
2960 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
2961 			    "skipping transfer settings for RAID volumes\n");
2962 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2963 			break;
2964 		}
2965 
2966 		i = mpt->mpt_port_page2.PortSettings &
2967 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
2968 		j = mpt->mpt_port_page2.PortFlags &
2969 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
2970 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
2971 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
2972 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
2973 			    "honoring BIOS transfer negotiations\n");
2974 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2975 			break;
2976 		}
2977 
2978 		dval = 0;
2979 		period = 0;
2980 		offset = 0;
2981 
2982 #ifndef	CAM_NEW_TRAN_CODE
2983 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
2984 			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
2985 			    DP_DISC_ENABLE : DP_DISC_DISABL;
2986 		}
2987 
2988 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
2989 			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
2990 			    DP_TQING_ENABLE : DP_TQING_DISABL;
2991 		}
2992 
2993 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
2994 			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
2995 		}
2996 
2997 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2998 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
2999 			dval |= DP_SYNC;
3000 			period = cts->sync_period;
3001 			offset = cts->sync_offset;
3002 		}
3003 #else
3004 		scsi = &cts->proto_specific.scsi;
3005 		spi = &cts->xport_specific.spi;
3006 
3007 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3009 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3010 		}
3011 
3012 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3014 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3015 		}
3016 
3017 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3018 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3019 			    DP_WIDE : DP_NARROW;
3020 		}
3021 
3022 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
3023 		    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
3024 		    (spi->sync_period && spi->sync_offset)) {
3025 			dval |= DP_SYNC;
3026 			period = spi->sync_period;
3027 			offset = spi->sync_offset;
3028 		}
3029 #endif
3030 		CAMLOCK_2_MPTLOCK(mpt);
3031 		if (dval & DP_DISC_ENABLE) {
3032 			mpt->mpt_disc_enable |= (1 << tgt);
3033 		} else if (dval & DP_DISC_DISABL) {
3034 			mpt->mpt_disc_enable &= ~(1 << tgt);
3035 		}
3036 		if (dval & DP_TQING_ENABLE) {
3037 			mpt->mpt_tag_enable |= (1 << tgt);
3038 		} else if (dval & DP_TQING_DISABL) {
3039 			mpt->mpt_tag_enable &= ~(1 << tgt);
3040 		}
3041 		if (dval & DP_WIDTH) {
3042 			mpt_setwidth(mpt, tgt, 1);
3043 		}
3044 		if (dval & DP_SYNC) {
3045 			mpt_setsync(mpt, tgt, period, offset);
3046 		}
3047 
3048 		if (mpt_update_spi_config(mpt, tgt)) {
3049 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3050 		} else {
3051 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3052 		}
3053 		MPTLOCK_2_CAMLOCK(mpt);
3054 		break;
3055 	}
3056 	case XPT_GET_TRAN_SETTINGS:
3057 		cts = &ccb->cts;
3058 		if (mpt->is_fc) {
3059 #ifndef	CAM_NEW_TRAN_CODE
3060 			/*
			 * A lot of normal SCSI things don't make sense here.
3062 			 */
3063 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3064 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3065 			/*
3066 			 * How do you measure the width of a high
3067 			 * speed serial bus? Well, in bytes.
3068 			 *
3069 			 * Offset and period make no sense, though, so we set
3070 			 * (above) a 'base' transfer speed to be gigabit.
3071 			 */
3072 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3073 #else
3074 			struct ccb_trans_settings_fc *fc =
3075 			    &cts->xport_specific.fc;
3076 
3077 			cts->protocol = PROTO_SCSI;
3078 			cts->protocol_version = SCSI_REV_2;
3079 			cts->transport = XPORT_FC;
3080 			cts->transport_version = 0;
3081 
3082 			fc->valid = CTS_FC_VALID_SPEED;
3083 			fc->bitrate = 100000;	/* XXX: Need for 2Gb/s */
3084 			/* XXX: need a port database for each target */
3085 #endif
3086 		} else if (mpt->is_sas) {
3087 #ifndef	CAM_NEW_TRAN_CODE
3088 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3089 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3090 			/*
3091 			 * How do you measure the width of a high
3092 			 * speed serial bus? Well, in bytes.
3093 			 *
3094 			 * Offset and period make no sense, though, so we set
3095 			 * (above) a 'base' transfer speed to be gigabit.
3096 			 */
3097 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3098 #else
3099 			struct ccb_trans_settings_sas *sas =
3100 			    &cts->xport_specific.sas;
3101 
3102 			cts->protocol = PROTO_SCSI;
3103 			cts->protocol_version = SCSI_REV_3;
3104 			cts->transport = XPORT_SAS;
3105 			cts->transport_version = 0;
3106 
3107 			sas->valid = CTS_SAS_VALID_SPEED;
3108 			sas->bitrate = 300000;	/* XXX: Default 3Gbps */
3109 #endif
3110 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
3111 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3112 			break;
3113 		}
3114 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3115 		break;
3116 
3117 	case XPT_CALC_GEOMETRY:
3118 	{
3119 		struct ccb_calc_geometry *ccg;
3120 
3121 		ccg = &ccb->ccg;
3122 		if (ccg->block_size == 0) {
3123 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3124 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3125 			break;
3126 		}
3127 		mpt_calc_geometry(ccg, /*extended*/1);
3128 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3129 		break;
3130 	}
3131 	case XPT_PATH_INQ:		/* Path routing inquiry */
3132 	{
3133 		struct ccb_pathinq *cpi = &ccb->cpi;
3134 
3135 		cpi->version_num = 1;
3136 		cpi->target_sprt = 0;
3137 		cpi->hba_eng_cnt = 0;
3138 		cpi->max_target = mpt->mpt_max_devices - 1;
3139 		/*
		 * XXX: FC cards report MAX_DEVICES of 512, but we
3141 		 * XXX: seem to hang when going higher than 255.
3142 		 */
3143 		if (cpi->max_target > 255)
3144 			cpi->max_target = 255;
3145 		/*
3146 		 * XXX: VMware ESX reports > 16 devices and then dies
3147 		 * XXX: when we probe.
3148 		 */
3149 		if (mpt->is_spi && cpi->max_target > 15)
3150 			cpi->max_target = 15;
3151 		cpi->max_lun = 7;
3152 		cpi->initiator_id = mpt->mpt_ini_id;
3153 
3154 		cpi->bus_id = cam_sim_bus(sim);
3155 		/*
3156 		 * Actual speed for each device varies.
3157 		 *
3158 		 * The base speed is the speed of the underlying connection.
3159 		 * This is strictly determined for SPI (async, narrow). If
3160 		 * link is up for Fibre Channel, then speed can be gotten
3161 		 * from that.
3162 		 */
3163 		if (mpt->is_fc) {
3164 			cpi->hba_misc = PIM_NOBUSRESET;
3165 			cpi->base_transfer_speed =
3166 			    mpt->mpt_fcport_speed * 100000;
3167 			cpi->hba_inquiry = PI_TAG_ABLE;
3168 		} else if (mpt->is_sas) {
3169 			cpi->hba_misc = PIM_NOBUSRESET;
3170 			cpi->base_transfer_speed = 300000;
3171 			cpi->hba_inquiry = PI_TAG_ABLE;
3172 		} else {
3173 			cpi->hba_misc = PIM_SEQSCAN;
3174 			cpi->base_transfer_speed = 3300;
3175 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3176 		}
3177 
3178 		/*
	 * We give our fake RAID passthru bus a width that is MaxPhysDisks
3180 		 * wide, restrict it to one lun and have it *not* be a bus
3181 		 * that can have a SCSI bus reset.
3182 		 */
3183 		if (raid_passthru) {
3184 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3185 			cpi->initiator_id = cpi->max_target + 1;
3186 			cpi->max_lun = 0;
3187 			cpi->hba_misc |= PIM_NOBUSRESET;
3188 		}
3189 
3190 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3191 			cpi->hba_misc |= PIM_NOINITIATOR;
3192 		}
3193 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3194 			cpi->target_sprt =
3195 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3196 		} else {
3197 			cpi->target_sprt = 0;
3198 		}
3199 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3200 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3201 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3202 		cpi->unit_number = cam_sim_unit(sim);
3203 		cpi->ccb_h.status = CAM_REQ_CMP;
3204 		break;
3205 	}
3206 	case XPT_EN_LUN:		/* Enable LUN as a target */
3207 	{
3208 		int result;
3209 
3210 		CAMLOCK_2_MPTLOCK(mpt);
3211 		if (ccb->cel.enable)
3212 			result = mpt_enable_lun(mpt,
3213 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3214 		else
3215 			result = mpt_disable_lun(mpt,
3216 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3217 		MPTLOCK_2_CAMLOCK(mpt);
3218 		if (result == 0) {
3219 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3220 		} else {
3221 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3222 		}
3223 		break;
3224 	}
3225 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3226 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3227 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3228 	{
3229 		tgt_resource_t *trtp;
3230 		lun_id_t lun = ccb->ccb_h.target_lun;
3231 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3232 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3233 		ccb->ccb_h.flags = 0;
3234 
3235 		if (lun == CAM_LUN_WILDCARD) {
3236 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3237 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3238 				break;
3239 			}
3240 			trtp = &mpt->trt_wildcard;
3241 		} else if (lun >= MPT_MAX_LUNS) {
3242 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3243 			break;
3244 		} else {
3245 			trtp = &mpt->trt[lun];
3246 		}
3247 		CAMLOCK_2_MPTLOCK(mpt);
3248 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3249 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3250 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3251 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3252 			    sim_links.stqe);
3253 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3254 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3255 			    "Put FREE INOT lun %d\n", lun);
3256 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3257 			    sim_links.stqe);
3258 		} else {
3259 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3260 		}
3261 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3262 		MPTLOCK_2_CAMLOCK(mpt);
3263 		return;
3264 	}
3265 	case XPT_CONT_TARGET_IO:
3266 		CAMLOCK_2_MPTLOCK(mpt);
3267 		mpt_target_start_io(mpt, ccb);
3268 		MPTLOCK_2_CAMLOCK(mpt);
3269 		return;
3270 
3271 	default:
3272 		ccb->ccb_h.status = CAM_REQ_INVALID;
3273 		break;
3274 	}
3275 	xpt_done(ccb);
3276 }
3277 
3278 static int
3279 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3280 {
3281 #ifdef	CAM_NEW_TRAN_CODE
3282 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3283 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3284 #endif
3285 	target_id_t tgt;
3286 	uint8_t dval, pval, oval;
3287 	int rv;
3288 
3289 	if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3290 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3291 			return (-1);
3292 		}
3293 	} else {
3294 		tgt = cts->ccb_h.target_id;
3295 	}
3296 
3297 	/*
	 * XXX: We aren't looking at Port Page 2 BIOS settings here.
3299 	 * XXX: For goal settings, we pick the max from port page 0
3300 	 *
3301 	 * For current settings we read the current settings out from
3302 	 * device page 0 for that target.
3303 	 */
3304 	if (IS_CURRENT_SETTINGS(cts)) {
3305 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3306 		dval = 0;
3307 
3308 		CAMLOCK_2_MPTLOCK(mpt);
3309 		tmp = mpt->mpt_dev_page0[tgt];
3310 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3311 		    sizeof(tmp), FALSE, 5000);
3312 		if (rv) {
3313 			MPTLOCK_2_CAMLOCK(mpt);
3314 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3315 			return (rv);
3316 		}
3317 		MPTLOCK_2_CAMLOCK(mpt);
3318 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3319 		    DP_WIDE : DP_NARROW;
3320 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3321 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3322 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3323 		    DP_TQING_ENABLE : DP_TQING_DISABL;
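		/*
		 * The negotiated sync offset lives in bits 16..23 of
		 * NegotiatedParameters and the sync period factor in
		 * bits 8..15.
		 */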
3324 		oval = (tmp.NegotiatedParameters >> 16) & 0xff;
3325 		pval = (tmp.NegotiatedParameters >>  8) & 0xff;
3326 		mpt->mpt_dev_page0[tgt] = tmp;
3327 	} else {
3328 		/*
		 * XXX: Just report the theoretical maximum.
3330 		 */
3331 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE;
3332 		oval = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
3333 		pval = (mpt->mpt_port_page0.Capabilities >>  8) & 0xff;
3334 	}
3335 #ifndef	CAM_NEW_TRAN_CODE
3336 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3337 	if (dval & DP_DISC_ENABLE) {
3338 		cts->flags |= CCB_TRANS_DISC_ENB;
3339 	}
3340 	if (dval & DP_TQING_ENABLE) {
3341 		cts->flags |= CCB_TRANS_TAG_ENB;
3342 	}
3343 	if (dval & DP_WIDE) {
3344 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3345 	} else {
3346 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3347 	}
3348 	cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
3349 	    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3350 	if (oval) {
3351 		cts->sync_period = pval;
3352 		cts->sync_offset = oval;
3353 		cts->valid |=
3354 		    CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID;
3355 	}
3356 #else
3357 	cts->protocol = PROTO_SCSI;
3358 	cts->protocol_version = SCSI_REV_2;
3359 	cts->transport = XPORT_SPI;
3360 	cts->transport_version = 2;
3361 
3362 	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
3363 	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
3364 	if (dval & DP_DISC_ENABLE) {
3365 		spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3366 	}
3367 	if (dval & DP_TQING_ENABLE) {
3368 		scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3369 	}
3370 	if (oval && pval) {
3371 		spi->sync_offset = oval;
3372 		spi->sync_period = pval;
3373 		spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3374 		spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3375 	}
3376 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3377 	if (dval & DP_WIDE) {
3378 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3379 	} else {
3380 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3381 	}
3382 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3383 		scsi->valid = CTS_SCSI_VALID_TQ;
3384 		spi->valid |= CTS_SPI_VALID_DISC;
3385 	} else {
3386 		scsi->valid = 0;
3387 	}
3388 #endif
3389 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3390 	    "mpt_get_spi_settings[%d]: %s 0x%x period 0x%x offset %d\n", tgt,
3391 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3392 	return (0);
3393 }
3394 
3395 static void
3396 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3397 {
3398 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3399 
3400 	ptr = &mpt->mpt_dev_page1[tgt];
3401 	if (onoff) {
3402 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3403 	} else {
3404 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3405 	}
3406 }
3407 
3408 static void
3409 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3410 {
3411 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3412 
3413 	ptr = &mpt->mpt_dev_page1[tgt];
3414 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3415 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3416 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3417 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3418 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3419 	ptr->RequestedParameters |= (period << 8) | (offset << 16);
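	/*
	 * Period factors below 0x0a call for DT clocking (Ultra160 and
	 * faster); below 0x09 we also request QAS and information
	 * units, which the faster packetized rates presume.
	 */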
3420 	if (period < 0xa) {
3421 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3422 	}
3423 	if (period < 0x9) {
3424 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3425 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3426 	}
3427 }
3428 
3429 static int
3430 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3431 {
3432 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3433 	int rv;
3434 
3435 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3436 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3437 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3438 	tmp = mpt->mpt_dev_page1[tgt];
3439 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3440 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3441 	if (rv) {
3442 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3443 		return (-1);
3444 	}
3445 	return (0);
3446 }
3447 
3448 static void
3449 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3450 {
3451 #if __FreeBSD_version >= 500000
3452 	cam_calc_geometry(ccg, extended);
3453 #else
3454 	uint32_t size_mb;
3455 	uint32_t secs_per_cylinder;
3456 
3457 	if (ccg->block_size == 0) {
3458 		ccg->ccb_h.status = CAM_REQ_INVALID;
3459 		return;
3460 	}
3461 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3462 	if (size_mb > 1024 && extended) {
3463 		ccg->heads = 255;
3464 		ccg->secs_per_track = 63;
3465 	} else {
3466 		ccg->heads = 64;
3467 		ccg->secs_per_track = 32;
3468 	}
3469 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3470 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3471 	ccg->ccb_h.status = CAM_REQ_CMP;
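	/*
	 * Worked example (editor's illustration): a volume of 8388608
	 * 512-byte sectors is (8388608 / 2048) = 4096MB, so the extended
	 * method applies: 255 heads * 63 sectors = 16065 sectors per
	 * cylinder, giving 8388608 / 16065 = 522 cylinders.
	 */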
3472 #endif
3473 }
3474 
3475 /****************************** Timeout Recovery ******************************/
3476 static int
3477 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3478 {
3479 	int error;
3480 
3481 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3482 	    &mpt->recovery_thread, /*flags*/0,
3483 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3484 	return (error);
3485 }
3486 
3487 static void
3488 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3489 {
3490 	if (mpt->recovery_thread == NULL) {
3491 		return;
3492 	}
3493 	mpt->shutdwn_recovery = 1;
3494 	wakeup(mpt);
3495 	/*
3496 	 * Sleep on a slightly different location
3497 	 * for this interlock just for added safety.
3498 	 */
3499 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3500 }
3501 
3502 static void
3503 mpt_recovery_thread(void *arg)
3504 {
3505 	struct mpt_softc *mpt;
3506 
3507 #if __FreeBSD_version >= 500000
3508 	mtx_lock(&Giant);
3509 #endif
3510 	mpt = (struct mpt_softc *)arg;
3511 	MPT_LOCK(mpt);
3512 	for (;;) {
3513 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3514 			if (mpt->shutdwn_recovery == 0) {
3515 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3516 			}
3517 		}
3518 		if (mpt->shutdwn_recovery != 0) {
3519 			break;
3520 		}
3521 		mpt_recover_commands(mpt);
3522 	}
3523 	mpt->recovery_thread = NULL;
3524 	wakeup(&mpt->recovery_thread);
3525 	MPT_UNLOCK(mpt);
3526 #if __FreeBSD_version >= 500000
3527 	mtx_unlock(&Giant);
3528 #endif
3529 	kthread_exit(0);
3530 }
3531 
3532 static int
3533 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3534     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3535 {
3536 	MSG_SCSI_TASK_MGMT *tmf_req;
3537 	int		    error;
3538 
3539 	/*
3540 	 * Wait for any current TMF request to complete.
3541 	 * We're only allowed to issue one TMF at a time.
3542 	 */
3543 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3544 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3545 	if (error != 0) {
3546 		mpt_reset(mpt, TRUE);
3547 		return (ETIMEDOUT);
3548 	}
3549 
3550 	mpt_assign_serno(mpt, mpt->tmf_req);
3551 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3552 
3553 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3554 	memset(tmf_req, 0, sizeof(*tmf_req));
3555 	tmf_req->TargetID = target;
3556 	tmf_req->Bus = channel;
3557 	tmf_req->ChainOffset = 0;
3558 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3559 	tmf_req->Reserved = 0;
3560 	tmf_req->TaskType = type;
3561 	tmf_req->Reserved1 = 0;
3562 	tmf_req->MsgFlags = flags;
3563 	tmf_req->MsgContext =
3564 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3565 	memset(&tmf_req->LUN, 0,
3566 	    sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
3567 	if (lun >= 256) {
3568 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3569 		tmf_req->LUN[1] = lun & 0xff;
3570 	} else {
3571 		tmf_req->LUN[1] = lun;
3572 	}
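	/*
	 * Editor's note: this is the SAM single-level flat LUN encoding;
	 * e.g. lun 0x123 yields LUN[0] = 0x40 | 0x01 = 0x41 and
	 * LUN[1] = 0x23, while luns below 256 use plain peripheral
	 * addressing with the lun in the second byte.
	 */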
3573 	tmf_req->TaskMsgContext = abort_ctx;
3574 
3575 	mpt_lprt(mpt, MPT_PRT_INFO,
3576 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3577 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3578 	if (mpt->verbose > MPT_PRT_DEBUG) {
3579 		mpt_print_request(tmf_req);
3580 	}
3581 
3582 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3583 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3584 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3585 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3586 	if (error != MPT_OK) {
3587 		mpt_reset(mpt, TRUE);
3588 	}
3589 	return (error);
3590 }
3591 
3592 /*
3593  * When a command times out, it is placed on the request_timeout_list
3594  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3595  * only a single TMF operation at a time, so we serially abort/BDR, etc.,
3596  * the timed-out transactions.  The next TMF is issued either by the
3597  * completion handler of the current TMF waking our recovery thread,
3598  * or by the TMF timeout handler causing a hard reset sequence.
3599  */
3600 static void
3601 mpt_recover_commands(struct mpt_softc *mpt)
3602 {
3603 	request_t	   *req;
3604 	union ccb	   *ccb;
3605 	int		    error;
3606 
3607 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3608 		/*
3609 		 * No work to do- leave.
3610 		 */
3611 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3612 		return;
3613 	}
3614 
3615 	/*
3616 	 * Flush any commands whose completion coincides with their timeout.
3617 	 */
3618 	mpt_intr(mpt);
3619 
3620 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3621 		/*
3622 		 * The timed-out commands have already
3623 		 * completed.  This typically means
3624 		 * that either the timeout value was on
3625 		 * the hairy edge of what the device
3626 		 * requires or - more likely - interrupts
3627 		 * are not happening.
3628 		 */
3629 		mpt_prt(mpt, "Timed-out requests already complete. "
3630 		    "Interrupts may not be functioning.\n");
3631 		mpt_enable_ints(mpt);
3632 		return;
3633 	}
3634 
3635 	/*
3636 	 * We have no visibility into the current state of the
3637 	 * controller, so attempt to abort the commands in the
3638 	 * order they timed-out. For initiator commands, we
3639 	 * depend on the reply handler pulling requests off
3640 	 * the timeout list.
3641 	 */
3642 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3643 		uint16_t status;
3644 		uint8_t response;
3645 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3646 
3647 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
3648 		    req, req->serno, hdrp->Function);
3649 		ccb = req->ccb;
3650 		if (ccb == NULL) {
3651 			mpt_prt(mpt, "null ccb in timed out request. "
3652 			    "Resetting Controller.\n");
3653 			mpt_reset(mpt, TRUE);
3654 			continue;
3655 		}
3656 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
3657 
3658 		/*
3659 		 * Check to see if this is an initiator command; if it
3660 		 * is not, it must be dealt with differently.
3661 		 */
3662 		switch (hdrp->Function) {
3663 		case MPI_FUNCTION_SCSI_IO_REQUEST:
3664 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
3665 			break;
3666 		default:
3667 			/*
3668 			 * XXX: FIX ME: need to abort target assists...
3669 			 */
3670 			mpt_prt(mpt, "just putting it back on the pend q\n");
3671 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
3672 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
3673 			    links);
3674 			continue;
3675 		}
3676 
3677 		error = mpt_scsi_send_tmf(mpt,
3678 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3679 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
3680 		    htole32(req->index | scsi_io_handler_id), TRUE);
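		/*
		 * Editor's note: the abort context passed above is the
		 * MsgContext that was assigned to the timed-out I/O
		 * (req->index | scsi_io_handler_id); this is how the
		 * IOC identifies which task to abort.
		 */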
3681 
3682 		if (error != 0) {
3683 			/*
3684 			 * mpt_scsi_send_tmf hard resets on failure, so no
3685 			 * need to do so here.  Our queue should be emptied
3686 			 * by the hard reset.
3687 			 */
3688 			continue;
3689 		}
3690 
3691 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
3692 		    REQ_STATE_DONE, TRUE, 500);
3693 
3694 		status = mpt->tmf_req->IOCStatus;
3695 		response = mpt->tmf_req->ResponseCode;
3696 		mpt->tmf_req->state = REQ_STATE_FREE;
3697 
3698 		if (error != 0) {
3699 			/*
3700 			 * If we've errored out, reset the controller.
3701 			 */
3702 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
3703 			    "Resetting controller\n");
3704 			mpt_reset(mpt, TRUE);
3705 			continue;
3706 		}
3707 
3708 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
3709 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
3710 			    "Resetting controller.\n", status);
3711 			mpt_reset(mpt, TRUE);
3712 			continue;
3713 		}
3714 
3715 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
3716 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
3717 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
3718 			    "Resetting controller.\n", response);
3719 			mpt_reset(mpt, TRUE);
3720 			continue;
3721 		}
3722 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
3723 	}
3724 }
3725 
3726 /************************ Target Mode Support ****************************/
3727 static void
3728 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
3729 {
3730 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
3731 	PTR_SGE_TRANSACTION32 tep;
3732 	PTR_SGE_SIMPLE32 se;
3733 	bus_addr_t paddr;
3734 
3735 	paddr = req->req_pbuf;
3736 	paddr += MPT_RQSL(mpt);
3737 
3738 	fc = req->req_vbuf;
3739 	memset(fc, 0, MPT_REQUEST_AREA);
3740 	fc->BufferCount = 1;
3741 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
3742 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
3743 
3744 	/*
3745 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
3746 	 * consist of a TE SGL element (with a details length of zero)
3747 	 * followed by a SIMPLE SGL element which holds the address
3748 	 * of the buffer.
3749 	 */
3750 
3751 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
3752 
3753 	tep->ContextSize = 4;
3754 	tep->Flags = 0;
3755 	tep->TransactionContext[0] = htole32(ioindex);
3756 
3757 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
3758 	se->FlagsLength =
3759 		MPI_SGE_FLAGS_HOST_TO_IOC	|
3760 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
3761 		MPI_SGE_FLAGS_LAST_ELEMENT	|
3762 		MPI_SGE_FLAGS_END_OF_LIST	|
3763 		MPI_SGE_FLAGS_END_OF_BUFFER;
3764 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
3765 	se->FlagsLength |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
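	/*
	 * Editor's note: per the MPI headers, SGE flags occupy the top
	 * byte of FlagsLength (MPI_SGE_FLAGS_SHIFT), leaving the low 24
	 * bits for the length - here the ELS buffer size, i.e. what is
	 * left of the request area past the request message itself.
	 */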
3766 	se->Address = (uint32_t) paddr;
3767 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3768 	    "add ELS index %d ioindex %d for %p:%u\n",
3769 	    req->index, ioindex, req, req->serno);
3770 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
3771 	    ("mpt_fc_post_els: request not locked"));
3772 	mpt_send_cmd(mpt, req);
3773 }
3774 
3775 static void
3776 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
3777 {
3778 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
3779 	PTR_CMD_BUFFER_DESCRIPTOR cb;
3780 	bus_addr_t paddr;
3781 
3782 	paddr = req->req_pbuf;
3783 	paddr += MPT_RQSL(mpt);
3784 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
3785 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
3786 
3787 	fc = req->req_vbuf;
3788 	fc->BufferCount = 1;
3789 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
3790 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3791 
3792 	cb = &fc->Buffer[0];
3793 	cb->IoIndex = htole16(ioindex);
3794 	cb->u.PhysicalAddress32 = (U32) paddr;
3795 
3796 	mpt_check_doorbell(mpt);
3797 	mpt_send_cmd(mpt, req);
3798 }
3799 
3800 static int
3801 mpt_add_els_buffers(struct mpt_softc *mpt)
3802 {
3803 	int i;
3804 
3805 	if (mpt->is_fc == 0) {
3806 		return (TRUE);
3807 	}
3808 
3809 	if (mpt->els_cmds_allocated) {
3810 		return (TRUE);
3811 	}
3812 
3813 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
3814 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3815 
3816 	if (mpt->els_cmd_ptrs == NULL) {
3817 		return (FALSE);
3818 	}
3819 
3820 	/*
3821 	 * Feed the chip some ELS buffer resources
3822 	 */
3823 	for (i = 0; i < MPT_MAX_ELS; i++) {
3824 		request_t *req = mpt_get_request(mpt, FALSE);
3825 		if (req == NULL) {
3826 			break;
3827 		}
3828 		req->state |= REQ_STATE_LOCKED;
3829 		mpt->els_cmd_ptrs[i] = req;
3830 		mpt_fc_post_els(mpt, req, i);
3831 	}
3832 
3833 	if (i == 0) {
3834 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
3835 		free(mpt->els_cmd_ptrs, M_DEVBUF);
3836 		mpt->els_cmd_ptrs = NULL;
3837 		return (FALSE);
3838 	}
3839 	if (i != MPT_MAX_ELS) {
3840 		mpt_lprt(mpt, MPT_PRT_INFO,
3841 		    "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
3842 	}
3843 	mpt->els_cmds_allocated = i;
3844 	return(TRUE);
3845 }
3846 
3847 static int
3848 mpt_add_target_commands(struct mpt_softc *mpt)
3849 {
3850 	int i, max;
3851 
3852 	if (mpt->tgt_cmd_ptrs) {
3853 		return (TRUE);
3854 	}
3855 
3856 	max = MPT_MAX_REQUESTS(mpt) >> 1;
3857 	if (max > mpt->mpt_max_tgtcmds) {
3858 		max = mpt->mpt_max_tgtcmds;
3859 	}
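	/*
	 * Editor's note: at most half of the request pool is used for
	 * target command buffers, further limited by the number of
	 * target commands the IOC itself supports (mpt_max_tgtcmds).
	 */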
3860 	mpt->tgt_cmd_ptrs =
3861 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
3862 	if (mpt->tgt_cmd_ptrs == NULL) {
3863 		mpt_prt(mpt,
3864 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
3865 		return (FALSE);
3866 	}
3867 
3868 	for (i = 0; i < max; i++) {
3869 		request_t *req;
3870 
3871 		req = mpt_get_request(mpt, FALSE);
3872 		if (req == NULL) {
3873 			break;
3874 		}
3875 		req->state |= REQ_STATE_LOCKED;
3876 		mpt->tgt_cmd_ptrs[i] = req;
3877 		mpt_post_target_command(mpt, req, i);
3878 	}
3879 
3880 
3881 	if (i == 0) {
3882 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
3883 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
3884 		mpt->tgt_cmd_ptrs = NULL;
3885 		return (FALSE);
3886 	}
3887 
3888 	mpt->tgt_cmds_allocated = i;
3889 
3890 	if (i < max) {
3891 		mpt_lprt(mpt, MPT_PRT_INFO,
3892 		    "added %d of %d target bufs\n", i, max);
3893 	}
3894 	return (i);
3895 }
3896 
3897 static int
3898 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3899 {
3900 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3901 		mpt->twildcard = 1;
3902 	} else if (lun >= MPT_MAX_LUNS) {
3903 		return (EINVAL);
3904 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3905 		return (EINVAL);
3906 	}
3907 	if (mpt->tenabled == 0) {
3908 		if (mpt->is_fc) {
3909 			(void) mpt_fc_reset_link(mpt, 0);
3910 		}
3911 		mpt->tenabled = 1;
3912 	}
3913 	if (lun == CAM_LUN_WILDCARD) {
3914 		mpt->trt_wildcard.enabled = 1;
3915 	} else {
3916 		mpt->trt[lun].enabled = 1;
3917 	}
3918 	return (0);
3919 }
3920 
3921 static int
3922 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3923 {
3924 	int i;
3925 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3926 		mpt->twildcard = 0;
3927 	} else if (lun >= MPT_MAX_LUNS) {
3928 		return (EINVAL);
3929 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3930 		return (EINVAL);
3931 	}
3932 	if (lun == CAM_LUN_WILDCARD) {
3933 		mpt->trt_wildcard.enabled = 0;
3934 	} else {
3935 		mpt->trt[lun].enabled = 0;
3936 	}
3937 	for (i = 0; i < MPT_MAX_LUNS; i++) {
3938 		if (mpt->trt[i].enabled) {
3939 			break;
3940 		}
3941 	}
3942 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
3943 		if (mpt->is_fc) {
3944 			(void) mpt_fc_reset_link(mpt, 0);
3945 		}
3946 		mpt->tenabled = 0;
3947 	}
3948 	return (0);
3949 }
3950 
3951 /*
3952  * Called with MPT lock held
3953  */
3954 static void
3955 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
3956 {
3957 	struct ccb_scsiio *csio = &ccb->csio;
3958 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
3959 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
3960 
3961 	switch (tgt->state) {
3962 	case TGT_STATE_IN_CAM:
3963 		break;
3964 	case TGT_STATE_MOVING_DATA:
3965 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3966 		xpt_freeze_simq(mpt->sim, 1);
3967 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3968 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3969 		MPTLOCK_2_CAMLOCK(mpt);
3970 		xpt_done(ccb);
3971 		CAMLOCK_2_MPTLOCK(mpt);
3972 		return;
3973 	default:
3974 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
3975 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
3976 		mpt_tgt_dump_req_state(mpt, cmd_req);
3977 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3978 		MPTLOCK_2_CAMLOCK(mpt);
3979 		xpt_done(ccb);
3980 		CAMLOCK_2_MPTLOCK(mpt);
3981 		return;
3982 	}
3983 
3984 	if (csio->dxfer_len) {
3985 		bus_dmamap_callback_t *cb;
3986 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
3987 		request_t *req;
3988 
3989 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
3990 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
3991 
3992 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
3993 			if (mpt->outofbeer == 0) {
3994 				mpt->outofbeer = 1;
3995 				xpt_freeze_simq(mpt->sim, 1);
3996 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
3997 			}
3998 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3999 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4000 			MPTLOCK_2_CAMLOCK(mpt);
4001 			xpt_done(ccb);
4002 			CAMLOCK_2_MPTLOCK(mpt);
4003 			return;
4004 		}
4005 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4006 		if (sizeof (bus_addr_t) > 4) {
4007 			cb = mpt_execute_req_a64;
4008 		} else {
4009 			cb = mpt_execute_req;
4010 		}
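		/*
		 * Editor's note: on platforms where bus addresses are
		 * wider than 32 bits the DMA callback must build 64-bit
		 * SGEs, hence the two variants selected here.
		 */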
4011 
4012 		req->ccb = ccb;
4013 		ccb->ccb_h.ccb_req_ptr = req;
4014 
4015 		/*
4016 		 * Record the currently active ccb and the
4017 		 * request for it in our target state area.
4018 		 */
4019 		tgt->ccb = ccb;
4020 		tgt->req = req;
4021 
4022 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4023 		ta = req->req_vbuf;
4024 
4025 		if (mpt->is_sas) {
4026 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4027 			     cmd_req->req_vbuf;
4028 			ta->QueueTag = ssp->InitiatorTag;
4029 		} else if (mpt->is_spi) {
4030 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4031 			     cmd_req->req_vbuf;
4032 			ta->QueueTag = sp->Tag;
4033 		}
4034 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4035 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4036 		ta->ReplyWord = htole32(tgt->reply_desc);
4037 		if (csio->ccb_h.target_lun >= 256) {
4038 			ta->LUN[0] =
4039 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4040 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4041 		} else {
4042 			ta->LUN[1] = csio->ccb_h.target_lun;
4043 		}
4044 
4045 		ta->RelativeOffset = tgt->bytes_xfered;
4046 		ta->DataLength = ccb->csio.dxfer_len;
4047 		if (ta->DataLength > tgt->resid) {
4048 			ta->DataLength = tgt->resid;
4049 		}
4050 
4051 		/*
4052 		 * XXX Should be done after data transfer completes?
4053 		 */
4054 		tgt->resid -= csio->dxfer_len;
4055 		tgt->bytes_xfered += csio->dxfer_len;
4056 
4057 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4058 			ta->TargetAssistFlags |=
4059 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4060 		}
4061 
4062 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4063 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4064 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4065 			ta->TargetAssistFlags |=
4066 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4067 		}
4068 #endif
4069 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4070 
4071 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4072 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4073 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4074 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4075 
4076 		MPTLOCK_2_CAMLOCK(mpt);
4077 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4078 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4079 				int error;
4080 				int s = splsoftvm();
4081 				error = bus_dmamap_load(mpt->buffer_dmat,
4082 				    req->dmap, csio->data_ptr, csio->dxfer_len,
4083 				    cb, req, 0);
4084 				splx(s);
4085 				if (error == EINPROGRESS) {
4086 					xpt_freeze_simq(mpt->sim, 1);
4087 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4088 				}
4089 			} else {
4090 				/*
4091 				 * We have been given a pointer to single
4092 				 * physical buffer.
4093 				 */
4094 				struct bus_dma_segment seg;
4095 				seg.ds_addr = (bus_addr_t)
4096 				    (vm_offset_t)csio->data_ptr;
4097 				seg.ds_len = csio->dxfer_len;
4098 				(*cb)(req, &seg, 1, 0);
4099 			}
4100 		} else {
4101 			/*
4102 			 * We have been given a list of addresses.
4103 			 * This case could be easily supported but they are not
4104 			 * currently generated by the CAM subsystem so there
4105 			 * is no point in wasting the time right now.
4106 			 */
4107 			struct bus_dma_segment *sgs;
4108 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4109 				(*cb)(req, NULL, 0, EFAULT);
4110 			} else {
4111 				/* Just use the segments provided */
4112 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4113 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4114 			}
4115 		}
4116 		CAMLOCK_2_MPTLOCK(mpt);
4117 	} else {
4118 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4119 
4120 		/*
4121 		 * XXX: I don't know why this seems to happen, but
4122 		 * XXX: completing the CCB seems to make things happy.
4123 		 * XXX: This seems to happen if the initiator requests
4124 		 * XXX: enough data that we have to do multiple CTIOs.
4125 		 */
4126 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4127 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4128 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4129 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4130 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4131 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4132 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4133 			MPTLOCK_2_CAMLOCK(mpt);
4134 			xpt_done(ccb);
4135 			CAMLOCK_2_MPTLOCK(mpt);
4136 			return;
4137 		}
4138 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4139 			sp = sense;
4140 			memcpy(sp, &csio->sense_data,
4141 			   min(csio->sense_len, MPT_SENSE_SIZE));
4142 		}
4143 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4144 	}
4145 }
4146 
4147 static void
4148 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4149     uint32_t lun, int send, uint8_t *data, size_t length)
4150 {
4151 	mpt_tgt_state_t *tgt;
4152 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4153 	SGE_SIMPLE32 *se;
4154 	uint32_t flags;
4155 	uint8_t *dptr;
4156 	bus_addr_t pptr;
4157 	request_t *req;
4158 
4159 	if (length == 0) {
4160 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4161 		return;
4162 	}
4163 
4164 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4165 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4166 		mpt_prt(mpt, "out of resources- dropping local response\n");
4167 		return;
4168 	}
4169 	tgt->is_local = 1;
4170 
4171 
4172 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4173 	ta = req->req_vbuf;
4174 
4175 	if (mpt->is_sas) {
4176 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4177 		ta->QueueTag = ssp->InitiatorTag;
4178 	} else if (mpt->is_spi) {
4179 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4180 		ta->QueueTag = sp->Tag;
4181 	}
4182 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4183 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4184 	ta->ReplyWord = htole32(tgt->reply_desc);
4185 	if (lun >= 256) {
4186 		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4187 		ta->LUN[1] = lun & 0xff;
4188 	} else {
4189 		ta->LUN[1] = lun;
4190 	}
4191 	ta->RelativeOffset = 0;
4192 	ta->DataLength = length;
4193 
4194 	dptr = req->req_vbuf;
4195 	dptr += MPT_RQSL(mpt);
4196 	pptr = req->req_pbuf;
4197 	pptr += MPT_RQSL(mpt);
4198 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
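	/*
	 * Editor's note: the response data is staged in the back half of
	 * this request's own buffer (virtual dptr, physical pptr), so no
	 * separate DMA allocation is needed; it is implicitly capped at
	 * MPT_RQSL(mpt) bytes by the memcpy above.
	 */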
4199 
4200 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4201 	memset(se, 0, sizeof (*se));
4202 
4203 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4204 	if (send) {
4205 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4206 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4207 	}
4208 	se->Address = pptr;
4209 	MPI_pSGE_SET_LENGTH(se, length);
4210 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4211 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4212 	MPI_pSGE_SET_FLAGS(se, flags);
4213 
4214 	tgt->ccb = NULL;
4215 	tgt->req = req;
4216 	tgt->resid = 0;
4217 	tgt->bytes_xfered = length;
4218 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4219 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4220 #else
4221 	tgt->state = TGT_STATE_MOVING_DATA;
4222 #endif
4223 	mpt_send_cmd(mpt, req);
4224 }
4225 
4226 /*
4227  * Abort queued up CCBs
4228  */
4229 static cam_status
4230 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4231 {
4232 	struct mpt_hdr_stailq *lp;
4233 	struct ccb_hdr *srch;
4234 	int found = 0;
4235 	union ccb *accb = ccb->cab.abort_ccb;
4236 	tgt_resource_t *trtp;
4237 
4238 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4239 
4240 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4241 		trtp = &mpt->trt_wildcard;
4242 	} else {
4243 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4244 	}
4245 
4246 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4247 		lp = &trtp->atios;
4248 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4249 		lp = &trtp->inots;
4250 	} else {
4251 		return (CAM_REQ_INVALID);
4252 	}
4253 
4254 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4255 		if (srch == &accb->ccb_h) {
4256 			found = 1;
4257 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4258 			break;
4259 		}
4260 	}
4261 	if (found) {
4262 		accb->ccb_h.status = CAM_REQ_ABORTED;
4263 		xpt_done(accb);
4264 		return (CAM_REQ_CMP);
4265 	}
4266 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", accb);
4267 	return (CAM_PATH_INVALID);
4268 }
4269 
4270 /*
4271  * Ask the MPT to abort the current target command
4272  */
4273 static int
4274 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4275 {
4276 	int error;
4277 	request_t *req;
4278 	PTR_MSG_TARGET_MODE_ABORT abtp;
4279 
4280 	req = mpt_get_request(mpt, FALSE);
4281 	if (req == NULL) {
4282 		return (-1);
4283 	}
4284 	abtp = req->req_vbuf;
4285 	memset(abtp, 0, sizeof (*abtp));
4286 
4287 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4288 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4289 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4290 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4291 	error = 0;
4292 	if (mpt->is_fc || mpt->is_sas) {
4293 		mpt_send_cmd(mpt, req);
4294 	} else {
4295 		error = mpt_send_handshake_cmd(mpt, sizeof(*abtp), abtp);
4296 	}
4297 	return (error);
4298 }
4299 
4300 /*
4301  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4302  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4303  * FC929 to set bogus FC_RSP fields (nonzero residuals
4304  * but w/o RESID fields set). This causes QLogic initiators
4305  * to think maybe that a frame was lost.
4306  *
4307  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4308  * we use allocated requests to do TARGET_ASSIST and we
4309  * need to know when to release them.
4310  */
4311 
4312 static void
4313 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4314     uint8_t status, uint8_t const *sense_data)
4315 {
4316 	uint8_t *cmd_vbuf;
4317 	mpt_tgt_state_t *tgt;
4318 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4319 	request_t *req;
4320 	bus_addr_t paddr;
4321 	int resplen = 0;
4322 
4323 	cmd_vbuf = cmd_req->req_vbuf;
4324 	cmd_vbuf += MPT_RQSL(mpt);
4325 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4326 
4327 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4328 		if (mpt->outofbeer == 0) {
4329 			mpt->outofbeer = 1;
4330 			xpt_freeze_simq(mpt->sim, 1);
4331 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4332 		}
4333 		if (ccb) {
4334 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4335 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4336 			MPTLOCK_2_CAMLOCK(mpt);
4337 			xpt_done(ccb);
4338 			CAMLOCK_2_MPTLOCK(mpt);
4339 		} else {
4340 			mpt_prt(mpt,
4341 			    "could not allocate status request- dropping\n");
4342 		}
4343 		return;
4344 	}
4345 	req->ccb = ccb;
4346 	if (ccb) {
4347 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4348 		ccb->ccb_h.ccb_req_ptr = req;
4349 	}
4350 
4351 	/*
4352 	 * Record the currently active ccb, if any, and the
4353 	 * request for it in our target state area.
4354 	 */
4355 	tgt->ccb = ccb;
4356 	tgt->req = req;
4357 	tgt->state = TGT_STATE_SENDING_STATUS;
4358 
4359 	tp = req->req_vbuf;
4360 	paddr = req->req_pbuf;
4361 	paddr += MPT_RQSL(mpt);
4362 
4363 	memset(tp, 0, sizeof (*tp));
4364 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4365 	if (mpt->is_fc) {
4366 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4367 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4368 		uint8_t *sts_vbuf;
4369 		uint32_t *rsp;
4370 
4371 		sts_vbuf = req->req_vbuf;
4372 		sts_vbuf += MPT_RQSL(mpt);
4373 		rsp = (uint32_t *) sts_vbuf;
4374 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4375 
4376 		/*
4377 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4378 		 * It has to be big-endian in memory and is organized
4379 		 * in 32 bit words, so it is easiest to build it as
4380 		 * host-order words and swizzle them to big-endian as needed.
4381 		 *
4382 		 * All we're filling here is the FC_RSP payload.
4383 		 * We may just have the chip synthesize it if
4384 		 * we have no residual and an OK status.
4385 		 *
4386 		 */
4387 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4388 
4389 		rsp[2] = status;
4390 		if (tgt->resid) {
4391 			rsp[2] |= 0x800;	/* FCP_RESID_UNDER */
4392 			rsp[3] = htobe32(tgt->resid);
4393 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4394 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4395 #endif
4396 		}
4397 		if (status == SCSI_STATUS_CHECK_COND) {
4398 			int i;
4399 
4400 			rsp[2] |= 0x200;	/* FCP_SNS_LEN_VALID */
4401 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4402 			if (sense_data) {
4403 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4404 			} else {
4405 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4406 				    "TION but no sense data?\n");
4407 				memset(&rsp[8], 0, MPT_SENSE_SIZE);
4408 			}
4409 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4410 				rsp[i] = htobe32(rsp[i]);
4411 			}
4412 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4413 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4414 #endif
4415 		}
4416 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4417 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4418 #endif
4419 		rsp[2] = htobe32(rsp[2]);
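		/*
		 * Editor's note: words 3 and 4 (residual and sense length)
		 * were stored with htobe32() directly and the sense words
		 * were swizzled in the loop above, so only word 2 (flags
		 * and SCSI status) still needs the final byte swap here.
		 */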
4420 	} else if (mpt->is_sas) {
4421 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4422 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4423 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4424 	} else {
4425 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4426 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4427 		tp->StatusCode = status;
4428 		tp->QueueTag = htole16(sp->Tag);
4429 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4430 	}
4431 
4432 	tp->ReplyWord = htole32(tgt->reply_desc);
4433 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4434 
4435 #ifdef	WE_CAN_USE_AUTO_REPOST
4436 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4437 #endif
4438 	if (status == SCSI_STATUS_OK && resplen == 0) {
4439 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4440 	} else {
4441 		tp->StatusDataSGE.u.Address32 = (uint32_t) paddr;
4442 		tp->StatusDataSGE.FlagsLength =
4443 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4444 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4445 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4446 			MPI_SGE_FLAGS_END_OF_LIST	|
4447 			MPI_SGE_FLAGS_END_OF_BUFFER;
4448 		tp->StatusDataSGE.FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
4449 		tp->StatusDataSGE.FlagsLength |= resplen;
4450 	}
4451 
4452 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4453 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4454 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4455 	    req->serno, tgt->resid);
4456 	if (ccb) {
4457 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4458 		ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz);
4459 	}
4460 	mpt_send_cmd(mpt, req);
4461 }
4462 
4463 static void
4464 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4465     tgt_resource_t *trtp, int init_id)
4466 {
4467 	struct ccb_immed_notify *inot;
4468 	mpt_tgt_state_t *tgt;
4469 
4470 	tgt = MPT_TGT_STATE(mpt, req);
4471 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4472 	if (inot == NULL) {
4473 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
4474 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4475 		return;
4476 	}
4477 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4478 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4479 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4480 
4481 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4482 	inot->sense_len = 0;
4483 	memset(inot->message_args, 0, sizeof (inot->message_args));
4484 	inot->initiator_id = init_id;	/* XXX */
4485 
4486 	/*
4487 	 * This is a somewhat grotesque attempt to map from task management
4488 	 * to old style SCSI messages. God help us all.
4489 	 */
4490 	switch (fc) {
4491 	case MPT_ABORT_TASK_SET:
4492 		inot->message_args[0] = MSG_ABORT_TAG;
4493 		break;
4494 	case MPT_CLEAR_TASK_SET:
4495 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4496 		break;
4497 	case MPT_TARGET_RESET:
4498 		inot->message_args[0] = MSG_TARGET_RESET;
4499 		break;
4500 	case MPT_CLEAR_ACA:
4501 		inot->message_args[0] = MSG_CLEAR_ACA;
4502 		break;
4503 	case MPT_TERMINATE_TASK:
4504 		inot->message_args[0] = MSG_ABORT_TAG;
4505 		break;
4506 	default:
4507 		inot->message_args[0] = MSG_NOOP;
4508 		break;
4509 	}
4510 	tgt->ccb = (union ccb *) inot;
4511 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4512 	MPTLOCK_2_CAMLOCK(mpt);
4513 	xpt_done((union ccb *)inot);
4514 	CAMLOCK_2_MPTLOCK(mpt);
4515 }
4516 
4517 static void
4518 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4519 {
4520 	struct ccb_accept_tio *atiop;
4521 	lun_id_t lun;
4522 	int tag_action = 0;
4523 	mpt_tgt_state_t *tgt;
4524 	tgt_resource_t *trtp = NULL;
4525 	U8 *lunptr;
4526 	U8 *vbuf;
4527 	U16 itag;
4528 	U16 ioindex;
4529 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4530 	uint8_t *cdbp;
4531 
4532 	/*
4533 	 * First, DMA sync the received command -
4534 	 * which is in the *request* phys area.
4535 	 *
4536 	 * XXX: We could optimize this for a range
4537 	 */
4538 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
4539 	    BUS_DMASYNC_POSTREAD);
4540 
4541 	/*
4542 	 * Stash info for the current command where we can get at it later.
4543 	 */
4544 	vbuf = req->req_vbuf;
4545 	vbuf += MPT_RQSL(mpt);
4546 
4547 	/*
4548 	 * Get our state pointer set up.
4549 	 */
4550 	tgt = MPT_TGT_STATE(mpt, req);
4551 	if (tgt->state != TGT_STATE_LOADED) {
4552 		mpt_tgt_dump_req_state(mpt, req);
4553 		panic("bad target state in mpt_scsi_tgt_atio");
4554 	}
4555 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4556 	tgt->state = TGT_STATE_IN_CAM;
4557 	tgt->reply_desc = reply_desc;
4558 	ioindex = GET_IO_INDEX(reply_desc);
4559 
4560 	if (mpt->is_fc) {
4561 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4562 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4563 		if (fc->FcpCntl[2]) {
4564 			/*
4565 			 * Task Management Request
4566 			 */
4567 			switch (fc->FcpCntl[2]) {
4568 			case 0x2:
4569 				fct = MPT_ABORT_TASK_SET;
4570 				break;
4571 			case 0x4:
4572 				fct = MPT_CLEAR_TASK_SET;
4573 				break;
4574 			case 0x20:
4575 				fct = MPT_TARGET_RESET;
4576 				break;
4577 			case 0x40:
4578 				fct = MPT_CLEAR_ACA;
4579 				break;
4580 			case 0x80:
4581 				fct = MPT_TERMINATE_TASK;
4582 				break;
4583 			default:
4584 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4585 				    fc->FcpCntl[2]);
4586 				mpt_scsi_tgt_status(mpt, 0, req,
4587 				    SCSI_STATUS_OK, 0);
4588 				return;
4589 			}
4590 		} else {
4591 			switch (fc->FcpCntl[1]) {
4592 			case 0:
4593 				tag_action = MSG_SIMPLE_Q_TAG;
4594 				break;
4595 			case 1:
4596 				tag_action = MSG_HEAD_OF_Q_TAG;
4597 				break;
4598 			case 2:
4599 				tag_action = MSG_ORDERED_Q_TAG;
4600 				break;
4601 			default:
4602 				/*
4603 				 * Bah. Ignore Untagged Queueing and ACA
4604 				 */
4605 				tag_action = MSG_SIMPLE_Q_TAG;
4606 				break;
4607 			}
4608 		}
4609 		tgt->resid = be32toh(fc->FcpDl);
4610 		cdbp = fc->FcpCdb;
4611 		lunptr = fc->FcpLun;
4612 		itag = be16toh(fc->OptionalOxid);
4613 	} else if (mpt->is_sas) {
4614 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4615 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4616 		cdbp = ssp->CDB;
4617 		lunptr = ssp->LogicalUnitNumber;
4618 		itag = ssp->InitiatorTag;
4619 	} else {
4620 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4621 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4622 		cdbp = sp->CDB;
4623 		lunptr = sp->LogicalUnitNumber;
4624 		itag = sp->Tag;
4625 	}
4626 
4627 	/*
4628 	 * Generate a simple lun
4629 	 */
4630 	switch (lunptr[0] & 0xc0) {
4631 	case 0x40:
4632 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
4633 		break;
4634 	case 0:
4635 		lun = lunptr[1];
4636 		break;
4637 	default:
4638 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
4639 		lun = 0xffff;
4640 		break;
4641 	}
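	/*
	 * Editor's note: this inverts the flat-LUN encoding used elsewhere
	 * in the driver; e.g. bytes { 0x41, 0x23 } decode to lun 0x123,
	 * while { 0x00, 0x05 } is plain peripheral addressing for lun 5.
	 */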
4642 
4643 	/*
4644 	 * Deal with non-enabled or bad luns here.
4645 	 */
4646 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4647 	    mpt->trt[lun].enabled == 0) {
4648 		if (mpt->twildcard) {
4649 			trtp = &mpt->trt_wildcard;
4650 		} else if (fct == MPT_NIL_TMT_VALUE) {
4651 			/*
4652 			 * In this case, we haven't got an upstream listener
4653 			 * for either a specific lun or wildcard luns. We
4654 			 * have to make some sensible response. For regular
4655 			 * inquiry, just return some NOT HERE inquiry data.
4656 			 * For VPD inquiry, report illegal field in cdb.
4657 			 * For REQUEST SENSE, just return NO SENSE data.
4658 			 * REPORT LUNS gets illegal command.
4659 			 * All other commands get 'no such device'.
4660 			 */
4661 
4662 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
4663 
4664 			mpt_prt(mpt, "CMD 0x%x to unmanaged lun %u\n",
4665 			    cdbp[0], lun);
4666 
4667 			memset(buf, 0, MPT_SENSE_SIZE);
4668 			cond = SCSI_STATUS_CHECK_COND;
4669 			buf[0] = 0xf0;
4670 			buf[2] = 0x5;
4671 			buf[7] = 0x8;
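			/*
			 * Editor's note: this builds fixed-format sense
			 * data by hand: 0xf0 is response code 0x70 with
			 * the valid bit set, 0x5 is the ILLEGAL REQUEST
			 * sense key, and 0x8 is the additional length.
			 */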
4672 			sp = buf;
4673 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4674 
4675 			switch (cdbp[0]) {
4676 			case INQUIRY:
4677 			{
4678 				static uint8_t iqd[8] = {
4679 				    0x7f, 0x0, 0x4, 0x12, 0x0
4680 				};
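				/*
				 * Editor's note: 0x7f in byte 0 is
				 * peripheral qualifier 011b with device
				 * type 1Fh, i.e. no device of this type
				 * at this lun; the remaining bytes form
				 * a minimal inquiry response.
				 */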
4681 				if (cdbp[1] != 0) {
4682 					buf[12] = 0x26;
4683 					buf[13] = 0x01;
4684 					break;
4685 				}
4686 				mpt_prt(mpt, "local inquiry\n");
4687 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4688 				    iqd, sizeof (iqd));
4689 				return;
4690 			}
4691 			case REQUEST_SENSE:
4692 			{
4693 				buf[2] = 0x0;
4694 				mpt_prt(mpt, "local request sense\n");
4695 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4696 				    buf, sizeof (buf));
4697 				return;
4698 			}
4699 			case REPORT_LUNS:
4700 				buf[12] = 0x26;
4701 				break;
4702 			default:
4703 				buf[12] = 0x25;
4704 				break;
4705 			}
4706 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
4707 			return;
4708 		}
4709 		/* otherwise, leave trtp NULL */
4710 	} else {
4711 		trtp = &mpt->trt[lun];
4712 	}
4713 
4714 	/*
4715 	 * Deal with any task management
4716 	 */
4717 	if (fct != MPT_NIL_TMT_VALUE) {
4718 		if (trtp == NULL) {
4719 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
4720 			    fct);
4721 			mpt_scsi_tgt_status(mpt, 0, req,
4722 			    SCSI_STATUS_OK, 0);
4723 		} else {
4724 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
4725 			    GET_INITIATOR_INDEX(reply_desc));
4726 		}
4727 		return;
4728 	}
4729 
4730 
4731 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
4732 	if (atiop == NULL) {
4733 		mpt_lprt(mpt, MPT_PRT_WARN,
4734 		    "no ATIOs for lun %u- sending back %s\n", lun,
4735 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
4736 		mpt_scsi_tgt_status(mpt, NULL, req,
4737 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
4738 		    NULL);
4739 		return;
4740 	}
4741 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
4742 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4743 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
4744 	atiop->ccb_h.ccb_mpt_ptr = mpt;
4745 	atiop->ccb_h.status = CAM_CDB_RECVD;
4746 	atiop->ccb_h.target_lun = lun;
4747 	atiop->sense_len = 0;
4748 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
4749 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
4750 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
4751 
4752 	/*
4753 	 * The tag we construct here allows us to find the
4754 	 * original request that the command came in with.
4755 	 *
4756 	 * This way we don't have to depend on anything but the
4757 	 * tag to find things when CCBs show back up from CAM.
4758 	 */
4759 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4760 	tgt->tag_id = atiop->tag_id;
4761 	if (tag_action) {
4762 		atiop->tag_action = tag_action;
4763 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
4764 	}
4765 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4766 		int i;
4767 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
4768 		    atiop->ccb_h.target_lun);
4769 		for (i = 0; i < atiop->cdb_len; i++) {
4770 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
4771 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
4772 		}
4773 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
4774 	    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
4775 	}
4776 
4777 	MPTLOCK_2_CAMLOCK(mpt);
4778 	xpt_done((union ccb *)atiop);
4779 	CAMLOCK_2_MPTLOCK(mpt);
4780 }
4781 
4782 static void
4783 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
4784 {
4785 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4786 
4787 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
4788 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
4789 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
4790 	    tgt->tag_id, tgt->state);
4791 }
4792 
4793 static void
4794 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
4795 {
4796 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
4797 	    req->index, req->index, req->state);
4798 	mpt_tgt_dump_tgt_state(mpt, req);
4799 }
4800 
4801 static int
4802 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
4803     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
4804 {
4805 	int dbg;
4806 	union ccb *ccb;
4807 	U16 status;
4808 
4809 	if (reply_frame == NULL) {
4810 		/*
4811 		 * Context-only ("turbo") reply; figure out what state
4812 		 * the command is in.
4812 		 */
4813 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4814 
4815 #ifdef	INVARIANTS
4816 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
4817 		if (tgt->req) {
4818 			mpt_req_not_spcl(mpt, tgt->req,
4819 			    "turbo scsi_tgt_reply associated req", __LINE__);
4820 		}
4821 #endif
4822 		switch(tgt->state) {
4823 		case TGT_STATE_LOADED:
4824 			/*
4825 			 * This is a new command starting.
4826 			 */
4827 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
4828 			break;
4829 		case TGT_STATE_MOVING_DATA:
4830 		{
4831 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4832 
4833 			ccb = tgt->ccb;
4834 			if (tgt->req == NULL) {
4835 				panic("mpt: turbo target reply with null "
4836 				    "associated request moving data");
4837 				/* NOTREACHED */
4838 			}
4839 			if (ccb == NULL) {
4840 				if (tgt->is_local == 0) {
4841 					panic("mpt: turbo target reply with "
4842 					    "null associated ccb moving data");
4843 					/* NOTREACHED */
4844 				}
4845 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4846 				    "TARGET_ASSIST local done\n");
4847 				TAILQ_REMOVE(&mpt->request_pending_list,
4848 				    tgt->req, links);
4849 				mpt_free_request(mpt, tgt->req);
4850 				tgt->req = NULL;
4851 				mpt_scsi_tgt_status(mpt, NULL, req,
4852 				    0, NULL);
4853 				return (TRUE);
4854 			}
4855 			tgt->ccb = NULL;
4856 			tgt->nxfers++;
4857 			untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
4858 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4859 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
4860 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
4861 			/*
4862 			 * Free the Target Assist Request
4863 			 */
4864 			KASSERT(tgt->req->ccb == ccb,
4865 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
4866 			    tgt->req->serno, tgt->req->ccb));
4867 			TAILQ_REMOVE(&mpt->request_pending_list,
4868 			    tgt->req, links);
4869 			mpt_free_request(mpt, tgt->req);
4870 			tgt->req = NULL;
4871 
4872 			/*
4873 			 * Do we need to send status now? That is, are
4874 			 * we done with all our data transfers?
4875 			 */
4876 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4877 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4878 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4879 				KASSERT(ccb->ccb_h.status,
4880 				    ("zero ccb sts at %d\n", __LINE__));
4881 				tgt->state = TGT_STATE_IN_CAM;
4882 				if (mpt->outofbeer) {
4883 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4884 					mpt->outofbeer = 0;
4885 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4886 				}
4887 				MPTLOCK_2_CAMLOCK(mpt);
4888 				xpt_done(ccb);
4889 				CAMLOCK_2_MPTLOCK(mpt);
4890 				break;
4891 			}
4892 			/*
4893 			 * Otherwise, send status (and sense)
4894 			 */
4895 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4896 				sp = sense;
4897 				memcpy(sp, &ccb->csio.sense_data,
4898 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
4899 			}
4900 			mpt_scsi_tgt_status(mpt, ccb, req,
4901 			    ccb->csio.scsi_status, sp);
4902 			break;
4903 		}
4904 		case TGT_STATE_SENDING_STATUS:
4905 		case TGT_STATE_MOVING_DATA_AND_STATUS:
4906 		{
4907 			int ioindex;
4908 			ccb = tgt->ccb;
4909 
4910 			if (tgt->req == NULL) {
4911 				panic("mpt: turbo target reply with null "
4912 				    "associated request sending status");
4913 				/* NOTREACHED */
4914 			}
4915 
4916 			if (ccb) {
4917 				tgt->ccb = NULL;
4918 				if (tgt->state ==
4919 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
4920 					tgt->nxfers++;
4921 				}
4922 				untimeout(mpt_timeout, ccb,
4923 				    ccb->ccb_h.timeout_ch);
4924 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4925 					ccb->ccb_h.status |= CAM_SENT_SENSE;
4926 				}
4927 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4928 				    "TARGET_STATUS tag %x sts %x flgs %x req "
4929 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
4930 				    ccb->ccb_h.flags, tgt->req);
4931 				/*
4932 				 * Free the Target Send Status Request
4933 				 */
4934 				KASSERT(tgt->req->ccb == ccb,
4935 				    ("tgt->req %p:%u tgt->req->ccb %p",
4936 				    tgt->req, tgt->req->serno, tgt->req->ccb));
4937 				/*
4938 				 * Notify CAM that we're done
4939 				 */
4940 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4941 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4942 				KASSERT(ccb->ccb_h.status,
4943 				    ("ZERO ccb sts at %d\n", __LINE__));
4944 				tgt->ccb = NULL;
4945 			} else {
4946 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4947 				    "TARGET_STATUS non-CAM for req %p:%u\n",
4948 				    tgt->req, tgt->req->serno);
4949 			}
4950 			TAILQ_REMOVE(&mpt->request_pending_list,
4951 			    tgt->req, links);
4952 			mpt_free_request(mpt, tgt->req);
4953 			tgt->req = NULL;
4954 
4955 			/*
4956 			 * And re-post the Command Buffer.
4957 			 * This will reset the state.
4958 			 */
4959 			ioindex = GET_IO_INDEX(reply_desc);
4960 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4961 			tgt->is_local = 0;
4962 			mpt_post_target_command(mpt, req, ioindex);
4963 
4964 			/*
4965 			 * And post a done for anyone who cares
4966 			 */
4967 			if (ccb) {
4968 				if (mpt->outofbeer) {
4969 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4970 					mpt->outofbeer = 0;
4971 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4972 				}
4973 				MPTLOCK_2_CAMLOCK(mpt);
4974 				xpt_done(ccb);
4975 				CAMLOCK_2_MPTLOCK(mpt);
4976 			}
4977 			break;
4978 		}
4979 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
4980 			tgt->state = TGT_STATE_LOADED;
4981 			break;
4982 		default:
4983 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
4984 			    "Reply Function\n", tgt->state);
4985 		}
4986 		return (TRUE);
4987 	}
4988 
4989 	status = le16toh(reply_frame->IOCStatus);
4990 	if (status != MPI_IOCSTATUS_SUCCESS) {
4991 		dbg = MPT_PRT_ERROR;
4992 	} else {
4993 		dbg = MPT_PRT_DEBUG1;
4994 	}
4995 
4996 	mpt_lprt(mpt, dbg,
4997 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
4998 	     req, req->serno, reply_frame, reply_frame->Function, status);
4999 
5000 	switch (reply_frame->Function) {
5001 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5002 	{
5003 		mpt_tgt_state_t *tgt;
5004 #ifdef	INVARIANTS
5005 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5006 #endif
5007 		if (status != MPI_IOCSTATUS_SUCCESS) {
5008 			/*
5009 			 * XXX What to do?
5010 			 */
5011 			break;
5012 		}
5013 		tgt = MPT_TGT_STATE(mpt, req);
5014 		KASSERT(tgt->state == TGT_STATE_LOADING,
5015 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
5016 		mpt_assign_serno(mpt, req);
5017 		tgt->state = TGT_STATE_LOADED;
5018 		break;
5019 	}
5020 	case MPI_FUNCTION_TARGET_ASSIST:
5021 #ifdef	INVARIANTS
5022 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5023 #endif
5024 		mpt_prt(mpt, "target assist completion\n");
5025 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5026 		mpt_free_request(mpt, req);
5027 		break;
5028 	case MPI_FUNCTION_TARGET_STATUS_SEND:
5029 #ifdef	INVARIANTS
5030 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5031 #endif
5032 		mpt_prt(mpt, "status send completion\n");
5033 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5034 		mpt_free_request(mpt, req);
5035 		break;
5036 	case MPI_FUNCTION_TARGET_MODE_ABORT:
5037 	{
5038 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5039 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5040 		PTR_MSG_TARGET_MODE_ABORT abtp =
5041 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5042 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5043 #ifdef	INVARIANTS
5044 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5045 #endif
5046 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5047 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5048 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5049 		mpt_free_request(mpt, req);
5050 		break;
5051 	}
5052 	default:
5053 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5054 		    "0x%x\n", reply_frame->Function);
5055 		break;
5056 	}
5057 	return (TRUE);
5058 }
5059