xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision 84ee9401a3fc8d3c22424266f421a928989cd692)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC  adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  *
61  * Support from LSI-Logic has also gone a great deal toward making this a
62  * workable subsystem and is gratefully acknowledged.
63  */
64 /*-
65  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66  * Copyright (c) 2005, WHEEL Sp. z o.o.
67  * Copyright (c) 2004, 2005 Justin T. Gibbs
68  * All rights reserved.
69  *
70  * Redistribution and use in source and binary forms, with or without
71  * modification, are permitted provided that the following conditions are
72  * met:
73  * 1. Redistributions of source code must retain the above copyright
74  *    notice, this list of conditions and the following disclaimer.
75  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76  *    substantially similar to the "NO WARRANTY" disclaimer below
77  *    ("Disclaimer") and any redistribution must be conditioned upon including
78  *    a substantially similar Disclaimer requirement for further binary
79  *    redistribution.
80  * 3. Neither the names of the above listed copyright holders nor the names
81  *    of any contributors may be used to endorse or promote products derived
82  *    from this software without specific prior written permission.
83  *
84  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95  */
96 #include <sys/cdefs.h>
97 __FBSDID("$FreeBSD$");
98 
99 #include <dev/mpt/mpt.h>
100 #include <dev/mpt/mpt_cam.h>
101 #include <dev/mpt/mpt_raid.h>
102 
103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/mpt/mpilib/mpi_init.h"
105 #include "dev/mpt/mpilib/mpi_targ.h"
106 #include "dev/mpt/mpilib/mpi_fc.h"
107 #if __FreeBSD_version >= 500000
108 #include <sys/sysctl.h>
109 #endif
110 #include <sys/callout.h>
111 #include <sys/kthread.h>
112 
113 static void mpt_poll(struct cam_sim *);
114 static timeout_t mpt_timeout;
115 static void mpt_action(struct cam_sim *, union ccb *);
116 static int
117 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
118 static void mpt_setwidth(struct mpt_softc *, int, int);
119 static void mpt_setsync(struct mpt_softc *, int, int, int);
120 static int mpt_update_spi_config(struct mpt_softc *, int);
121 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
122 
123 static mpt_reply_handler_t mpt_scsi_reply_handler;
124 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
125 static mpt_reply_handler_t mpt_fc_els_reply_handler;
126 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
127 					MSG_DEFAULT_REPLY *);
128 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
129 static int mpt_fc_reset_link(struct mpt_softc *, int);
130 
131 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
132 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
133 static void mpt_recovery_thread(void *arg);
134 static void mpt_recover_commands(struct mpt_softc *mpt);
135 
136 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
137     u_int, u_int, u_int, int);
138 
139 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
140 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
141 static int mpt_add_els_buffers(struct mpt_softc *mpt);
142 static int mpt_add_target_commands(struct mpt_softc *mpt);
143 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
144 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
145 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
146 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
147 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
148 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
149     uint8_t, uint8_t const *);
150 static void
151 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
152     tgt_resource_t *, int);
153 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
154 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
155 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
156 
157 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
158 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
159 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
160 
161 static mpt_probe_handler_t	mpt_cam_probe;
162 static mpt_attach_handler_t	mpt_cam_attach;
163 static mpt_enable_handler_t	mpt_cam_enable;
164 static mpt_ready_handler_t	mpt_cam_ready;
165 static mpt_event_handler_t	mpt_cam_event;
166 static mpt_reset_handler_t	mpt_cam_ioc_reset;
167 static mpt_detach_handler_t	mpt_cam_detach;
168 
169 static struct mpt_personality mpt_cam_personality =
170 {
171 	.name		= "mpt_cam",
172 	.probe		= mpt_cam_probe,
173 	.attach		= mpt_cam_attach,
174 	.enable		= mpt_cam_enable,
175 	.ready		= mpt_cam_ready,
176 	.event		= mpt_cam_event,
177 	.reset		= mpt_cam_ioc_reset,
178 	.detach		= mpt_cam_detach,
179 };
180 
181 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
182 
183 int
184 mpt_cam_probe(struct mpt_softc *mpt)
185 {
186 	int role;
187 
188 	/*
189 	 * Only attach to nodes that support the initiator or target role
190 	 * (or want to) or have RAID physical devices that need CAM pass-thru
191 	 * support.
192 	 */
193 	if (mpt->do_cfg_role) {
194 		role = mpt->cfg_role;
195 	} else {
196 		role = mpt->role;
197 	}
198 	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
199 	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
200 		return (0);
201 	}
202 	return (ENODEV);
203 }
204 
205 int
206 mpt_cam_attach(struct mpt_softc *mpt)
207 {
208 	struct cam_devq *devq;
209 	mpt_handler_t	 handler;
210 	int		 maxq;
211 	int		 error;
212 
213 	TAILQ_INIT(&mpt->request_timeout_list);
214 	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
215 	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
216 
217 	handler.reply_handler = mpt_scsi_reply_handler;
218 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
219 				     &scsi_io_handler_id);
220 	if (error != 0) {
221 		goto cleanup0;
222 	}
223 
224 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
225 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
226 				     &scsi_tmf_handler_id);
227 	if (error != 0) {
228 		goto cleanup0;
229 	}
230 
231 	/*
232 	 * If we're fibre channel and could support target mode, we register
233 	 * an ELS reply handler and give it resources.
234 	 */
235 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
236 		handler.reply_handler = mpt_fc_els_reply_handler;
237 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
238 		    &fc_els_handler_id);
239 		if (error != 0) {
240 			goto cleanup0;
241 		}
242 		if (mpt_add_els_buffers(mpt) == FALSE) {
243 			error = ENOMEM;
244 			goto cleanup0;
245 		}
246 		maxq -= mpt->els_cmds_allocated;
247 	}
248 
249 	/*
250 	 * If we support target mode, we register a reply handler for it,
251 	 * but don't add command resources until we actually enable target
252 	 * mode.
253 	 */
254 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
255 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
256 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
257 		    &mpt->scsi_tgt_handler_id);
258 		if (error != 0) {
259 			goto cleanup0;
260 		}
261 	}
262 
263 	/*
264 	 * We keep one request reserved for timeout TMF requests.
265 	 */
266 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
267 	if (mpt->tmf_req == NULL) {
268 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
269 		error = ENOMEM;
270 		goto cleanup0;
271 	}
272 
273 	/*
274 	 * Mark the request as free even though not on the free list.
275 	 * There is only one TMF request allowed to be outstanding at
276 	 * a time and the TMF routines perform their own allocation
277 	 * tracking using the standard state flags.
278 	 */
279 	mpt->tmf_req->state = REQ_STATE_FREE;
280 	maxq--;
281 
282 	if (mpt_spawn_recovery_thread(mpt) != 0) {
283 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
284 		error = ENOMEM;
285 		goto cleanup0;
286 	}
287 
288 	/*
289 	 * The rest of this is CAM foo, for which we need to drop our lock
290 	 */
291 	MPTLOCK_2_CAMLOCK(mpt);
292 
293 	/*
294 	 * Create the device queue for our SIM(s).
295 	 */
296 	devq = cam_simq_alloc(maxq);
297 	if (devq == NULL) {
298 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
299 		error = ENOMEM;
300 		goto cleanup;
301 	}
302 
303 	/*
304 	 * Construct our SIM entry.
305 	 */
306 	mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
307 	    mpt->unit, 1, maxq, devq);
308 	if (mpt->sim == NULL) {
309 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
310 		cam_simq_free(devq);
311 		error = ENOMEM;
312 		goto cleanup;
313 	}
314 
315 	/*
316 	 * Register exactly this bus.
317 	 */
318 	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
319 		mpt_prt(mpt, "Bus registration Failed!\n");
320 		error = ENOMEM;
321 		goto cleanup;
322 	}
323 
324 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
325 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
326 		mpt_prt(mpt, "Unable to allocate Path!\n");
327 		error = ENOMEM;
328 		goto cleanup;
329 	}
330 
331 	/*
332 	 * Only register a second bus for RAID physical
333 	 * devices if the controller supports RAID.
334 	 */
335 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
336 		CAMLOCK_2_MPTLOCK(mpt);
337 		return (0);
338 	}
339 
340 	/*
341 	 * Create a "bus" to export all hidden disks to CAM.
342 	 */
343 	mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
344 	    mpt->unit, 1, maxq, devq);
345 	if (mpt->phydisk_sim == NULL) {
346 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
347 		error = ENOMEM;
348 		goto cleanup;
349 	}
350 
351 	/*
352 	 * Register this bus.
353 	 */
354 	if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
355 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
356 		error = ENOMEM;
357 		goto cleanup;
358 	}
359 
360 	if (xpt_create_path(&mpt->phydisk_path, NULL,
361 	    cam_sim_path(mpt->phydisk_sim),
362 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
363 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
364 		error = ENOMEM;
365 		goto cleanup;
366 	}
367 	CAMLOCK_2_MPTLOCK(mpt);
368 	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
369 	return (0);
370 
371 cleanup:
372 	CAMLOCK_2_MPTLOCK(mpt);
373 cleanup0:
374 	mpt_cam_detach(mpt);
375 	return (error);
376 }
377 
378 /*
379  * Read FC configuration information
380  */
381 static int
382 mpt_read_config_info_fc(struct mpt_softc *mpt)
383 {
384 	char *topology = NULL;
385 	int rv;
386 
387 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
388 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
389 	if (rv) {
390 		return (-1);
391 	}
392 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
393 		 mpt->mpt_fcport_page0.Header.PageVersion,
394 		 mpt->mpt_fcport_page0.Header.PageLength,
395 		 mpt->mpt_fcport_page0.Header.PageNumber,
396 		 mpt->mpt_fcport_page0.Header.PageType);
397 
398 
399 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
400 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
401 	if (rv) {
402 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
403 		return (-1);
404 	}
405 
406 	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
407 
408 	switch (mpt->mpt_fcport_page0.Flags &
409 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
410 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
411 		mpt->mpt_fcport_speed = 0;
412 		topology = "<NO LOOP>";
413 		break;
414 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
415 		topology = "N-Port";
416 		break;
417 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
418 		topology = "NL-Port";
419 		break;
420 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
421 		topology = "F-Port";
422 		break;
423 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
424 		topology = "FL-Port";
425 		break;
426 	default:
427 		mpt->mpt_fcport_speed = 0;
428 		topology = "?";
429 		break;
430 	}
431 
432 	mpt_lprt(mpt, MPT_PRT_INFO,
433 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
434 	    "Speed %u-Gbit\n", topology,
435 	    mpt->mpt_fcport_page0.WWNN.High,
436 	    mpt->mpt_fcport_page0.WWNN.Low,
437 	    mpt->mpt_fcport_page0.WWPN.High,
438 	    mpt->mpt_fcport_page0.WWPN.Low,
439 	    mpt->mpt_fcport_speed);
440 #if __FreeBSD_version >= 500000
441 	{
442 		struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
443 		struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
444 
445 		snprintf(mpt->scinfo.fc.wwnn,
446 		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
447 		    mpt->mpt_fcport_page0.WWNN.High,
448 		    mpt->mpt_fcport_page0.WWNN.Low);
449 
450 		snprintf(mpt->scinfo.fc.wwpn,
451 		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
452 		    mpt->mpt_fcport_page0.WWPN.High,
453 		    mpt->mpt_fcport_page0.WWPN.Low);
454 
455 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
456 		       "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
457 		       "World Wide Node Name");
458 
459 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
460 		       "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
461 		       "World Wide Port Name");
462 
463 	}
464 #endif
465 	return (0);
466 }
467 
468 /*
469  * Set FC configuration information.
470  */
471 static int
472 mpt_set_initial_config_fc(struct mpt_softc *mpt)
473 {
474 
475 	CONFIG_PAGE_FC_PORT_1 fc;
476 	U32 fl;
477 	int r, doit = 0;
478 	int role;
479 
480 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
481 	    &fc.Header, FALSE, 5000);
482 	if (r) {
483 		mpt_prt(mpt, "failed to read FC page 1 header\n");
484 		return (mpt_fc_reset_link(mpt, 1));
485 	}
486 
487 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
488 	    &fc.Header, sizeof (fc), FALSE, 5000);
489 	if (r) {
490 		mpt_prt(mpt, "failed to read FC page 1\n");
491 		return (mpt_fc_reset_link(mpt, 1));
492 	}
493 
494 	/*
495 	 * Check our flags to make sure we support the role we want.
496 	 */
497 	doit = 0;
498 	role = 0;
499 	fl = le32toh(fc.Flags);;
500 
501 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
502 		role |= MPT_ROLE_INITIATOR;
503 	}
504 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
505 		role |= MPT_ROLE_TARGET;
506 	}
507 
508 	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
509 
510 	if (mpt->do_cfg_role == 0) {
511 		role = mpt->cfg_role;
512 	} else {
513 		mpt->do_cfg_role = 0;
514 	}
515 
516 	if (role != mpt->cfg_role) {
517 		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
518 			if ((role & MPT_ROLE_INITIATOR) == 0) {
519 				mpt_prt(mpt, "adding initiator role\n");
520 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
521 				doit++;
522 			} else {
523 				mpt_prt(mpt, "keeping initiator role\n");
524 			}
525 		} else if (role & MPT_ROLE_INITIATOR) {
526 			mpt_prt(mpt, "removing initiator role\n");
527 			doit++;
528 		}
529 		if (mpt->cfg_role & MPT_ROLE_TARGET) {
530 			if ((role & MPT_ROLE_TARGET) == 0) {
531 				mpt_prt(mpt, "adding target role\n");
532 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
533 				doit++;
534 			} else {
535 				mpt_prt(mpt, "keeping target role\n");
536 			}
537 		} else if (role & MPT_ROLE_TARGET) {
538 			mpt_prt(mpt, "removing target role\n");
539 			doit++;
540 		}
541 		mpt->role = mpt->cfg_role;
542 	}
543 
544 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
545 		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
546 			mpt_prt(mpt, "adding OXID option\n");
547 			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
548 			doit++;
549 		}
550 	}
551 
552 	if (doit) {
553 		fc.Flags = htole32(fl);
554 		r = mpt_write_cfg_page(mpt,
555 		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
556 		    sizeof(fc), FALSE, 5000);
557 		if (r != 0) {
558 			mpt_prt(mpt, "failed to update NVRAM with changes\n");
559 			return (0);
560 		}
561 		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
562 		    "effect until next reboot or IOC reset\n");
563 	}
564 	return (0);
565 }
566 
567 /*
568  * Read SAS configuration information. Nothing to do yet.
569  */
570 static int
571 mpt_read_config_info_sas(struct mpt_softc *mpt)
572 {
573 	return (0);
574 }
575 
576 /*
577  * Set SAS configuration information. Nothing to do yet.
578  */
579 static int
580 mpt_set_initial_config_sas(struct mpt_softc *mpt)
581 {
582 	return (0);
583 }
584 
585 /*
586  * Read SCSI configuration information
587  */
588 static int
589 mpt_read_config_info_spi(struct mpt_softc *mpt)
590 {
591 	int rv, i;
592 
593 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
594 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
595 	if (rv) {
596 		return (-1);
597 	}
598 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
599 	    mpt->mpt_port_page0.Header.PageVersion,
600 	    mpt->mpt_port_page0.Header.PageLength,
601 	    mpt->mpt_port_page0.Header.PageNumber,
602 	    mpt->mpt_port_page0.Header.PageType);
603 
604 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
605 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
606 	if (rv) {
607 		return (-1);
608 	}
609 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
610 	    mpt->mpt_port_page1.Header.PageVersion,
611 	    mpt->mpt_port_page1.Header.PageLength,
612 	    mpt->mpt_port_page1.Header.PageNumber,
613 	    mpt->mpt_port_page1.Header.PageType);
614 
615 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
616 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
617 	if (rv) {
618 		return (-1);
619 	}
620 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
621 	    mpt->mpt_port_page2.Header.PageVersion,
622 	    mpt->mpt_port_page2.Header.PageLength,
623 	    mpt->mpt_port_page2.Header.PageNumber,
624 	    mpt->mpt_port_page2.Header.PageType);
625 
626 	for (i = 0; i < 16; i++) {
627 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
628 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
629 		if (rv) {
630 			return (-1);
631 		}
632 		mpt_lprt(mpt, MPT_PRT_DEBUG,
633 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
634 		    mpt->mpt_dev_page0[i].Header.PageVersion,
635 		    mpt->mpt_dev_page0[i].Header.PageLength,
636 		    mpt->mpt_dev_page0[i].Header.PageNumber,
637 		    mpt->mpt_dev_page0[i].Header.PageType);
638 
639 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
640 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
641 		if (rv) {
642 			return (-1);
643 		}
644 		mpt_lprt(mpt, MPT_PRT_DEBUG,
645 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
646 		    mpt->mpt_dev_page1[i].Header.PageVersion,
647 		    mpt->mpt_dev_page1[i].Header.PageLength,
648 		    mpt->mpt_dev_page1[i].Header.PageNumber,
649 		    mpt->mpt_dev_page1[i].Header.PageType);
650 	}
651 
652 	/*
653 	 * At this point, we don't *have* to fail. As long as we have
654 	 * valid config header information, we can (barely) lurch
655 	 * along.
656 	 */
657 
658 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
659 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
660 	if (rv) {
661 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
662 	} else {
663 		mpt_lprt(mpt, MPT_PRT_DEBUG,
664 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
665 		    mpt->mpt_port_page0.Capabilities,
666 		    mpt->mpt_port_page0.PhysicalInterface);
667 	}
668 
669 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
670 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
671 	if (rv) {
672 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
673 	} else {
674 		mpt_lprt(mpt, MPT_PRT_DEBUG,
675 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
676 		    mpt->mpt_port_page1.Configuration,
677 		    mpt->mpt_port_page1.OnBusTimerValue);
678 	}
679 
680 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
681 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
682 	if (rv) {
683 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
684 	} else {
685 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
686 		    "Port Page 2: Flags %x Settings %x\n",
687 		    mpt->mpt_port_page2.PortFlags,
688 		    mpt->mpt_port_page2.PortSettings);
689 		for (i = 0; i < 16; i++) {
690 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
691 		  	    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
692 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
693 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
694 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
695 		}
696 	}
697 
698 	for (i = 0; i < 16; i++) {
699 		rv = mpt_read_cur_cfg_page(mpt, i,
700 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
701 		    FALSE, 5000);
702 		if (rv) {
703 			mpt_prt(mpt,
704 			    "cannot read SPI Target %d Device Page 0\n", i);
705 			continue;
706 		}
707 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
708 		    "target %d page 0: Negotiated Params %x Information %x\n",
709 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
710 		    mpt->mpt_dev_page0[i].Information);
711 
712 		rv = mpt_read_cur_cfg_page(mpt, i,
713 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
714 		    FALSE, 5000);
715 		if (rv) {
716 			mpt_prt(mpt,
717 			    "cannot read SPI Target %d Device Page 1\n", i);
718 			continue;
719 		}
720 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
721 		    "target %d page 1: Requested Params %x Configuration %x\n",
722 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
723 		    mpt->mpt_dev_page1[i].Configuration);
724 	}
725 	return (0);
726 }
727 
728 /*
729  * Validate SPI configuration information.
730  *
731  * In particular, validate SPI Port Page 1.
732  */
733 static int
734 mpt_set_initial_config_spi(struct mpt_softc *mpt)
735 {
736 	int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
737 	int error;
738 
739 	mpt->mpt_disc_enable = 0xff;
740 	mpt->mpt_tag_enable = 0;
741 
742 	if (mpt->mpt_port_page1.Configuration != pp1val) {
743 		CONFIG_PAGE_SCSI_PORT_1 tmp;
744 
745 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
746 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
747 		tmp = mpt->mpt_port_page1;
748 		tmp.Configuration = pp1val;
749 		error = mpt_write_cur_cfg_page(mpt, 0,
750 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
751 		if (error) {
752 			return (-1);
753 		}
754 		error = mpt_read_cur_cfg_page(mpt, 0,
755 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
756 		if (error) {
757 			return (-1);
758 		}
759 		if (tmp.Configuration != pp1val) {
760 			mpt_prt(mpt,
761 			    "failed to reset SPI Port Page 1 Config value\n");
762 			return (-1);
763 		}
764 		mpt->mpt_port_page1 = tmp;
765 	}
766 
767 	/*
768 	 * The purpose of this exercise is to get
769 	 * all targets back to async/narrow.
770 	 *
771 	 * We skip this step if the BIOS has already negotiated
772 	 * speeds with the targets and does not require us to
773 	 * do Domain Validation.
774 	 */
775 	i = mpt->mpt_port_page2.PortSettings &
776 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
777 	j = mpt->mpt_port_page2.PortFlags &
778 	    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
779 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* &&
780 	    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) {
781 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
782 		    "honoring BIOS transfer negotiations\n");
783 	} else {
784 		for (i = 0; i < 16; i++) {
785 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
786 			mpt->mpt_dev_page1[i].Configuration = 0;
787 			(void) mpt_update_spi_config(mpt, i);
788 		}
789 	}
790 	return (0);
791 }
792 
793 int
794 mpt_cam_enable(struct mpt_softc *mpt)
795 {
796 	if (mpt->is_fc) {
797 		if (mpt_read_config_info_fc(mpt)) {
798 			return (EIO);
799 		}
800 		if (mpt_set_initial_config_fc(mpt)) {
801 			return (EIO);
802 		}
803 	} else if (mpt->is_sas) {
804 		if (mpt_read_config_info_sas(mpt)) {
805 			return (EIO);
806 		}
807 		if (mpt_set_initial_config_sas(mpt)) {
808 			return (EIO);
809 		}
810 	} else if (mpt->is_spi) {
811 		if (mpt_read_config_info_spi(mpt)) {
812 			return (EIO);
813 		}
814 		if (mpt_set_initial_config_spi(mpt)) {
815 			return (EIO);
816 		}
817 	}
818 	return (0);
819 }
820 
821 void
822 mpt_cam_ready(struct mpt_softc *mpt)
823 {
824 	/*
825 	 * If we're in target mode, hang out resources now
826 	 * so we don't cause the world to hang talking to us.
827 	 */
828 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
829 		/*
830 		 * Try to add some target command resources
831 		 */
832 		MPT_LOCK(mpt);
833 		if (mpt_add_target_commands(mpt) == FALSE) {
834 			mpt_prt(mpt, "failed to add target commands\n");
835 		}
836 		MPT_UNLOCK(mpt);
837 	}
838 }
839 
840 void
841 mpt_cam_detach(struct mpt_softc *mpt)
842 {
843 	mpt_handler_t handler;
844 
845 	mpt_terminate_recovery_thread(mpt);
846 
847 	handler.reply_handler = mpt_scsi_reply_handler;
848 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
849 			       scsi_io_handler_id);
850 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
851 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
852 			       scsi_tmf_handler_id);
853 	handler.reply_handler = mpt_fc_els_reply_handler;
854 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
855 			       fc_els_handler_id);
856 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
857 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
858 			       mpt->scsi_tgt_handler_id);
859 
860 	if (mpt->tmf_req != NULL) {
861 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
862 		mpt_free_request(mpt, mpt->tmf_req);
863 		mpt->tmf_req = NULL;
864 	}
865 
866 	if (mpt->sim != NULL) {
867 		MPTLOCK_2_CAMLOCK(mpt);
868 		xpt_free_path(mpt->path);
869 		xpt_bus_deregister(cam_sim_path(mpt->sim));
870 		cam_sim_free(mpt->sim, TRUE);
871 		mpt->sim = NULL;
872 		CAMLOCK_2_MPTLOCK(mpt);
873 	}
874 
875 	if (mpt->phydisk_sim != NULL) {
876 		MPTLOCK_2_CAMLOCK(mpt);
877 		xpt_free_path(mpt->phydisk_path);
878 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
879 		cam_sim_free(mpt->phydisk_sim, TRUE);
880 		mpt->phydisk_sim = NULL;
881 		CAMLOCK_2_MPTLOCK(mpt);
882 	}
883 }
884 
885 /* This routine is used after a system crash to dump core onto the swap device.
886  */
887 static void
888 mpt_poll(struct cam_sim *sim)
889 {
890 	struct mpt_softc *mpt;
891 
892 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
893 	MPT_LOCK(mpt);
894 	mpt_intr(mpt);
895 	MPT_UNLOCK(mpt);
896 }
897 
898 /*
899  * Watchdog timeout routine for SCSI requests.
900  */
901 static void
902 mpt_timeout(void *arg)
903 {
904 	union ccb	 *ccb;
905 	struct mpt_softc *mpt;
906 	request_t	 *req;
907 
908 	ccb = (union ccb *)arg;
909 	mpt = ccb->ccb_h.ccb_mpt_ptr;
910 
911 	MPT_LOCK(mpt);
912 	req = ccb->ccb_h.ccb_req_ptr;
913 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
914 	    req->serno, ccb, req->ccb);
915 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
916 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
917 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
918 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
919 		req->state |= REQ_STATE_TIMEDOUT;
920 		mpt_wakeup_recovery_thread(mpt);
921 	}
922 	MPT_UNLOCK(mpt);
923 }
924 
925 /*
926  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
927  *
928  * Takes a list of physical segments and builds the SGL for SCSI IO command
929  * and forwards the commard to the IOC after one last check that CAM has not
930  * aborted the transaction.
931  */
932 static void
933 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
934 {
935 	request_t *req, *trq;
936 	char *mpt_off;
937 	union ccb *ccb;
938 	struct mpt_softc *mpt;
939 	int seg, first_lim;
940 	uint32_t flags, nxt_off;
941 	void *sglp = NULL;
942 	MSG_REQUEST_HEADER *hdrp;
943 	SGE_SIMPLE64 *se;
944 	SGE_CHAIN64 *ce;
945 	int istgt = 0;
946 
947 	req = (request_t *)arg;
948 	ccb = req->ccb;
949 
950 	mpt = ccb->ccb_h.ccb_mpt_ptr;
951 	req = ccb->ccb_h.ccb_req_ptr;
952 
953 	hdrp = req->req_vbuf;
954 	mpt_off = req->req_vbuf;
955 
956 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
957 		error = EFBIG;
958 	}
959 
960 	if (error == 0) {
961 		switch (hdrp->Function) {
962 		case MPI_FUNCTION_SCSI_IO_REQUEST:
963 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
964 			istgt = 0;
965 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
966 			break;
967 		case MPI_FUNCTION_TARGET_ASSIST:
968 			istgt = 1;
969 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
970 			break;
971 		default:
972 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
973 			    hdrp->Function);
974 			error = EINVAL;
975 			break;
976 		}
977 	}
978 
979 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
980 		error = EFBIG;
981 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
982 		    nseg, mpt->max_seg_cnt);
983 	}
984 
985 bad:
986 	if (error != 0) {
987 		if (error != EFBIG && error != ENOMEM) {
988 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
989 		}
990 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
991 			cam_status status;
992 			mpt_freeze_ccb(ccb);
993 			if (error == EFBIG) {
994 				status = CAM_REQ_TOO_BIG;
995 			} else if (error == ENOMEM) {
996 				if (mpt->outofbeer == 0) {
997 					mpt->outofbeer = 1;
998 					xpt_freeze_simq(mpt->sim, 1);
999 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1000 					    "FREEZEQ\n");
1001 				}
1002 				status = CAM_REQUEUE_REQ;
1003 			} else {
1004 				status = CAM_REQ_CMP_ERR;
1005 			}
1006 			mpt_set_ccb_status(ccb, status);
1007 		}
1008 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1009 			request_t *cmd_req =
1010 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1011 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1012 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1013 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1014 		}
1015 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1016 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1017 		xpt_done(ccb);
1018 		CAMLOCK_2_MPTLOCK(mpt);
1019 		mpt_free_request(mpt, req);
1020 		MPTLOCK_2_CAMLOCK(mpt);
1021 		return;
1022 	}
1023 
1024 	/*
1025 	 * No data to transfer?
1026 	 * Just make a single simple SGL with zero length.
1027 	 */
1028 
1029 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1030 		int tidx = ((char *)sglp) - mpt_off;
1031 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1032 	}
1033 
1034 	if (nseg == 0) {
1035 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1036 		MPI_pSGE_SET_FLAGS(se1,
1037 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1038 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1039 		goto out;
1040 	}
1041 
1042 
1043 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1044 	if (istgt == 0) {
1045 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1046 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1047 		}
1048 	} else {
1049 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1050 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1051 		}
1052 	}
1053 
1054 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1055 		bus_dmasync_op_t op;
1056 		if (istgt == 0) {
1057 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1058 				op = BUS_DMASYNC_PREREAD;
1059 			} else {
1060 				op = BUS_DMASYNC_PREWRITE;
1061 			}
1062 		} else {
1063 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1064 				op = BUS_DMASYNC_PREWRITE;
1065 			} else {
1066 				op = BUS_DMASYNC_PREREAD;
1067 			}
1068 		}
1069 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1070 	}
1071 
1072 	/*
1073 	 * Okay, fill in what we can at the end of the command frame.
1074 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1075 	 * the command frame.
1076 	 *
1077 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1078 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
1079 	 * that.
1080 	 */
1081 
1082 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1083 		first_lim = nseg;
1084 	} else {
1085 		/*
1086 		 * Leave room for CHAIN element
1087 		 */
1088 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1089 	}
1090 
1091 	se = (SGE_SIMPLE64 *) sglp;
1092 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1093 		uint32_t tf;
1094 
1095 		memset(se, 0, sizeof (*se));
1096 		se->Address.Low = dm_segs->ds_addr;
1097 		if (sizeof(bus_addr_t) > 4) {
1098 			se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
1099 		}
1100 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1101 		tf = flags;
1102 		if (seg == first_lim - 1) {
1103 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1104 		}
1105 		if (seg == nseg - 1) {
1106 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1107 				MPI_SGE_FLAGS_END_OF_BUFFER;
1108 		}
1109 		MPI_pSGE_SET_FLAGS(se, tf);
1110 	}
1111 
1112 	if (seg == nseg) {
1113 		goto out;
1114 	}
1115 
1116 	/*
1117 	 * Tell the IOC where to find the first chain element.
1118 	 */
1119 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1120 	nxt_off = MPT_RQSL(mpt);
1121 	trq = req;
1122 
1123 	/*
1124 	 * Make up the rest of the data segments out of a chain element
1125 	 * (contiained in the current request frame) which points to
1126 	 * SIMPLE64 elements in the next request frame, possibly ending
1127 	 * with *another* chain element (if there's more).
1128 	 */
1129 	while (seg < nseg) {
1130 		int this_seg_lim;
1131 		uint32_t tf, cur_off;
1132 		bus_addr_t chain_list_addr;
1133 
1134 		/*
1135 		 * Point to the chain descriptor. Note that the chain
1136 		 * descriptor is at the end of the *previous* list (whether
1137 		 * chain or simple).
1138 		 */
1139 		ce = (SGE_CHAIN64 *) se;
1140 
1141 		/*
1142 		 * Before we change our current pointer, make  sure we won't
1143 		 * overflow the request area with this frame. Note that we
1144 		 * test against 'greater than' here as it's okay in this case
1145 		 * to have next offset be just outside the request area.
1146 		 */
1147 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1148 			nxt_off = MPT_REQUEST_AREA;
1149 			goto next_chain;
1150 		}
1151 
1152 		/*
1153 		 * Set our SGE element pointer to the beginning of the chain
1154 		 * list and update our next chain list offset.
1155 		 */
1156 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1157 		cur_off = nxt_off;
1158 		nxt_off += MPT_RQSL(mpt);
1159 
1160 		/*
1161 		 * Now initialized the chain descriptor.
1162 		 */
1163 		memset(ce, 0, sizeof (*ce));
1164 
1165 		/*
1166 		 * Get the physical address of the chain list.
1167 		 */
1168 		chain_list_addr = trq->req_pbuf;
1169 		chain_list_addr += cur_off;
1170 		if (sizeof (bus_addr_t) > 4) {
1171 			ce->Address.High =
1172 			    (uint32_t) ((uint64_t)chain_list_addr >> 32);
1173 		}
1174 		ce->Address.Low = (uint32_t) chain_list_addr;
1175 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1176 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1177 
1178 		/*
1179 		 * If we have more than a frame's worth of segments left,
1180 		 * set up the chain list to have the last element be another
1181 		 * chain descriptor.
1182 		 */
1183 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1184 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1185 			/*
1186 			 * The length of the chain is the length in bytes of the
1187 			 * number of segments plus the next chain element.
1188 			 *
1189 			 * The next chain descriptor offset is the length,
1190 			 * in words, of the number of segments.
1191 			 */
1192 			ce->Length = (this_seg_lim - seg) *
1193 			    sizeof (SGE_SIMPLE64);
1194 			ce->NextChainOffset = ce->Length >> 2;
1195 			ce->Length += sizeof (SGE_CHAIN64);
1196 		} else {
1197 			this_seg_lim = nseg;
1198 			ce->Length = (this_seg_lim - seg) *
1199 			    sizeof (SGE_SIMPLE64);
1200 		}
1201 
1202 		/*
1203 		 * Fill in the chain list SGE elements with our segment data.
1204 		 *
1205 		 * If we're the last element in this chain list, set the last
1206 		 * element flag. If we're the completely last element period,
1207 		 * set the end of list and end of buffer flags.
1208 		 */
1209 		while (seg < this_seg_lim) {
1210 			memset(se, 0, sizeof (*se));
1211 			se->Address.Low = dm_segs->ds_addr;
1212 			if (sizeof (bus_addr_t) > 4) {
1213 				se->Address.High =
1214 				    ((uint64_t)dm_segs->ds_addr) >> 32;
1215 			}
1216 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1217 			tf = flags;
1218 			if (seg ==  this_seg_lim - 1) {
1219 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1220 			}
1221 			if (seg == nseg - 1) {
1222 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1223 					MPI_SGE_FLAGS_END_OF_BUFFER;
1224 			}
1225 			MPI_pSGE_SET_FLAGS(se, tf);
1226 			se++;
1227 			seg++;
1228 			dm_segs++;
1229 		}
1230 
1231     next_chain:
1232 		/*
1233 		 * If we have more segments to do and we've used up all of
1234 		 * the space in a request area, go allocate another one
1235 		 * and chain to that.
1236 		 */
1237 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1238 			request_t *nrq;
1239 
1240 			CAMLOCK_2_MPTLOCK(mpt);
1241 			nrq = mpt_get_request(mpt, FALSE);
1242 			MPTLOCK_2_CAMLOCK(mpt);
1243 
1244 			if (nrq == NULL) {
1245 				error = ENOMEM;
1246 				goto bad;
1247 			}
1248 
1249 			/*
1250 			 * Append the new request area on the tail of our list.
1251 			 */
1252 			if ((trq = req->chain) == NULL) {
1253 				req->chain = nrq;
1254 			} else {
1255 				while (trq->chain != NULL) {
1256 					trq = trq->chain;
1257 				}
1258 				trq->chain = nrq;
1259 			}
1260 			trq = nrq;
1261 			mpt_off = trq->req_vbuf;
1262 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1263 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1264 			}
1265 			nxt_off = 0;
1266 		}
1267 	}
1268 out:
1269 
1270 	/*
1271 	 * Last time we need to check if this CCB needs to be aborted.
1272 	 */
1273 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1274 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1275 			request_t *cmd_req =
1276 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1277 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1278 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1279 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1280 		}
1281 		mpt_prt(mpt,
1282 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1283 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1284 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1285 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1286 		}
1287 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1288 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1289 		xpt_done(ccb);
1290 		CAMLOCK_2_MPTLOCK(mpt);
1291 		mpt_free_request(mpt, req);
1292 		MPTLOCK_2_CAMLOCK(mpt);
1293 		return;
1294 	}
1295 
1296 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1297 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1298 		ccb->ccb_h.timeout_ch =
1299 			timeout(mpt_timeout, (caddr_t)ccb,
1300 				(ccb->ccb_h.timeout * hz) / 1000);
1301 	} else {
1302 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1303 	}
1304 	if (mpt->verbose > MPT_PRT_DEBUG) {
1305 		int nc = 0;
1306 		mpt_print_request(req->req_vbuf);
1307 		for (trq = req->chain; trq; trq = trq->chain) {
1308 			printf("  Additional Chain Area %d\n", nc++);
1309 			mpt_dump_sgl(trq->req_vbuf, 0);
1310 		}
1311 	}
1312 
1313 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1314 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1315 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1316 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1317 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1318 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1319 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1320 		} else {
1321 			tgt->state = TGT_STATE_MOVING_DATA;
1322 		}
1323 #else
1324 		tgt->state = TGT_STATE_MOVING_DATA;
1325 #endif
1326 	}
1327 	CAMLOCK_2_MPTLOCK(mpt);
1328 	mpt_send_cmd(mpt, req);
1329 	MPTLOCK_2_CAMLOCK(mpt);
1330 }
1331 
1332 static void
1333 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1334 {
1335 	request_t *req, *trq;
1336 	char *mpt_off;
1337 	union ccb *ccb;
1338 	struct mpt_softc *mpt;
1339 	int seg, first_lim;
1340 	uint32_t flags, nxt_off;
1341 	void *sglp = NULL;
1342 	MSG_REQUEST_HEADER *hdrp;
1343 	SGE_SIMPLE32 *se;
1344 	SGE_CHAIN32 *ce;
1345 	int istgt = 0;
1346 
1347 	req = (request_t *)arg;
1348 	ccb = req->ccb;
1349 
1350 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1351 	req = ccb->ccb_h.ccb_req_ptr;
1352 
1353 	hdrp = req->req_vbuf;
1354 	mpt_off = req->req_vbuf;
1355 
1356 
1357 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1358 		error = EFBIG;
1359 	}
1360 
1361 	if (error == 0) {
1362 		switch (hdrp->Function) {
1363 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1364 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1365 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1366 			break;
1367 		case MPI_FUNCTION_TARGET_ASSIST:
1368 			istgt = 1;
1369 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1370 			break;
1371 		default:
1372 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1373 			    hdrp->Function);
1374 			error = EINVAL;
1375 			break;
1376 		}
1377 	}
1378 
1379 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1380 		error = EFBIG;
1381 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1382 		    nseg, mpt->max_seg_cnt);
1383 	}
1384 
1385 bad:
1386 	if (error != 0) {
1387 		if (error != EFBIG && error != ENOMEM) {
1388 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1389 		}
1390 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1391 			cam_status status;
1392 			mpt_freeze_ccb(ccb);
1393 			if (error == EFBIG) {
1394 				status = CAM_REQ_TOO_BIG;
1395 			} else if (error == ENOMEM) {
1396 				if (mpt->outofbeer == 0) {
1397 					mpt->outofbeer = 1;
1398 					xpt_freeze_simq(mpt->sim, 1);
1399 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1400 					    "FREEZEQ\n");
1401 				}
1402 				status = CAM_REQUEUE_REQ;
1403 			} else {
1404 				status = CAM_REQ_CMP_ERR;
1405 			}
1406 			mpt_set_ccb_status(ccb, status);
1407 		}
1408 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1409 			request_t *cmd_req =
1410 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1411 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1412 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1413 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1414 		}
1415 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1416 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1417 		xpt_done(ccb);
1418 		CAMLOCK_2_MPTLOCK(mpt);
1419 		mpt_free_request(mpt, req);
1420 		MPTLOCK_2_CAMLOCK(mpt);
1421 		return;
1422 	}
1423 
1424 	/*
1425 	 * No data to transfer?
1426 	 * Just make a single simple SGL with zero length.
1427 	 */
1428 
1429 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1430 		int tidx = ((char *)sglp) - mpt_off;
1431 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1432 	}
1433 
1434 	if (nseg == 0) {
1435 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1436 		MPI_pSGE_SET_FLAGS(se1,
1437 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1438 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1439 		goto out;
1440 	}
1441 
1442 
1443 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1444 	if (istgt == 0) {
1445 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1446 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1447 		}
1448 	} else {
1449 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1450 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1451 		}
1452 	}
1453 
1454 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1455 		bus_dmasync_op_t op;
1456 		if (istgt) {
1457 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1458 				op = BUS_DMASYNC_PREREAD;
1459 			} else {
1460 				op = BUS_DMASYNC_PREWRITE;
1461 			}
1462 		} else {
1463 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1464 				op = BUS_DMASYNC_PREWRITE;
1465 			} else {
1466 				op = BUS_DMASYNC_PREREAD;
1467 			}
1468 		}
1469 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1470 	}
1471 
1472 	/*
1473 	 * Okay, fill in what we can at the end of the command frame.
1474 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1475 	 * the command frame.
1476 	 *
1477 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1478 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1479 	 * that.
1480 	 */
1481 
1482 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1483 		first_lim = nseg;
1484 	} else {
1485 		/*
1486 		 * Leave room for CHAIN element
1487 		 */
1488 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1489 	}
1490 
1491 	se = (SGE_SIMPLE32 *) sglp;
1492 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1493 		uint32_t tf;
1494 
1495 		memset(se, 0,sizeof (*se));
1496 		se->Address = dm_segs->ds_addr;
1497 
1498 
1499 
1500 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1501 		tf = flags;
1502 		if (seg == first_lim - 1) {
1503 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1504 		}
1505 		if (seg == nseg - 1) {
1506 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1507 				MPI_SGE_FLAGS_END_OF_BUFFER;
1508 		}
1509 		MPI_pSGE_SET_FLAGS(se, tf);
1510 	}
1511 
1512 	if (seg == nseg) {
1513 		goto out;
1514 	}
1515 
1516 	/*
1517 	 * Tell the IOC where to find the first chain element.
1518 	 */
1519 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1520 	nxt_off = MPT_RQSL(mpt);
1521 	trq = req;
1522 
1523 	/*
1524 	 * Make up the rest of the data segments out of a chain element
1525 	 * (contiained in the current request frame) which points to
1526 	 * SIMPLE32 elements in the next request frame, possibly ending
1527 	 * with *another* chain element (if there's more).
1528 	 */
1529 	while (seg < nseg) {
1530 		int this_seg_lim;
1531 		uint32_t tf, cur_off;
1532 		bus_addr_t chain_list_addr;
1533 
1534 		/*
1535 		 * Point to the chain descriptor. Note that the chain
1536 		 * descriptor is at the end of the *previous* list (whether
1537 		 * chain or simple).
1538 		 */
1539 		ce = (SGE_CHAIN32 *) se;
1540 
1541 		/*
1542 		 * Before we change our current pointer, make  sure we won't
1543 		 * overflow the request area with this frame. Note that we
1544 		 * test against 'greater than' here as it's okay in this case
1545 		 * to have next offset be just outside the request area.
1546 		 */
1547 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1548 			nxt_off = MPT_REQUEST_AREA;
1549 			goto next_chain;
1550 		}
1551 
1552 		/*
1553 		 * Set our SGE element pointer to the beginning of the chain
1554 		 * list and update our next chain list offset.
1555 		 */
1556 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1557 		cur_off = nxt_off;
1558 		nxt_off += MPT_RQSL(mpt);
1559 
1560 		/*
1561 		 * Now initialized the chain descriptor.
1562 		 */
1563 		memset(ce, 0, sizeof (*ce));
1564 
1565 		/*
1566 		 * Get the physical address of the chain list.
1567 		 */
1568 		chain_list_addr = trq->req_pbuf;
1569 		chain_list_addr += cur_off;
1570 
1571 
1572 
1573 		ce->Address = chain_list_addr;
1574 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1575 
1576 
1577 		/*
1578 		 * If we have more than a frame's worth of segments left,
1579 		 * set up the chain list to have the last element be another
1580 		 * chain descriptor.
1581 		 */
1582 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1583 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1584 			/*
1585 			 * The length of the chain is the length in bytes of the
1586 			 * number of segments plus the next chain element.
1587 			 *
1588 			 * The next chain descriptor offset is the length,
1589 			 * in words, of the number of segments.
1590 			 */
1591 			ce->Length = (this_seg_lim - seg) *
1592 			    sizeof (SGE_SIMPLE32);
1593 			ce->NextChainOffset = ce->Length >> 2;
1594 			ce->Length += sizeof (SGE_CHAIN32);
1595 		} else {
1596 			this_seg_lim = nseg;
1597 			ce->Length = (this_seg_lim - seg) *
1598 			    sizeof (SGE_SIMPLE32);
1599 		}
1600 
1601 		/*
1602 		 * Fill in the chain list SGE elements with our segment data.
1603 		 *
1604 		 * If we're the last element in this chain list, set the last
1605 		 * element flag. If we're the completely last element period,
1606 		 * set the end of list and end of buffer flags.
1607 		 */
1608 		while (seg < this_seg_lim) {
1609 			memset(se, 0, sizeof (*se));
1610 			se->Address = dm_segs->ds_addr;
1611 
1612 
1613 
1614 
1615 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1616 			tf = flags;
1617 			if (seg ==  this_seg_lim - 1) {
1618 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1619 			}
1620 			if (seg == nseg - 1) {
1621 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1622 					MPI_SGE_FLAGS_END_OF_BUFFER;
1623 			}
1624 			MPI_pSGE_SET_FLAGS(se, tf);
1625 			se++;
1626 			seg++;
1627 			dm_segs++;
1628 		}
1629 
1630     next_chain:
1631 		/*
1632 		 * If we have more segments to do and we've used up all of
1633 		 * the space in a request area, go allocate another one
1634 		 * and chain to that.
1635 		 */
1636 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1637 			request_t *nrq;
1638 
1639 			CAMLOCK_2_MPTLOCK(mpt);
1640 			nrq = mpt_get_request(mpt, FALSE);
1641 			MPTLOCK_2_CAMLOCK(mpt);
1642 
1643 			if (nrq == NULL) {
1644 				error = ENOMEM;
1645 				goto bad;
1646 			}
1647 
1648 			/*
1649 			 * Append the new request area on the tail of our list.
1650 			 */
1651 			if ((trq = req->chain) == NULL) {
1652 				req->chain = nrq;
1653 			} else {
1654 				while (trq->chain != NULL) {
1655 					trq = trq->chain;
1656 				}
1657 				trq->chain = nrq;
1658 			}
1659 			trq = nrq;
1660 			mpt_off = trq->req_vbuf;
1661 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1662 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1663 			}
1664 			nxt_off = 0;
1665 		}
1666 	}
1667 out:
1668 
1669 	/*
1670 	 * Last time we need to check if this CCB needs to be aborted.
1671 	 */
1672 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1673 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1674 			request_t *cmd_req =
1675 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1676 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1677 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1678 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1679 		}
1680 		mpt_prt(mpt,
1681 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1682 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1683 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1684 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1685 		}
1686 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1687 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1688 		xpt_done(ccb);
1689 		CAMLOCK_2_MPTLOCK(mpt);
1690 		mpt_free_request(mpt, req);
1691 		MPTLOCK_2_CAMLOCK(mpt);
1692 		return;
1693 	}
1694 
1695 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1696 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1697 		ccb->ccb_h.timeout_ch =
1698 			timeout(mpt_timeout, (caddr_t)ccb,
1699 				(ccb->ccb_h.timeout * hz) / 1000);
1700 	} else {
1701 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1702 	}
1703 	if (mpt->verbose > MPT_PRT_DEBUG) {
1704 		int nc = 0;
1705 		mpt_print_request(req->req_vbuf);
1706 		for (trq = req->chain; trq; trq = trq->chain) {
1707 			printf("  Additional Chain Area %d\n", nc++);
1708 			mpt_dump_sgl(trq->req_vbuf, 0);
1709 		}
1710 	}
1711 
1712 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1713 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1714 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1715 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1716 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1717 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1718 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1719 		} else {
1720 			tgt->state = TGT_STATE_MOVING_DATA;
1721 		}
1722 #else
1723 		tgt->state = TGT_STATE_MOVING_DATA;
1724 #endif
1725 	}
1726 	CAMLOCK_2_MPTLOCK(mpt);
1727 	mpt_send_cmd(mpt, req);
1728 	MPTLOCK_2_CAMLOCK(mpt);
1729 }
1730 
1731 static void
1732 mpt_start(struct cam_sim *sim, union ccb *ccb)
1733 {
1734 	request_t *req;
1735 	struct mpt_softc *mpt;
1736 	MSG_SCSI_IO_REQUEST *mpt_req;
1737 	struct ccb_scsiio *csio = &ccb->csio;
1738 	struct ccb_hdr *ccbh = &ccb->ccb_h;
1739 	bus_dmamap_callback_t *cb;
1740 	target_id_t tgt;
1741 	int raid_passthru;
1742 
1743 	/* Get the pointer for the physical addapter */
1744 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1745 	raid_passthru = (sim == mpt->phydisk_sim);
1746 
1747 	CAMLOCK_2_MPTLOCK(mpt);
1748 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
1749 		if (mpt->outofbeer == 0) {
1750 			mpt->outofbeer = 1;
1751 			xpt_freeze_simq(mpt->sim, 1);
1752 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
1753 		}
1754 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1755 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
1756 		MPTLOCK_2_CAMLOCK(mpt);
1757 		xpt_done(ccb);
1758 		return;
1759 	}
1760 #ifdef	INVARIANTS
1761 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
1762 #endif
1763 	MPTLOCK_2_CAMLOCK(mpt);
1764 
1765 	if (sizeof (bus_addr_t) > 4) {
1766 		cb = mpt_execute_req_a64;
1767 	} else {
1768 		cb = mpt_execute_req;
1769 	}
1770 
1771 	/*
1772 	 * Link the ccb and the request structure so we can find
1773 	 * the other knowing either the request or the ccb
1774 	 */
1775 	req->ccb = ccb;
1776 	ccb->ccb_h.ccb_req_ptr = req;
1777 
1778 	/* Now we build the command for the IOC */
1779 	mpt_req = req->req_vbuf;
1780 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
1781 
1782 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1783 	if (raid_passthru) {
1784 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
1785 		CAMLOCK_2_MPTLOCK(mpt);
1786 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
1787 			MPTLOCK_2_CAMLOCK(mpt);
1788 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1789 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
1790 			xpt_done(ccb);
1791 			return;
1792 		}
1793 		MPTLOCK_2_CAMLOCK(mpt);
1794 		mpt_req->Bus = 0;	/* we never set bus here */
1795 	} else {
1796 		tgt = ccb->ccb_h.target_id;
1797 		mpt_req->Bus = 0;	/* XXX */
1798 
1799 	}
1800 	mpt_req->SenseBufferLength =
1801 		(csio->sense_len < MPT_SENSE_SIZE) ?
1802 		 csio->sense_len : MPT_SENSE_SIZE;
1803 
1804 	/*
1805 	 * We use the message context to find the request structure when we
1806 	 * Get the command completion interrupt from the IOC.
1807 	 */
1808 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
1809 
1810 	/* Which physical device to do the I/O on */
1811 	mpt_req->TargetID = tgt;
1812 
1813 	/* We assume a single level LUN type */
1814 	if (ccb->ccb_h.target_lun >= 256) {
1815 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
1816 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
1817 	} else {
1818 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
1819 	}
1820 
1821 	/* Set the direction of the transfer */
1822 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1823 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
1824 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1825 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
1826 	} else {
1827 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
1828 	}
1829 
1830 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1831 		switch(ccb->csio.tag_action) {
1832 		case MSG_HEAD_OF_Q_TAG:
1833 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
1834 			break;
1835 		case MSG_ACA_TASK:
1836 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
1837 			break;
1838 		case MSG_ORDERED_Q_TAG:
1839 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
1840 			break;
1841 		case MSG_SIMPLE_Q_TAG:
1842 		default:
1843 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1844 			break;
1845 		}
1846 	} else {
1847 		if (mpt->is_fc || mpt->is_sas) {
1848 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1849 		} else {
1850 			/* XXX No such thing for a target doing packetized. */
1851 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
1852 		}
1853 	}
1854 
1855 	if (mpt->is_spi) {
1856 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1857 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
1858 		}
1859 	}
1860 
1861 	/* Copy the scsi command block into place */
1862 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1863 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
1864 	} else {
1865 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
1866 	}
1867 
1868 	mpt_req->CDBLength = csio->cdb_len;
1869 	mpt_req->DataLength = csio->dxfer_len;
1870 	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
1871 
1872 	/*
1873 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
1874 	 */
1875 	if (mpt->verbose == MPT_PRT_DEBUG) {
1876 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
1877 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
1878 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
1879 		if (mpt_req->Control != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
1880 			mpt_prtc(mpt, "(%s %u byte%s ",
1881 			    (mpt_req->Control == MPI_SCSIIO_CONTROL_READ)?
1882 			    "read" : "write",  csio->dxfer_len,
1883 			    (csio->dxfer_len == 1)? ")" : "s)");
1884 		}
1885 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
1886 		    ccb->ccb_h.target_lun, req, req->serno);
1887 	}
1888 
1889 	/*
1890 	 * If we have any data to send with this command map it into bus space.
1891 	 */
1892 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1893 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
1894 			/*
1895 			 * We've been given a pointer to a single buffer.
1896 			 */
1897 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
1898 				/*
1899 				 * Virtual address that needs to translated into
1900 				 * one or more physical address ranges.
1901 				 */
1902 				int error;
1903 				int s = splsoftvm();
1904 				error = bus_dmamap_load(mpt->buffer_dmat,
1905 				    req->dmap, csio->data_ptr, csio->dxfer_len,
1906 				    cb, req, 0);
1907 				splx(s);
1908 				if (error == EINPROGRESS) {
1909 					/*
1910 					 * So as to maintain ordering,
1911 					 * freeze the controller queue
1912 					 * until our mapping is
1913 					 * returned.
1914 					 */
1915 					xpt_freeze_simq(mpt->sim, 1);
1916 					ccbh->status |= CAM_RELEASE_SIMQ;
1917 				}
1918 			} else {
1919 				/*
1920 				 * We have been given a pointer to single
1921 				 * physical buffer.
1922 				 */
1923 				struct bus_dma_segment seg;
1924 				seg.ds_addr =
1925 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1926 				seg.ds_len = csio->dxfer_len;
1927 				(*cb)(req, &seg, 1, 0);
1928 			}
1929 		} else {
1930 			/*
1931 			 * We have been given a list of addresses.
1932 			 * This case could be easily supported but they are not
1933 			 * currently generated by the CAM subsystem so there
1934 			 * is no point in wasting the time right now.
1935 			 */
1936 			struct bus_dma_segment *segs;
1937 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
1938 				(*cb)(req, NULL, 0, EFAULT);
1939 			} else {
1940 				/* Just use the segments provided */
1941 				segs = (struct bus_dma_segment *)csio->data_ptr;
1942 				(*cb)(req, segs, csio->sglist_cnt, 0);
1943 			}
1944 		}
1945 	} else {
1946 		(*cb)(req, NULL, 0, 0);
1947 	}
1948 }
1949 
1950 static int
1951 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
1952     int sleep_ok)
1953 {
1954 	int   error;
1955 	uint16_t status;
1956 	uint8_t response;
1957 
1958 	error = mpt_scsi_send_tmf(mpt,
1959 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
1960 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
1961 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1962 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
1963 	    0,	/* XXX How do I get the channel ID? */
1964 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
1965 	    lun != CAM_LUN_WILDCARD ? lun : 0,
1966 	    0, sleep_ok);
1967 
1968 	if (error != 0) {
1969 		/*
1970 		 * mpt_scsi_send_tmf hard resets on failure, so no
1971 		 * need to do so here.
1972 		 */
1973 		mpt_prt(mpt,
1974 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
1975 		return (EIO);
1976 	}
1977 
1978 	/* Wait for bus reset to be processed by the IOC. */
1979 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
1980 	    REQ_STATE_DONE, sleep_ok, 5000);
1981 
1982 	status = mpt->tmf_req->IOCStatus;
1983 	response = mpt->tmf_req->ResponseCode;
1984 	mpt->tmf_req->state = REQ_STATE_FREE;
1985 
1986 	if (error) {
1987 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
1988 		    "Resetting controller.\n");
1989 		mpt_reset(mpt, TRUE);
1990 		return (ETIMEDOUT);
1991 	}
1992 
1993 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1994 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
1995 		    "Resetting controller.\n", status);
1996 		mpt_reset(mpt, TRUE);
1997 		return (EIO);
1998 	}
1999 
2000 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2001 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2002 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2003 		    "Resetting controller.\n", response);
2004 		mpt_reset(mpt, TRUE);
2005 		return (EIO);
2006 	}
2007 	return (0);
2008 }
2009 
2010 static int
2011 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2012 {
2013 	int r = 0;
2014 	request_t *req;
2015 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2016 
2017  	req = mpt_get_request(mpt, FALSE);
2018 	if (req == NULL) {
2019 		return (ENOMEM);
2020 	}
2021 	fc = req->req_vbuf;
2022 	memset(fc, 0, sizeof(*fc));
2023 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2024 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2025 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2026 	mpt_send_cmd(mpt, req);
2027 	if (dowait) {
2028 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2029 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2030 		if (r == 0) {
2031 			mpt_free_request(mpt, req);
2032 		}
2033 	}
2034 	return (r);
2035 }
2036 
2037 static int
2038 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2039 	      MSG_EVENT_NOTIFY_REPLY *msg)
2040 {
2041 
2042 	switch(msg->Event & 0xFF) {
2043 	case MPI_EVENT_UNIT_ATTENTION:
2044 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2045 		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
2046 		break;
2047 
2048 	case MPI_EVENT_IOC_BUS_RESET:
2049 		/* We generated a bus reset */
2050 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2051 		    (msg->Data[0] >> 8) & 0xff);
2052 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2053 		break;
2054 
2055 	case MPI_EVENT_EXT_BUS_RESET:
2056 		/* Someone else generated a bus reset */
2057 		mpt_prt(mpt, "External Bus Reset Detected\n");
2058 		/*
2059 		 * These replies don't return EventData like the MPI
2060 		 * spec says they do
2061 		 */
2062 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2063 		break;
2064 
2065 	case MPI_EVENT_RESCAN:
2066 		/*
2067 		 * In general this means a device has been added to the loop.
2068 		 */
2069 		mpt_prt(mpt, "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
2070 /*		xpt_async(AC_FOUND_DEVICE, path, NULL);  */
2071 		break;
2072 
2073 	case MPI_EVENT_LINK_STATUS_CHANGE:
2074 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2075 		    (msg->Data[1] >> 8) & 0xff,
2076 		    ((msg->Data[0] & 0xff) == 0)?  "Failed" : "Active");
2077 		break;
2078 
2079 	case MPI_EVENT_LOOP_STATE_CHANGE:
2080 		switch ((msg->Data[0] >> 16) & 0xff) {
2081 		case 0x01:
2082 			mpt_prt(mpt,
2083 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2084 			    "(Loop Initialization)\n",
2085 			    (msg->Data[1] >> 8) & 0xff,
2086 			    (msg->Data[0] >> 8) & 0xff,
2087 			    (msg->Data[0]     ) & 0xff);
2088 			switch ((msg->Data[0] >> 8) & 0xff) {
2089 			case 0xF7:
2090 				if ((msg->Data[0] & 0xff) == 0xF7) {
2091 					mpt_prt(mpt, "Device needs AL_PA\n");
2092 				} else {
2093 					mpt_prt(mpt, "Device %02x doesn't like "
2094 					    "FC performance\n",
2095 					    msg->Data[0] & 0xFF);
2096 				}
2097 				break;
2098 			case 0xF8:
2099 				if ((msg->Data[0] & 0xff) == 0xF7) {
2100 					mpt_prt(mpt, "Device had loop failure "
2101 					    "at its receiver prior to acquiring"
2102 					    " AL_PA\n");
2103 				} else {
2104 					mpt_prt(mpt, "Device %02x detected loop"
2105 					    " failure at its receiver\n",
2106 					    msg->Data[0] & 0xFF);
2107 				}
2108 				break;
2109 			default:
2110 				mpt_prt(mpt, "Device %02x requests that device "
2111 				    "%02x reset itself\n",
2112 				    msg->Data[0] & 0xFF,
2113 				    (msg->Data[0] >> 8) & 0xFF);
2114 				break;
2115 			}
2116 			break;
2117 		case 0x02:
2118 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2119 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2120 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2121 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2122 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2123 			break;
2124 		case 0x03:
2125 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2126 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2127 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2128 			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
2129 			    (msg->Data[0]     ) & 0xff  /* Character 4 */);
2130 			break;
2131 		default:
2132 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2133 			    "FC event (%02x %02x %02x)\n",
2134 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2135 			    (msg->Data[0] >> 16) & 0xff, /* Event */
2136 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2137 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2138 		}
2139 		break;
2140 
2141 	case MPI_EVENT_LOGOUT:
2142 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2143 		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
2144 		break;
2145 	case MPI_EVENT_EVENT_CHANGE:
2146 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2147 		    "mpt_cam_event: MPI_EVENT_EVENT_CHANGE\n");
2148 		break;
2149 	case MPI_EVENT_QUEUE_FULL:
2150 	{
2151 		struct cam_sim *sim;
2152 		struct cam_path *tmppath;
2153 		struct ccb_relsim crs;
2154 		PTR_EVENT_DATA_QUEUE_FULL pqf =
2155 		    (PTR_EVENT_DATA_QUEUE_FULL) msg->Data;
2156 		lun_id_t lun_id;
2157 
2158 		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2159 		    "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2160 		if (mpt->phydisk_sim) {
2161 			sim = mpt->phydisk_sim;
2162 		} else {
2163 			sim = mpt->sim;
2164 		}
2165 		MPTLOCK_2_CAMLOCK(mpt);
2166 		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2167 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2168 			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2169 				mpt_prt(mpt, "unable to create a path to send "
2170 				    "XPT_REL_SIMQ");
2171 				CAMLOCK_2_MPTLOCK(mpt);
2172 				break;
2173 			}
2174 			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2175 			crs.ccb_h.func_code = XPT_REL_SIMQ;
2176 			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2177 			crs.openings = pqf->CurrentDepth - 1;
2178 			xpt_action((union ccb *)&crs);
2179 			if (crs.ccb_h.status != CAM_REQ_CMP) {
2180 				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2181 			}
2182 			xpt_free_path(tmppath);
2183 		}
2184 		CAMLOCK_2_MPTLOCK(mpt);
2185 		break;
2186 	}
2187 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2188 	{
2189 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2190 		    "mpt_cam_event: SAS_DEVICE_STATUS_CHANGE\n");
2191 		break;
2192 	}
2193 	case MPI_EVENT_SAS_SES:
2194 	{
2195 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2196 		    "mpt_cam_event: MPI_EVENT_SAS_SES\n");
2197 		break;
2198 	}
2199 	default:
2200 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2201 		    msg->Event & 0xFF);
2202 		return (0);
2203 	}
2204 	return (1);
2205 }
2206 
2207 /*
2208  * Reply path for all SCSI I/O requests, called from our
2209  * interrupt handler by extracting our handler index from
2210  * the MsgContext field of the reply from the IOC.
2211  *
2212  * This routine is optimized for the common case of a
2213  * completion without error.  All exception handling is
2214  * offloaded to non-inlined helper routines to minimize
2215  * cache footprint.
2216  */
2217 static int
2218 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2219     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2220 {
2221 	MSG_SCSI_IO_REQUEST *scsi_req;
2222 	union ccb *ccb;
2223 	target_id_t tgt;
2224 
2225 	if (req->state == REQ_STATE_FREE) {
2226 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2227 		return (TRUE);
2228 	}
2229 
2230 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2231 	ccb = req->ccb;
2232 	if (ccb == NULL) {
2233 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2234 		    req, req->serno);
2235 		return (TRUE);
2236 	}
2237 
2238 	tgt = scsi_req->TargetID;
2239 	untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
2240 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2241 
2242 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2243 		bus_dmasync_op_t op;
2244 
2245 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2246 			op = BUS_DMASYNC_POSTREAD;
2247 		else
2248 			op = BUS_DMASYNC_POSTWRITE;
2249 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2250 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2251 	}
2252 
2253 	if (reply_frame == NULL) {
2254 		/*
2255 		 * Context only reply, completion without error status.
2256 		 */
2257 		ccb->csio.resid = 0;
2258 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2259 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2260 	} else {
2261 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2262 	}
2263 
2264 	if (mpt->outofbeer) {
2265 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2266 		mpt->outofbeer = 0;
2267 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2268 	}
2269 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2270 		struct scsi_inquiry_data *iq =
2271 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2272 		if (scsi_req->Function ==
2273 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2274 			/*
2275 			 * Fake out the device type so that only the
2276 			 * pass-thru device will attach.
2277 			 */
2278 			iq->device &= ~0x1F;
2279 			iq->device |= T_NODEVICE;
2280 		}
2281 	}
2282 	if (mpt->verbose == MPT_PRT_DEBUG) {
2283 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2284 		    req, req->serno);
2285 	}
2286 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2287 	MPTLOCK_2_CAMLOCK(mpt);
2288 	xpt_done(ccb);
2289 	CAMLOCK_2_MPTLOCK(mpt);
2290 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2291 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2292 	} else {
2293 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2294 		    req, req->serno);
2295 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2296 	}
2297 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2298 	    ("CCB req needed wakeup"));
2299 #ifdef	INVARIANTS
2300 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2301 #endif
2302 	mpt_free_request(mpt, req);
2303 	return (TRUE);
2304 }
2305 
2306 static int
2307 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2308     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2309 {
2310 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2311 
2312 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2313 #ifdef	INVARIANTS
2314 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2315 #endif
2316 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2317 	/* Record IOC Status and Response Code of TMF for any waiters. */
2318 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2319 	req->ResponseCode = tmf_reply->ResponseCode;
2320 
2321 	mpt_lprt(mpt, MPT_PRT_INFO, "TMF complete: req %p:%u status 0x%x\n",
2322 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2323 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2324 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2325 		req->state |= REQ_STATE_DONE;
2326 		wakeup(req);
2327 	} else {
2328 		mpt->tmf_req->state = REQ_STATE_FREE;
2329 	}
2330 	return (TRUE);
2331 }
2332 
2333 /*
2334  * XXX: Move to definitions file
2335  */
2336 #define	ELS	0x22
2337 #define	FC4LS	0x32
2338 #define	ABTS	0x81
2339 #define	BA_ACC	0x84
2340 
2341 #define	LS_RJT	0x01
2342 #define	LS_ACC	0x02
2343 #define	PLOGI	0x03
2344 #define	LOGO	0x05
2345 #define SRR	0x14
2346 #define PRLI	0x20
2347 #define PRLO	0x21
2348 #define ADISC	0x52
2349 #define RSCN	0x61
2350 
2351 static void
2352 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2353     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2354 {
2355 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2356 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2357 
2358 	/*
2359 	 * We are going to reuse the ELS request to send this response back.
2360 	 */
2361 	rsp = &tmp;
2362 	memset(rsp, 0, sizeof(*rsp));
2363 
2364 #ifdef	USE_IMMEDIATE_LINK_DATA
2365 	/*
2366 	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2367 	 */
2368 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2369 #endif
2370 	rsp->RspLength = length;
2371 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2372 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2373 
2374 	/*
2375 	 * Copy over information from the original reply frame to
2376 	 * it's correct place in the response.
2377 	 */
2378 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2379 
2380 	/*
2381 	 * And now copy back the temporary area to the original frame.
2382 	 */
2383 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2384 	rsp = req->req_vbuf;
2385 
2386 #ifdef	USE_IMMEDIATE_LINK_DATA
2387 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2388 #else
2389 {
2390 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2391 	bus_addr_t paddr = req->req_pbuf;
2392 	paddr += MPT_RQSL(mpt);
2393 
2394 	se->FlagsLength =
2395 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2396 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2397 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2398 		MPI_SGE_FLAGS_END_OF_LIST	|
2399 		MPI_SGE_FLAGS_END_OF_BUFFER;
2400 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
2401 	se->FlagsLength |= (length);
2402 	se->Address = (uint32_t) paddr;
2403 }
2404 #endif
2405 
2406 	/*
2407 	 * Send it on...
2408 	 */
2409 	mpt_send_cmd(mpt, req);
2410 }
2411 
2412 static int
2413 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2414     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2415 {
2416 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2417 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2418 	U8 rctl;
2419 	U8 type;
2420 	U8 cmd;
2421 	U16 status = le16toh(reply_frame->IOCStatus);
2422 	U32 *elsbuf;
2423 	int ioindex;
2424 	int do_refresh = TRUE;
2425 
2426 #ifdef	INVARIANTS
2427 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2428 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2429 	    req, req->serno, rp->Function));
2430 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2431 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2432 	} else {
2433 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2434 	}
2435 #endif
2436 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2437 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2438 	    req, req->serno, reply_frame, reply_frame->Function);
2439 
2440 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2441 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2442 		    status, reply_frame->Function);
2443 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2444 			/*
2445 			 * XXX: to get around shutdown issue
2446 			 */
2447 			mpt->disabled = 1;
2448 			return (TRUE);
2449 		}
2450 		return (TRUE);
2451 	}
2452 
2453 	/*
2454 	 * If the function of a link service response, we recycle the
2455 	 * response to be a refresh for a new link service request.
2456 	 *
2457 	 * The request pointer is bogus in this case and we have to fetch
2458 	 * it based upon the TransactionContext.
2459 	 */
2460 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2461 		/* Freddie Uncle Charlie Katie */
2462 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2463 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2464 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2465 				break;
2466 			}
2467 
2468 		KASSERT(ioindex < mpt->els_cmds_allocated,
2469 		    ("can't find my mommie!"));
2470 
2471 		/* remove from active list as we're going to re-post it */
2472 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2473 		req->state &= ~REQ_STATE_QUEUED;
2474 		req->state |= REQ_STATE_DONE;
2475 		mpt_fc_post_els(mpt, req, ioindex);
2476 		return (TRUE);
2477 	}
2478 
2479 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2480 		/* remove from active list as we're done */
2481 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2482 		req->state &= ~REQ_STATE_QUEUED;
2483 		req->state |= REQ_STATE_DONE;
2484 		if (req->state & REQ_STATE_TIMEDOUT) {
2485 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2486 			    "Sync Primitive Send Completed After Timeout\n");
2487 			mpt_free_request(mpt, req);
2488 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2489 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2490 			    "Async Primitive Send Complete\n");
2491 			mpt_free_request(mpt, req);
2492 		} else {
2493 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2494 			    "Sync Primitive Send Complete- Waking Waiter\n");
2495 			wakeup(req);
2496 		}
2497 		return (TRUE);
2498 	}
2499 
2500 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2501 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2502 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2503 		    rp->MsgLength, rp->MsgFlags);
2504 		return (TRUE);
2505 	}
2506 
2507 	if (rp->MsgLength <= 5) {
2508 		/*
2509 		 * This is just a ack of an original ELS buffer post
2510 		 */
2511 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2512 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2513 		return (TRUE);
2514 	}
2515 
2516 
2517 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2518 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2519 
2520 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2521 	cmd = be32toh(elsbuf[0]) >> 24;
2522 
2523 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2524 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2525 		return (TRUE);
2526 	}
2527 
2528 	ioindex = le32toh(rp->TransactionContext);
2529 	req = mpt->els_cmd_ptrs[ioindex];
2530 
2531 	if (rctl == ELS && type == 1) {
2532 		switch (cmd) {
2533 		case PRLI:
2534 			/*
2535 			 * Send back a PRLI ACC
2536 			 */
2537 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2538 			    le32toh(rp->Wwn.PortNameHigh),
2539 			    le32toh(rp->Wwn.PortNameLow));
2540 			elsbuf[0] = htobe32(0x02100014);
2541 			elsbuf[1] |= htobe32(0x00000100);
2542 			elsbuf[4] = htobe32(0x00000002);
2543 			if (mpt->role & MPT_ROLE_TARGET)
2544 				elsbuf[4] |= htobe32(0x00000010);
2545 			if (mpt->role & MPT_ROLE_INITIATOR)
2546 				elsbuf[4] |= htobe32(0x00000020);
2547 			/* remove from active list as we're done */
2548 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2549 			req->state &= ~REQ_STATE_QUEUED;
2550 			req->state |= REQ_STATE_DONE;
2551 			mpt_fc_els_send_response(mpt, req, rp, 20);
2552 			do_refresh = FALSE;
2553 			break;
2554 		case PRLO:
2555 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2556 			elsbuf[0] = htobe32(0x02100014);
2557 			elsbuf[1] = htobe32(0x08000100);
2558 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2559 			    le32toh(rp->Wwn.PortNameHigh),
2560 			    le32toh(rp->Wwn.PortNameLow));
2561 			/* remove from active list as we're done */
2562 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2563 			req->state &= ~REQ_STATE_QUEUED;
2564 			req->state |= REQ_STATE_DONE;
2565 			mpt_fc_els_send_response(mpt, req, rp, 20);
2566 			do_refresh = FALSE;
2567 			break;
2568 		default:
2569 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2570 			break;
2571 		}
2572 	} else if (rctl == ABTS && type == 0) {
2573 		uint16_t rx_id = le16toh(rp->Rxid);
2574 		uint16_t ox_id = le16toh(rp->Oxid);
2575 		request_t *tgt_req = NULL;
2576 
2577 		mpt_prt(mpt,
2578 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2579 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2580 		    le32toh(rp->Wwn.PortNameLow));
2581 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2582 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2583 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2584 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2585 		} else {
2586 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2587 		}
2588 		if (tgt_req) {
2589 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2590 			uint8_t *vbuf;
2591 			union ccb *ccb = tgt->ccb;
2592 			uint32_t ct_id;
2593 
2594 			vbuf = tgt_req->req_vbuf;
2595 			vbuf += MPT_RQSL(mpt);
2596 
2597 			/*
2598 			 * Check to make sure we have the correct command
2599 			 * The reply descriptor in the target state should
2600 			 * should contain an IoIndex that should match the
2601 			 * RX_ID.
2602 			 *
2603 			 * It'd be nice to have OX_ID to crosscheck with
2604 			 * as well.
2605 			 */
2606 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2607 
2608 			if (ct_id != rx_id) {
2609 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2610 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2611 				    rx_id, ct_id);
2612 				goto skip;
2613 			}
2614 
2615 			ccb = tgt->ccb;
2616 			if (ccb) {
2617 				mpt_prt(mpt,
2618 				    "CCB (%p): lun %u flags %x status %x\n",
2619 				    ccb, ccb->ccb_h.target_lun,
2620 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2621 			}
2622 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2623 			    "%x nxfers %x\n", tgt->state,
2624 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2625 			    tgt->nxfers);
2626   skip:
2627 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2628 				mpt_prt(mpt, "unable to start TargetAbort\n");
2629 			}
2630 		} else {
2631 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2632 		}
2633 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2634 		elsbuf[0] = htobe32(0);
2635 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2636 		elsbuf[2] = htobe32(0x000ffff);
2637 		/*
2638 		 * Dork with the reply frame so that the reponse to it
2639 		 * will be correct.
2640 		 */
2641 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2642 		/* remove from active list as we're done */
2643 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2644 		req->state &= ~REQ_STATE_QUEUED;
2645 		req->state |= REQ_STATE_DONE;
2646 		mpt_fc_els_send_response(mpt, req, rp, 12);
2647 		do_refresh = FALSE;
2648 	} else {
2649 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2650 	}
2651 	if (do_refresh == TRUE) {
2652 		/* remove from active list as we're done */
2653 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2654 		req->state &= ~REQ_STATE_QUEUED;
2655 		req->state |= REQ_STATE_DONE;
2656 		mpt_fc_post_els(mpt, req, ioindex);
2657 	}
2658 	return (TRUE);
2659 }
2660 
2661 /*
2662  * Clean up all SCSI Initiator personality state in response
2663  * to a controller reset.
2664  */
2665 static void
2666 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2667 {
2668 	/*
2669 	 * The pending list is already run down by
2670 	 * the generic handler.  Perform the same
2671 	 * operation on the timed out request list.
2672 	 */
2673 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
2674 				   MPI_IOCSTATUS_INVALID_STATE);
2675 
2676 	/*
2677 	 * XXX: We need to repost ELS and Target Command Buffers?
2678 	 */
2679 
2680 	/*
2681 	 * Inform the XPT that a bus reset has occurred.
2682 	 */
2683 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
2684 }
2685 
2686 /*
2687  * Parse additional completion information in the reply
2688  * frame for SCSI I/O requests.
2689  */
2690 static int
2691 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
2692 			     MSG_DEFAULT_REPLY *reply_frame)
2693 {
2694 	union ccb *ccb;
2695 	MSG_SCSI_IO_REPLY *scsi_io_reply;
2696 	u_int ioc_status;
2697 	u_int sstate;
2698 	u_int loginfo;
2699 
2700 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
2701 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
2702 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
2703 		("MPT SCSI I/O Handler called with incorrect reply type"));
2704 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
2705 		("MPT SCSI I/O Handler called with continuation reply"));
2706 
2707 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
2708 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
2709 	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
2710 	ioc_status &= MPI_IOCSTATUS_MASK;
2711 	sstate = scsi_io_reply->SCSIState;
2712 
2713 	ccb = req->ccb;
2714 	ccb->csio.resid =
2715 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
2716 
2717 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
2718 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
2719 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2720 		ccb->csio.sense_resid =
2721 		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
2722 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
2723 		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
2724 	}
2725 
2726 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
2727 		/*
2728 		 * Tag messages rejected, but non-tagged retry
2729 		 * was successful.
2730 XXXX
2731 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
2732 		 */
2733 	}
2734 
2735 	switch(ioc_status) {
2736 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2737 		/*
2738 		 * XXX
2739 		 * Linux driver indicates that a zero
2740 		 * transfer length with this error code
2741 		 * indicates a CRC error.
2742 		 *
2743 		 * No need to swap the bytes for checking
2744 		 * against zero.
2745 		 */
2746 		if (scsi_io_reply->TransferCount == 0) {
2747 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2748 			break;
2749 		}
2750 		/* FALLTHROUGH */
2751 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
2752 	case MPI_IOCSTATUS_SUCCESS:
2753 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
2754 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
2755 			/*
2756 			 * Status was never returned for this transaction.
2757 			 */
2758 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
2759 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
2760 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
2761 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
2762 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
2763 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
2764 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
2765 
2766 			/* XXX Handle SPI-Packet and FCP-2 reponse info. */
2767 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2768 		} else
2769 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2770 		break;
2771 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
2772 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
2773 		break;
2774 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
2775 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2776 		break;
2777 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2778 		/*
2779 		 * Since selection timeouts and "device really not
2780 		 * there" are grouped into this error code, report
2781 		 * selection timeout.  Selection timeouts are
2782 		 * typically retried before giving up on the device
2783 		 * whereas "device not there" errors are considered
2784 		 * unretryable.
2785 		 */
2786 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
2787 		break;
2788 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2789 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
2790 		break;
2791 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
2792 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
2793 		break;
2794 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
2795 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
2796 		break;
2797 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2798 		ccb->ccb_h.status = CAM_UA_TERMIO;
2799 		break;
2800 	case MPI_IOCSTATUS_INVALID_STATE:
2801 		/*
2802 		 * The IOC has been reset.  Emulate a bus reset.
2803 		 */
2804 		/* FALLTHROUGH */
2805 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
2806 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2807 		break;
2808 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
2809 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
2810 		/*
2811 		 * Don't clobber any timeout status that has
2812 		 * already been set for this transaction.  We
2813 		 * want the SCSI layer to be able to differentiate
2814 		 * between the command we aborted due to timeout
2815 		 * and any innocent bystanders.
2816 		 */
2817 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
2818 			break;
2819 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
2820 		break;
2821 
2822 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2823 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
2824 		break;
2825 	case MPI_IOCSTATUS_BUSY:
2826 		mpt_set_ccb_status(ccb, CAM_BUSY);
2827 		break;
2828 	case MPI_IOCSTATUS_INVALID_FUNCTION:
2829 	case MPI_IOCSTATUS_INVALID_SGL:
2830 	case MPI_IOCSTATUS_INTERNAL_ERROR:
2831 	case MPI_IOCSTATUS_INVALID_FIELD:
2832 	default:
2833 		/* XXX
2834 		 * Some of the above may need to kick
2835 		 * of a recovery action!!!!
2836 		 */
2837 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2838 		break;
2839 	}
2840 
2841 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2842 		mpt_freeze_ccb(ccb);
2843 	}
2844 
2845 	return (TRUE);
2846 }
2847 
2848 static void
2849 mpt_action(struct cam_sim *sim, union ccb *ccb)
2850 {
2851 	struct mpt_softc *mpt;
2852 	struct ccb_trans_settings *cts;
2853 	target_id_t tgt;
2854 	lun_id_t lun;
2855 	int raid_passthru;
2856 
2857 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
2858 
2859 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
2860 	KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action"));
2861 	raid_passthru = (sim == mpt->phydisk_sim);
2862 
2863 	tgt = ccb->ccb_h.target_id;
2864 	lun = ccb->ccb_h.target_lun;
2865 	if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ &&
2866 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
2867 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
2868 		CAMLOCK_2_MPTLOCK(mpt);
2869 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2870 			MPTLOCK_2_CAMLOCK(mpt);
2871 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2872 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2873 			xpt_done(ccb);
2874 			return;
2875 		}
2876 		MPTLOCK_2_CAMLOCK(mpt);
2877 	}
2878 	ccb->ccb_h.ccb_mpt_ptr = mpt;
2879 
2880 	switch (ccb->ccb_h.func_code) {
2881 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2882 		/*
2883 		 * Do a couple of preliminary checks...
2884 		 */
2885 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2886 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2887 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2888 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2889 				break;
2890 			}
2891 		}
2892 		/* Max supported CDB length is 16 bytes */
2893 		/* XXX Unless we implement the new 32byte message type */
2894 		if (ccb->csio.cdb_len >
2895 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
2896 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2897 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2898 			break;
2899 		}
2900 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2901 		mpt_start(sim, ccb);
2902 		return;
2903 
2904 	case XPT_RESET_BUS:
2905 	case XPT_RESET_DEV:
2906 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2907 			ccb->ccb_h.func_code == XPT_RESET_BUS ?
2908 			"XPT_RESET_BUS\n" : "XPT_RESET_DEV\n");
2909 
2910 		CAMLOCK_2_MPTLOCK(mpt);
2911 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
2912 		MPTLOCK_2_CAMLOCK(mpt);
2913 
2914 		/*
2915 		 * mpt_bus_reset is always successful in that it
2916 		 * will fall back to a hard reset should a bus
2917 		 * reset attempt fail.
2918 		 */
2919 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2920 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2921 		break;
2922 
2923 	case XPT_ABORT:
2924 	{
2925 		union ccb *accb = ccb->cab.abort_ccb;
2926 		CAMLOCK_2_MPTLOCK(mpt);
2927 		switch (accb->ccb_h.func_code) {
2928 		case XPT_ACCEPT_TARGET_IO:
2929 		case XPT_IMMED_NOTIFY:
2930 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
2931 			break;
2932 		case XPT_CONT_TARGET_IO:
2933 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
2934 			ccb->ccb_h.status = CAM_UA_ABORT;
2935 			break;
2936 		case XPT_SCSI_IO:
2937 			ccb->ccb_h.status = CAM_UA_ABORT;
2938 			break;
2939 		default:
2940 			ccb->ccb_h.status = CAM_REQ_INVALID;
2941 			break;
2942 		}
2943 		MPTLOCK_2_CAMLOCK(mpt);
2944 		break;
2945 	}
2946 
2947 #ifdef	CAM_NEW_TRAN_CODE
2948 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
2949 #else
2950 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
2951 #endif
2952 #define	DP_DISC_ENABLE	0x1
2953 #define	DP_DISC_DISABL	0x2
2954 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
2955 
2956 #define	DP_TQING_ENABLE	0x4
2957 #define	DP_TQING_DISABL	0x8
2958 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
2959 
2960 #define	DP_WIDE		0x10
2961 #define	DP_NARROW	0x20
2962 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
2963 
2964 #define	DP_SYNC		0x40
2965 
2966 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2967 	{
2968 #ifdef	CAM_NEW_TRAN_CODE
2969 		struct ccb_trans_settings_scsi *scsi;
2970 		struct ccb_trans_settings_spi *spi;
2971 #endif
2972 		uint8_t dval;
2973 		u_int period;
2974 		u_int offset;
2975 		int i, j;
2976 
2977 		cts = &ccb->cts;
2978 
2979 		if (mpt->is_fc || mpt->is_sas) {
2980 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2981 			break;
2982 		}
2983 
2984 		/*
2985 		 * Skip attempting settings on RAID volume disks.
2986 		 * Other devices on the bus get the normal treatment.
2987 		 */
2988 		if (mpt->phydisk_sim && raid_passthru == 0 &&
2989 		    mpt_is_raid_volume(mpt, tgt) != 0) {
2990 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
2991 			    "skipping transfer settings for RAID volumes\n");
2992 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2993 			break;
2994 		}
2995 
2996 		i = mpt->mpt_port_page2.PortSettings &
2997 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
2998 		j = mpt->mpt_port_page2.PortFlags &
2999 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3000 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3001 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3002 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
3003 			    "honoring BIOS transfer negotiations\n");
3004 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3005 			break;
3006 		}
3007 
3008 		dval = 0;
3009 		period = 0;
3010 		offset = 0;
3011 
3012 #ifndef	CAM_NEW_TRAN_CODE
3013 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3014 			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3015 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3016 		}
3017 
3018 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3019 			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3020 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3021 		}
3022 
3023 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3024 			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3025 		}
3026 
3027 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3028 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3029 			dval |= DP_SYNC;
3030 			period = cts->sync_period;
3031 			offset = cts->sync_offset;
3032 		}
3033 #else
3034 		scsi = &cts->proto_specific.scsi;
3035 		spi = &cts->xport_specific.spi;
3036 
3037 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3038 			dval |= (spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3039 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3040 		}
3041 
3042 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3043 			dval |= (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3044 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3045 		}
3046 
3047 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3048 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3049 			    DP_WIDE : DP_NARROW;
3050 		}
3051 
3052 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
3053 		    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
3054 		    (spi->sync_period && spi->sync_offset)) {
3055 			dval |= DP_SYNC;
3056 			period = spi->sync_period;
3057 			offset = spi->sync_offset;
3058 		}
3059 #endif
3060 		CAMLOCK_2_MPTLOCK(mpt);
3061 		if (dval & DP_DISC_ENABLE) {
3062 			mpt->mpt_disc_enable |= (1 << tgt);
3063 		} else if (dval & DP_DISC_DISABL) {
3064 			mpt->mpt_disc_enable &= ~(1 << tgt);
3065 		}
3066 		if (dval & DP_TQING_ENABLE) {
3067 			mpt->mpt_tag_enable |= (1 << tgt);
3068 		} else if (dval & DP_TQING_DISABL) {
3069 			mpt->mpt_tag_enable &= ~(1 << tgt);
3070 		}
3071 		if (dval & DP_WIDTH) {
3072 			mpt_setwidth(mpt, tgt, 1);
3073 		}
3074 		if (dval & DP_SYNC) {
3075 			mpt_setsync(mpt, tgt, period, offset);
3076 		}
3077 
3078 		if (mpt_update_spi_config(mpt, tgt)) {
3079 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3080 		} else {
3081 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3082 		}
3083 		MPTLOCK_2_CAMLOCK(mpt);
3084 		break;
3085 	}
3086 	case XPT_GET_TRAN_SETTINGS:
3087 		cts = &ccb->cts;
3088 		if (mpt->is_fc) {
3089 #ifndef	CAM_NEW_TRAN_CODE
3090 			/*
3091 			 * a lot of normal SCSI things don't make sense.
3092 			 */
3093 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3094 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3095 			/*
3096 			 * How do you measure the width of a high
3097 			 * speed serial bus? Well, in bytes.
3098 			 *
3099 			 * Offset and period make no sense, though, so we set
3100 			 * (above) a 'base' transfer speed to be gigabit.
3101 			 */
3102 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3103 #else
3104 			struct ccb_trans_settings_fc *fc =
3105 			    &cts->xport_specific.fc;
3106 
3107 			cts->protocol = PROTO_SCSI;
3108 			cts->protocol_version = SCSI_REV_2;
3109 			cts->transport = XPORT_FC;
3110 			cts->transport_version = 0;
3111 
3112 			fc->valid = CTS_FC_VALID_SPEED;
3113 			fc->bitrate = 100000;	/* XXX: Need for 2Gb/s */
3114 			/* XXX: need a port database for each target */
3115 #endif
3116 		} else if (mpt->is_sas) {
3117 #ifndef	CAM_NEW_TRAN_CODE
3118 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3119 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3120 			/*
3121 			 * How do you measure the width of a high
3122 			 * speed serial bus? Well, in bytes.
3123 			 *
3124 			 * Offset and period make no sense, though, so we set
3125 			 * (above) a 'base' transfer speed to be gigabit.
3126 			 */
3127 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3128 #else
3129 			struct ccb_trans_settings_sas *sas =
3130 			    &cts->xport_specific.sas;
3131 
3132 			cts->protocol = PROTO_SCSI;
3133 			cts->protocol_version = SCSI_REV_3;
3134 			cts->transport = XPORT_SAS;
3135 			cts->transport_version = 0;
3136 
3137 			sas->valid = CTS_SAS_VALID_SPEED;
3138 			sas->bitrate = 300000;	/* XXX: Default 3Gbps */
3139 #endif
3140 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
3141 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3142 			break;
3143 		}
3144 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3145 		break;
3146 
3147 	case XPT_CALC_GEOMETRY:
3148 	{
3149 		struct ccb_calc_geometry *ccg;
3150 
3151 		ccg = &ccb->ccg;
3152 		if (ccg->block_size == 0) {
3153 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3154 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3155 			break;
3156 		}
3157 		mpt_calc_geometry(ccg, /*extended*/1);
3158 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3159 		break;
3160 	}
3161 	case XPT_PATH_INQ:		/* Path routing inquiry */
3162 	{
3163 		struct ccb_pathinq *cpi = &ccb->cpi;
3164 
3165 		cpi->version_num = 1;
3166 		cpi->target_sprt = 0;
3167 		cpi->hba_eng_cnt = 0;
3168 		cpi->max_target = mpt->mpt_max_devices - 1;
3169 		/*
3170 		 * XXX: FC cards report MAX_DEVICES of 512- but we
3171 		 * XXX: seem to hang when going higher than 255.
3172 		 */
3173 		if (cpi->max_target > 255)
3174 			cpi->max_target = 255;
3175 		/*
3176 		 * XXX: VMware ESX reports > 16 devices and then dies
3177 		 * XXX: when we probe.
3178 		 */
3179 		if (mpt->is_spi && cpi->max_target > 15)
3180 			cpi->max_target = 15;
3181 		cpi->max_lun = 7;
3182 		cpi->initiator_id = mpt->mpt_ini_id;
3183 
3184 		cpi->bus_id = cam_sim_bus(sim);
3185 		/*
3186 		 * Actual speed for each device varies.
3187 		 *
3188 		 * The base speed is the speed of the underlying connection.
3189 		 * This is strictly determined for SPI (async, narrow). If
3190 		 * link is up for Fibre Channel, then speed can be gotten
3191 		 * from that.
3192 		 */
3193 		if (mpt->is_fc) {
3194 			cpi->hba_misc = PIM_NOBUSRESET;
3195 			cpi->base_transfer_speed =
3196 			    mpt->mpt_fcport_speed * 100000;
3197 			cpi->hba_inquiry = PI_TAG_ABLE;
3198 		} else if (mpt->is_sas) {
3199 			cpi->hba_misc = PIM_NOBUSRESET;
3200 			cpi->base_transfer_speed = 300000;
3201 			cpi->hba_inquiry = PI_TAG_ABLE;
3202 		} else {
3203 			cpi->hba_misc = PIM_SEQSCAN;
3204 			cpi->base_transfer_speed = 3300;
3205 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3206 		}
3207 
3208 		/*
3209 		 * We give our fake RAID passhtru bus a width that is MaxVolumes
3210 		 * wide, restrict it to one lun and have it *not* be a bus
3211 		 * that can have a SCSI bus reset.
3212 		 */
3213 		if (raid_passthru) {
3214 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3215 			cpi->initiator_id = cpi->max_target + 1;
3216 			cpi->max_lun = 0;
3217 			cpi->hba_misc |= PIM_NOBUSRESET;
3218 		}
3219 
3220 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3221 			cpi->hba_misc |= PIM_NOINITIATOR;
3222 		}
3223 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3224 			cpi->target_sprt =
3225 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3226 		} else {
3227 			cpi->target_sprt = 0;
3228 		}
3229 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3230 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3231 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3232 		cpi->unit_number = cam_sim_unit(sim);
3233 		cpi->ccb_h.status = CAM_REQ_CMP;
3234 		break;
3235 	}
3236 	case XPT_EN_LUN:		/* Enable LUN as a target */
3237 	{
3238 		int result;
3239 
3240 		CAMLOCK_2_MPTLOCK(mpt);
3241 		if (ccb->cel.enable)
3242 			result = mpt_enable_lun(mpt,
3243 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3244 		else
3245 			result = mpt_disable_lun(mpt,
3246 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3247 		MPTLOCK_2_CAMLOCK(mpt);
3248 		if (result == 0) {
3249 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3250 		} else {
3251 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3252 		}
3253 		break;
3254 	}
3255 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3256 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3257 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3258 	{
3259 		tgt_resource_t *trtp;
3260 		lun_id_t lun = ccb->ccb_h.target_lun;
3261 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3262 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3263 		ccb->ccb_h.flags = 0;
3264 
3265 		if (lun == CAM_LUN_WILDCARD) {
3266 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3267 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3268 				break;
3269 			}
3270 			trtp = &mpt->trt_wildcard;
3271 		} else if (lun >= MPT_MAX_LUNS) {
3272 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3273 			break;
3274 		} else {
3275 			trtp = &mpt->trt[lun];
3276 		}
3277 		CAMLOCK_2_MPTLOCK(mpt);
3278 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3279 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3280 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3281 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3282 			    sim_links.stqe);
3283 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3284 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3285 			    "Put FREE INOT lun %d\n", lun);
3286 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3287 			    sim_links.stqe);
3288 		} else {
3289 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3290 		}
3291 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3292 		MPTLOCK_2_CAMLOCK(mpt);
3293 		return;
3294 	}
3295 	case XPT_CONT_TARGET_IO:
3296 		CAMLOCK_2_MPTLOCK(mpt);
3297 		mpt_target_start_io(mpt, ccb);
3298 		MPTLOCK_2_CAMLOCK(mpt);
3299 		return;
3300 
3301 	default:
3302 		ccb->ccb_h.status = CAM_REQ_INVALID;
3303 		break;
3304 	}
3305 	xpt_done(ccb);
3306 }
3307 
3308 static int
3309 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3310 {
3311 #ifdef	CAM_NEW_TRAN_CODE
3312 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3313 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3314 #endif
3315 	target_id_t tgt;
3316 	uint8_t dval, pval, oval;
3317 	int rv;
3318 
3319 	if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3320 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3321 			return (-1);
3322 		}
3323 	} else {
3324 		tgt = cts->ccb_h.target_id;
3325 	}
3326 
3327 	/*
3328 	 * XXX: We aren't looking Port Page 2 BIOS settings here.
3329 	 * XXX: For goal settings, we pick the max from port page 0
3330 	 *
3331 	 * For current settings we read the current settings out from
3332 	 * device page 0 for that target.
3333 	 */
3334 	if (IS_CURRENT_SETTINGS(cts)) {
3335 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3336 		dval = 0;
3337 
3338 		CAMLOCK_2_MPTLOCK(mpt);
3339 		tmp = mpt->mpt_dev_page0[tgt];
3340 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3341 		    sizeof(tmp), FALSE, 5000);
3342 		if (rv) {
3343 			MPTLOCK_2_CAMLOCK(mpt);
3344 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3345 			return (rv);
3346 		}
3347 		MPTLOCK_2_CAMLOCK(mpt);
3348 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3349 		    DP_WIDE : DP_NARROW;
3350 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3351 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3352 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3353 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3354 		oval = (tmp.NegotiatedParameters >> 16) & 0xff;
3355 		pval = (tmp.NegotiatedParameters >>  8) & 0xff;
3356 		mpt->mpt_dev_page0[tgt] = tmp;
3357 	} else {
3358 		/*
3359 		 * XXX: Just make theoretical maximum.
3360 		 */
3361 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE;
3362 		oval = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
3363 		pval = (mpt->mpt_port_page0.Capabilities >>  8) & 0xff;
3364 	}
3365 #ifndef	CAM_NEW_TRAN_CODE
3366 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3367 	if (dval & DP_DISC_ENABLE) {
3368 		cts->flags |= CCB_TRANS_DISC_ENB;
3369 	}
3370 	if (dval & DP_TQING_ENABLE) {
3371 		cts->flags |= CCB_TRANS_TAG_ENB;
3372 	}
3373 	if (dval & DP_WIDE) {
3374 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3375 	} else {
3376 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3377 	}
3378 	cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
3379 	    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3380 	if (oval) {
3381 		cts->sync_period = pval;
3382 		cts->sync_offset = oval;
3383 		cts->valid |=
3384 		    CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID;
3385 	}
3386 #else
3387 	cts->protocol = PROTO_SCSI;
3388 	cts->protocol_version = SCSI_REV_2;
3389 	cts->transport = XPORT_SPI;
3390 	cts->transport_version = 2;
3391 
3392 	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
3393 	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
3394 	if (dval & DP_DISC_ENABLE) {
3395 		spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3396 	}
3397 	if (dval & DP_TQING_ENABLE) {
3398 		scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3399 	}
3400 	if (oval && pval) {
3401 		spi->sync_offset = oval;
3402 		spi->sync_period = pval;
3403 		spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3404 		spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3405 	}
3406 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3407 	if (dval & DP_WIDE) {
3408 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3409 	} else {
3410 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3411 	}
3412 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3413 		scsi->valid = CTS_SCSI_VALID_TQ;
3414 		spi->valid |= CTS_SPI_VALID_DISC;
3415 	} else {
3416 		scsi->valid = 0;
3417 	}
3418 #endif
3419 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3420 	    "mpt_get_spi_settings[%d]: %s 0x%x period 0x%x offset %d\n", tgt,
3421 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3422 	return (0);
3423 }
3424 
3425 static void
3426 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3427 {
3428 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3429 
3430 	ptr = &mpt->mpt_dev_page1[tgt];
3431 	if (onoff) {
3432 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3433 	} else {
3434 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3435 	}
3436 }
3437 
3438 static void
3439 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3440 {
3441 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3442 
3443 	ptr = &mpt->mpt_dev_page1[tgt];
3444 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3445 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3446 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3447 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3448 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3449 	ptr->RequestedParameters |= (period << 8) | (offset << 16);
3450 	if (period < 0xa) {
3451 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3452 	}
3453 	if (period < 0x9) {
3454 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3455 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3456 	}
3457 }
3458 
3459 static int
3460 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3461 {
3462 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3463 	int rv;
3464 
3465 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3466 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3467 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3468 	tmp = mpt->mpt_dev_page1[tgt];
3469 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3470 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3471 	if (rv) {
3472 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3473 		return (-1);
3474 	}
3475 	return (0);
3476 }
3477 
3478 static void
3479 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3480 {
3481 #if __FreeBSD_version >= 500000
3482 	cam_calc_geometry(ccg, extended);
3483 #else
3484 	uint32_t size_mb;
3485 	uint32_t secs_per_cylinder;
3486 
3487 	if (ccg->block_size == 0) {
3488 		ccg->ccb_h.status = CAM_REQ_INVALID;
3489 		return;
3490 	}
3491 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3492 	if (size_mb > 1024 && extended) {
3493 		ccg->heads = 255;
3494 		ccg->secs_per_track = 63;
3495 	} else {
3496 		ccg->heads = 64;
3497 		ccg->secs_per_track = 32;
3498 	}
3499 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3500 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3501 	ccg->ccb_h.status = CAM_REQ_CMP;
3502 #endif
3503 }
3504 
3505 /****************************** Timeout Recovery ******************************/
3506 static int
3507 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3508 {
3509 	int error;
3510 
3511 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3512 	    &mpt->recovery_thread, /*flags*/0,
3513 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3514 	return (error);
3515 }
3516 
3517 static void
3518 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3519 {
3520 	if (mpt->recovery_thread == NULL) {
3521 		return;
3522 	}
3523 	mpt->shutdwn_recovery = 1;
3524 	wakeup(mpt);
3525 	/*
3526 	 * Sleep on a slightly different location
3527 	 * for this interlock just for added safety.
3528 	 */
3529 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3530 }
3531 
3532 static void
3533 mpt_recovery_thread(void *arg)
3534 {
3535 	struct mpt_softc *mpt;
3536 
3537 #if __FreeBSD_version >= 500000
3538 	mtx_lock(&Giant);
3539 #endif
3540 	mpt = (struct mpt_softc *)arg;
3541 	MPT_LOCK(mpt);
3542 	for (;;) {
3543 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3544 			if (mpt->shutdwn_recovery == 0) {
3545 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3546 			}
3547 		}
3548 		if (mpt->shutdwn_recovery != 0) {
3549 			break;
3550 		}
3551 		mpt_recover_commands(mpt);
3552 	}
3553 	mpt->recovery_thread = NULL;
3554 	wakeup(&mpt->recovery_thread);
3555 	MPT_UNLOCK(mpt);
3556 #if __FreeBSD_version >= 500000
3557 	mtx_unlock(&Giant);
3558 #endif
3559 	kthread_exit(0);
3560 }
3561 
3562 static int
3563 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3564     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3565 {
3566 	MSG_SCSI_TASK_MGMT *tmf_req;
3567 	int		    error;
3568 
3569 	/*
3570 	 * Wait for any current TMF request to complete.
3571 	 * We're only allowed to issue one TMF at a time.
3572 	 */
3573 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3574 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3575 	if (error != 0) {
3576 		mpt_reset(mpt, TRUE);
3577 		return (ETIMEDOUT);
3578 	}
3579 
3580 	mpt_assign_serno(mpt, mpt->tmf_req);
3581 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3582 
3583 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3584 	memset(tmf_req, 0, sizeof(*tmf_req));
3585 	tmf_req->TargetID = target;
3586 	tmf_req->Bus = channel;
3587 	tmf_req->ChainOffset = 0;
3588 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3589 	tmf_req->Reserved = 0;
3590 	tmf_req->TaskType = type;
3591 	tmf_req->Reserved1 = 0;
3592 	tmf_req->MsgFlags = flags;
3593 	tmf_req->MsgContext =
3594 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3595 	memset(&tmf_req->LUN, 0,
3596 	    sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
3597 	if (lun > 256) {
3598 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3599 		tmf_req->LUN[1] = lun & 0xff;
3600 	} else {
3601 		tmf_req->LUN[1] = lun;
3602 	}
3603 	tmf_req->TaskMsgContext = abort_ctx;
3604 
3605 	mpt_lprt(mpt, MPT_PRT_INFO,
3606 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3607 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3608 	if (mpt->verbose > MPT_PRT_DEBUG) {
3609 		mpt_print_request(tmf_req);
3610 	}
3611 
3612 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3613 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3614 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3615 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3616 	if (error != MPT_OK) {
3617 		mpt_reset(mpt, TRUE);
3618 	}
3619 	return (error);
3620 }
3621 
3622 /*
3623  * When a command times out, it is placed on the requeust_timeout_list
3624  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3625  * only a single TMF operation at a time, so we serially abort/bdr, etc,
3626  * the timedout transactions.  The next TMF is issued either by the
3627  * completion handler of the current TMF waking our recovery thread,
3628  * or the TMF timeout handler causing a hard reset sequence.
3629  */
3630 static void
3631 mpt_recover_commands(struct mpt_softc *mpt)
3632 {
3633 	request_t	   *req;
3634 	union ccb	   *ccb;
3635 	int		    error;
3636 
3637 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3638 		/*
3639 		 * No work to do- leave.
3640 		 */
3641 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3642 		return;
3643 	}
3644 
3645 	/*
3646 	 * Flush any commands whose completion coincides with their timeout.
3647 	 */
3648 	mpt_intr(mpt);
3649 
3650 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3651 		/*
3652 		 * The timedout commands have already
3653 		 * completed.  This typically means
3654 		 * that either the timeout value was on
3655 		 * the hairy edge of what the device
3656 		 * requires or - more likely - interrupts
3657 		 * are not happening.
3658 		 */
3659 		mpt_prt(mpt, "Timedout requests already complete. "
3660 		    "Interrupts may not be functioning.\n");
3661 		mpt_enable_ints(mpt);
3662 		return;
3663 	}
3664 
3665 	/*
3666 	 * We have no visibility into the current state of the
3667 	 * controller, so attempt to abort the commands in the
3668 	 * order they timed-out. For initiator commands, we
3669 	 * depend on the reply handler pulling requests off
3670 	 * the timeout list.
3671 	 */
3672 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3673 		uint16_t status;
3674 		uint8_t response;
3675 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3676 
3677 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
3678 		    req, req->serno, hdrp->Function);
3679 		ccb = req->ccb;
3680 		if (ccb == NULL) {
3681 			mpt_prt(mpt, "null ccb in timed out request. "
3682 			    "Resetting Controller.\n");
3683 			mpt_reset(mpt, TRUE);
3684 			continue;
3685 		}
3686 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
3687 
3688 		/*
3689 		 * Check to see if this is not an initiator command and
3690 		 * deal with it differently if it is.
3691 		 */
3692 		switch (hdrp->Function) {
3693 		case MPI_FUNCTION_SCSI_IO_REQUEST:
3694 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
3695 			break;
3696 		default:
3697 			/*
3698 			 * XXX: FIX ME: need to abort target assists...
3699 			 */
3700 			mpt_prt(mpt, "just putting it back on the pend q\n");
3701 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
3702 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
3703 			    links);
3704 			continue;
3705 		}
3706 
3707 		error = mpt_scsi_send_tmf(mpt,
3708 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3709 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
3710 		    htole32(req->index | scsi_io_handler_id), TRUE);
3711 
3712 		if (error != 0) {
3713 			/*
3714 			 * mpt_scsi_send_tmf hard resets on failure, so no
3715 			 * need to do so here.  Our queue should be emptied
3716 			 * by the hard reset.
3717 			 */
3718 			continue;
3719 		}
3720 
3721 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
3722 		    REQ_STATE_DONE, TRUE, 500);
3723 
3724 		status = mpt->tmf_req->IOCStatus;
3725 		response = mpt->tmf_req->ResponseCode;
3726 		mpt->tmf_req->state = REQ_STATE_FREE;
3727 
3728 		if (error != 0) {
3729 			/*
3730 			 * If we've errored out,, reset the controller.
3731 			 */
3732 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
3733 			    "Resetting controller\n");
3734 			mpt_reset(mpt, TRUE);
3735 			continue;
3736 		}
3737 
3738 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
3739 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
3740 			    "Resetting controller.\n", status);
3741 			mpt_reset(mpt, TRUE);
3742 			continue;
3743 		}
3744 
3745 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
3746 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
3747 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
3748 			    "Resetting controller.\n", response);
3749 			mpt_reset(mpt, TRUE);
3750 			continue;
3751 		}
3752 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
3753 	}
3754 }
3755 
3756 /************************ Target Mode Support ****************************/
3757 static void
3758 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
3759 {
3760 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
3761 	PTR_SGE_TRANSACTION32 tep;
3762 	PTR_SGE_SIMPLE32 se;
3763 	bus_addr_t paddr;
3764 
3765 	paddr = req->req_pbuf;
3766 	paddr += MPT_RQSL(mpt);
3767 
3768 	fc = req->req_vbuf;
3769 	memset(fc, 0, MPT_REQUEST_AREA);
3770 	fc->BufferCount = 1;
3771 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
3772 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
3773 
3774 	/*
3775 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
3776 	 * consist of a TE SGL element (with details length of zero)
3777 	 * followe by a SIMPLE SGL element which holds the address
3778 	 * of the buffer.
3779 	 */
3780 
3781 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
3782 
3783 	tep->ContextSize = 4;
3784 	tep->Flags = 0;
3785 	tep->TransactionContext[0] = htole32(ioindex);
3786 
3787 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
3788 	se->FlagsLength =
3789 		MPI_SGE_FLAGS_HOST_TO_IOC	|
3790 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
3791 		MPI_SGE_FLAGS_LAST_ELEMENT	|
3792 		MPI_SGE_FLAGS_END_OF_LIST	|
3793 		MPI_SGE_FLAGS_END_OF_BUFFER;
3794 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
3795 	se->FlagsLength |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
3796 	se->Address = (uint32_t) paddr;
3797 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3798 	    "add ELS index %d ioindex %d for %p:%u\n",
3799 	    req->index, ioindex, req, req->serno);
3800 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
3801 	    ("mpt_fc_post_els: request not locked"));
3802 	mpt_send_cmd(mpt, req);
3803 }
3804 
3805 static void
3806 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
3807 {
3808 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
3809 	PTR_CMD_BUFFER_DESCRIPTOR cb;
3810 	bus_addr_t paddr;
3811 
3812 	paddr = req->req_pbuf;
3813 	paddr += MPT_RQSL(mpt);
3814 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
3815 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
3816 
3817 	fc = req->req_vbuf;
3818 	fc->BufferCount = 1;
3819 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
3820 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3821 
3822 	cb = &fc->Buffer[0];
3823 	cb->IoIndex = htole16(ioindex);
3824 	cb->u.PhysicalAddress32 = (U32) paddr;
3825 
3826 	mpt_check_doorbell(mpt);
3827 	mpt_send_cmd(mpt, req);
3828 }
3829 
3830 static int
3831 mpt_add_els_buffers(struct mpt_softc *mpt)
3832 {
3833 	int i;
3834 
3835 	if (mpt->is_fc == 0) {
3836 		return (TRUE);
3837 	}
3838 
3839 	if (mpt->els_cmds_allocated) {
3840 		return (TRUE);
3841 	}
3842 
3843 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
3844 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3845 
3846 	if (mpt->els_cmd_ptrs == NULL) {
3847 		return (FALSE);
3848 	}
3849 
3850 	/*
3851 	 * Feed the chip some ELS buffer resources
3852 	 */
3853 	for (i = 0; i < MPT_MAX_ELS; i++) {
3854 		request_t *req = mpt_get_request(mpt, FALSE);
3855 		if (req == NULL) {
3856 			break;
3857 		}
3858 		req->state |= REQ_STATE_LOCKED;
3859 		mpt->els_cmd_ptrs[i] = req;
3860 		mpt_fc_post_els(mpt, req, i);
3861 	}
3862 
3863 	if (i == 0) {
3864 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
3865 		free(mpt->els_cmd_ptrs, M_DEVBUF);
3866 		mpt->els_cmd_ptrs = NULL;
3867 		return (FALSE);
3868 	}
3869 	if (i != MPT_MAX_ELS) {
3870 		mpt_lprt(mpt, MPT_PRT_INFO,
3871 		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
3872 	}
3873 	mpt->els_cmds_allocated = i;
3874 	return(TRUE);
3875 }
3876 
3877 static int
3878 mpt_add_target_commands(struct mpt_softc *mpt)
3879 {
3880 	int i, max;
3881 
3882 	if (mpt->tgt_cmd_ptrs) {
3883 		return (TRUE);
3884 	}
3885 
3886 	max = MPT_MAX_REQUESTS(mpt) >> 1;
3887 	if (max > mpt->mpt_max_tgtcmds) {
3888 		max = mpt->mpt_max_tgtcmds;
3889 	}
3890 	mpt->tgt_cmd_ptrs =
3891 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
3892 	if (mpt->tgt_cmd_ptrs == NULL) {
3893 		mpt_prt(mpt,
3894 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
3895 		return (FALSE);
3896 	}
3897 
3898 	for (i = 0; i < max; i++) {
3899 		request_t *req;
3900 
3901 		req = mpt_get_request(mpt, FALSE);
3902 		if (req == NULL) {
3903 			break;
3904 		}
3905 		req->state |= REQ_STATE_LOCKED;
3906 		mpt->tgt_cmd_ptrs[i] = req;
3907 		mpt_post_target_command(mpt, req, i);
3908 	}
3909 
3910 
3911 	if (i == 0) {
3912 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
3913 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
3914 		mpt->tgt_cmd_ptrs = NULL;
3915 		return (FALSE);
3916 	}
3917 
3918 	mpt->tgt_cmds_allocated = i;
3919 
3920 	if (i < max) {
3921 		mpt_lprt(mpt, MPT_PRT_INFO,
3922 		    "added %d of %d target bufs\n", i, max);
3923 	}
3924 	return (i);
3925 }
3926 
3927 static int
3928 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3929 {
3930 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3931 		mpt->twildcard = 1;
3932 	} else if (lun >= MPT_MAX_LUNS) {
3933 		return (EINVAL);
3934 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3935 		return (EINVAL);
3936 	}
3937 	if (mpt->tenabled == 0) {
3938 		if (mpt->is_fc) {
3939 			(void) mpt_fc_reset_link(mpt, 0);
3940 		}
3941 		mpt->tenabled = 1;
3942 	}
3943 	if (lun == CAM_LUN_WILDCARD) {
3944 		mpt->trt_wildcard.enabled = 1;
3945 	} else {
3946 		mpt->trt[lun].enabled = 1;
3947 	}
3948 	return (0);
3949 }
3950 
3951 static int
3952 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3953 {
3954 	int i;
3955 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3956 		mpt->twildcard = 0;
3957 	} else if (lun >= MPT_MAX_LUNS) {
3958 		return (EINVAL);
3959 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3960 		return (EINVAL);
3961 	}
3962 	if (lun == CAM_LUN_WILDCARD) {
3963 		mpt->trt_wildcard.enabled = 0;
3964 	} else {
3965 		mpt->trt[lun].enabled = 0;
3966 	}
3967 	for (i = 0; i < MPT_MAX_LUNS; i++) {
3968 		if (mpt->trt[lun].enabled) {
3969 			break;
3970 		}
3971 	}
3972 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
3973 		if (mpt->is_fc) {
3974 			(void) mpt_fc_reset_link(mpt, 0);
3975 		}
3976 		mpt->tenabled = 0;
3977 	}
3978 	return (0);
3979 }
3980 
3981 /*
3982  * Called with MPT lock held
3983  */
3984 static void
3985 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
3986 {
3987 	struct ccb_scsiio *csio = &ccb->csio;
3988 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
3989 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
3990 
3991 	switch (tgt->state) {
3992 	case TGT_STATE_IN_CAM:
3993 		break;
3994 	case TGT_STATE_MOVING_DATA:
3995 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3996 		xpt_freeze_simq(mpt->sim, 1);
3997 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3998 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3999 		MPTLOCK_2_CAMLOCK(mpt);
4000 		xpt_done(ccb);
4001 		CAMLOCK_2_MPTLOCK(mpt);
4002 		return;
4003 	default:
4004 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4005 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4006 		mpt_tgt_dump_req_state(mpt, cmd_req);
4007 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4008 		MPTLOCK_2_CAMLOCK(mpt);
4009 		xpt_done(ccb);
4010 		CAMLOCK_2_MPTLOCK(mpt);
4011 		return;
4012 	}
4013 
4014 	if (csio->dxfer_len) {
4015 		bus_dmamap_callback_t *cb;
4016 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4017 		request_t *req;
4018 
4019 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4020 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
4021 
4022 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4023 			if (mpt->outofbeer == 0) {
4024 				mpt->outofbeer = 1;
4025 				xpt_freeze_simq(mpt->sim, 1);
4026 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4027 			}
4028 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4029 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4030 			MPTLOCK_2_CAMLOCK(mpt);
4031 			xpt_done(ccb);
4032 			CAMLOCK_2_MPTLOCK(mpt);
4033 			return;
4034 		}
4035 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4036 		if (sizeof (bus_addr_t) > 4) {
4037 			cb = mpt_execute_req_a64;
4038 		} else {
4039 			cb = mpt_execute_req;
4040 		}
4041 
4042 		req->ccb = ccb;
4043 		ccb->ccb_h.ccb_req_ptr = req;
4044 
4045 		/*
4046 		 * Record the currently active ccb and the
4047 		 * request for it in our target state area.
4048 		 */
4049 		tgt->ccb = ccb;
4050 		tgt->req = req;
4051 
4052 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4053 		ta = req->req_vbuf;
4054 
4055 		if (mpt->is_sas) {
4056 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4057 			     cmd_req->req_vbuf;
4058 			ta->QueueTag = ssp->InitiatorTag;
4059 		} else if (mpt->is_spi) {
4060 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4061 			     cmd_req->req_vbuf;
4062 			ta->QueueTag = sp->Tag;
4063 		}
4064 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4065 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4066 		ta->ReplyWord = htole32(tgt->reply_desc);
4067 		if (csio->ccb_h.target_lun > 256) {
4068 			ta->LUN[0] =
4069 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4070 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4071 		} else {
4072 			ta->LUN[1] = csio->ccb_h.target_lun;
4073 		}
4074 
4075 		ta->RelativeOffset = tgt->bytes_xfered;
4076 		ta->DataLength = ccb->csio.dxfer_len;
4077 		if (ta->DataLength > tgt->resid) {
4078 			ta->DataLength = tgt->resid;
4079 		}
4080 
4081 		/*
4082 		 * XXX Should be done after data transfer completes?
4083 		 */
4084 		tgt->resid -= csio->dxfer_len;
4085 		tgt->bytes_xfered += csio->dxfer_len;
4086 
4087 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4088 			ta->TargetAssistFlags |=
4089 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4090 		}
4091 
4092 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4093 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4094 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4095 			ta->TargetAssistFlags |=
4096 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4097 		}
4098 #endif
4099 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4100 
4101 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4102 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4103 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4104 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4105 
4106 		MPTLOCK_2_CAMLOCK(mpt);
4107 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4108 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4109 				int error;
4110 				int s = splsoftvm();
4111 				error = bus_dmamap_load(mpt->buffer_dmat,
4112 				    req->dmap, csio->data_ptr, csio->dxfer_len,
4113 				    cb, req, 0);
4114 				splx(s);
4115 				if (error == EINPROGRESS) {
4116 					xpt_freeze_simq(mpt->sim, 1);
4117 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4118 				}
4119 			} else {
4120 				/*
4121 				 * We have been given a pointer to single
4122 				 * physical buffer.
4123 				 */
4124 				struct bus_dma_segment seg;
4125 				seg.ds_addr = (bus_addr_t)
4126 				    (vm_offset_t)csio->data_ptr;
4127 				seg.ds_len = csio->dxfer_len;
4128 				(*cb)(req, &seg, 1, 0);
4129 			}
4130 		} else {
4131 			/*
4132 			 * We have been given a list of addresses.
4133 			 * This case could be easily supported but they are not
4134 			 * currently generated by the CAM subsystem so there
4135 			 * is no point in wasting the time right now.
4136 			 */
4137 			struct bus_dma_segment *sgs;
4138 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4139 				(*cb)(req, NULL, 0, EFAULT);
4140 			} else {
4141 				/* Just use the segments provided */
4142 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4143 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4144 			}
4145 		}
4146 		CAMLOCK_2_MPTLOCK(mpt);
4147 	} else {
4148 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4149 
4150 		/*
4151 		 * XXX: I don't know why this seems to happen, but
4152 		 * XXX: completing the CCB seems to make things happy.
4153 		 * XXX: This seems to happen if the initiator requests
4154 		 * XXX: enough data that we have to do multiple CTIOs.
4155 		 */
4156 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4157 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4158 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4159 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4160 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4161 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4162 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4163 			MPTLOCK_2_CAMLOCK(mpt);
4164 			xpt_done(ccb);
4165 			CAMLOCK_2_MPTLOCK(mpt);
4166 			return;
4167 		}
4168 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4169 			sp = sense;
4170 			memcpy(sp, &csio->sense_data,
4171 			   min(csio->sense_len, MPT_SENSE_SIZE));
4172 		}
4173 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4174 	}
4175 }
4176 
4177 static void
4178 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4179     uint32_t lun, int send, uint8_t *data, size_t length)
4180 {
4181 	mpt_tgt_state_t *tgt;
4182 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4183 	SGE_SIMPLE32 *se;
4184 	uint32_t flags;
4185 	uint8_t *dptr;
4186 	bus_addr_t pptr;
4187 	request_t *req;
4188 
4189 	if (length == 0) {
4190 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4191 		return;
4192 	}
4193 
4194 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4195 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4196 		mpt_prt(mpt, "out of resources- dropping local response\n");
4197 		return;
4198 	}
4199 	tgt->is_local = 1;
4200 
4201 
4202 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4203 	ta = req->req_vbuf;
4204 
4205 	if (mpt->is_sas) {
4206 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4207 		ta->QueueTag = ssp->InitiatorTag;
4208 	} else if (mpt->is_spi) {
4209 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4210 		ta->QueueTag = sp->Tag;
4211 	}
4212 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4213 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4214 	ta->ReplyWord = htole32(tgt->reply_desc);
4215 	if (lun > 256) {
4216 		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4217 		ta->LUN[1] = lun & 0xff;
4218 	} else {
4219 		ta->LUN[1] = lun;
4220 	}
4221 	ta->RelativeOffset = 0;
4222 	ta->DataLength = length;
4223 
4224 	dptr = req->req_vbuf;
4225 	dptr += MPT_RQSL(mpt);
4226 	pptr = req->req_pbuf;
4227 	pptr += MPT_RQSL(mpt);
4228 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4229 
4230 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4231 	memset(se, 0,sizeof (*se));
4232 
4233 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4234 	if (send) {
4235 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4236 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4237 	}
4238 	se->Address = pptr;
4239 	MPI_pSGE_SET_LENGTH(se, length);
4240 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4241 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4242 	MPI_pSGE_SET_FLAGS(se, flags);
4243 
4244 	tgt->ccb = NULL;
4245 	tgt->req = req;
4246 	tgt->resid = 0;
4247 	tgt->bytes_xfered = length;
4248 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4249 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4250 #else
4251 	tgt->state = TGT_STATE_MOVING_DATA;
4252 #endif
4253 	mpt_send_cmd(mpt, req);
4254 }
4255 
4256 /*
4257  * Abort queued up CCBs
4258  */
4259 static cam_status
4260 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4261 {
4262 	struct mpt_hdr_stailq *lp;
4263 	struct ccb_hdr *srch;
4264 	int found = 0;
4265 	union ccb *accb = ccb->cab.abort_ccb;
4266 	tgt_resource_t *trtp;
4267 
4268 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4269 
4270 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4271 		trtp = &mpt->trt_wildcard;
4272 	} else {
4273 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4274 	}
4275 
4276 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4277 		lp = &trtp->atios;
4278 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4279 		lp = &trtp->inots;
4280 	} else {
4281 		return (CAM_REQ_INVALID);
4282 	}
4283 
4284 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4285 		if (srch == &accb->ccb_h) {
4286 			found = 1;
4287 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4288 			break;
4289 		}
4290 	}
4291 	if (found) {
4292 		accb->ccb_h.status = CAM_REQ_ABORTED;
4293 		xpt_done(accb);
4294 		return (CAM_REQ_CMP);
4295 	}
4296 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
4297 	return (CAM_PATH_INVALID);
4298 }
4299 
4300 /*
4301  * Ask the MPT to abort the current target command
4302  */
4303 static int
4304 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4305 {
4306 	int error;
4307 	request_t *req;
4308 	PTR_MSG_TARGET_MODE_ABORT abtp;
4309 
4310 	req = mpt_get_request(mpt, FALSE);
4311 	if (req == NULL) {
4312 		return (-1);
4313 	}
4314 	abtp = req->req_vbuf;
4315 	memset(abtp, 0, sizeof (*abtp));
4316 
4317 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4318 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4319 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4320 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4321 	error = 0;
4322 	if (mpt->is_fc || mpt->is_sas) {
4323 		mpt_send_cmd(mpt, req);
4324 	} else {
4325 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4326 	}
4327 	return (error);
4328 }
4329 
4330 /*
4331  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4332  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4333  * FC929 to set bogus FC_RSP fields (nonzero residuals
4334  * but w/o RESID fields set). This causes QLogic initiators
4335  * to think maybe that a frame was lost.
4336  *
4337  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4338  * we use allocated requests to do TARGET_ASSIST and we
4339  * need to know when to release them.
4340  */
4341 
4342 static void
4343 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4344     uint8_t status, uint8_t const *sense_data)
4345 {
4346 	uint8_t *cmd_vbuf;
4347 	mpt_tgt_state_t *tgt;
4348 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4349 	request_t *req;
4350 	bus_addr_t paddr;
4351 	int resplen = 0;
4352 
4353 	cmd_vbuf = cmd_req->req_vbuf;
4354 	cmd_vbuf += MPT_RQSL(mpt);
4355 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4356 
4357 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4358 		if (mpt->outofbeer == 0) {
4359 			mpt->outofbeer = 1;
4360 			xpt_freeze_simq(mpt->sim, 1);
4361 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4362 		}
4363 		if (ccb) {
4364 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4365 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4366 			MPTLOCK_2_CAMLOCK(mpt);
4367 			xpt_done(ccb);
4368 			CAMLOCK_2_MPTLOCK(mpt);
4369 		} else {
4370 			mpt_prt(mpt,
4371 			    "could not allocate status request- dropping\n");
4372 		}
4373 		return;
4374 	}
4375 	req->ccb = ccb;
4376 	if (ccb) {
4377 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4378 		ccb->ccb_h.ccb_req_ptr = req;
4379 	}
4380 
4381 	/*
4382 	 * Record the currently active ccb, if any, and the
4383 	 * request for it in our target state area.
4384 	 */
4385 	tgt->ccb = ccb;
4386 	tgt->req = req;
4387 	tgt->state = TGT_STATE_SENDING_STATUS;
4388 
4389 	tp = req->req_vbuf;
4390 	paddr = req->req_pbuf;
4391 	paddr += MPT_RQSL(mpt);
4392 
4393 	memset(tp, 0, sizeof (*tp));
4394 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4395 	if (mpt->is_fc) {
4396 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4397 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4398 		uint8_t *sts_vbuf;
4399 		uint32_t *rsp;
4400 
4401 		sts_vbuf = req->req_vbuf;
4402 		sts_vbuf += MPT_RQSL(mpt);
4403 		rsp = (uint32_t *) sts_vbuf;
4404 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4405 
4406 		/*
4407 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4408 		 * It has to be big-endian in memory and is organized
4409 		 * in 32 bit words, which are much easier to deal with
4410 		 * as words which are swizzled as needed.
4411 		 *
4412 		 * All we're filling here is the FC_RSP payload.
4413 		 * We may just have the chip synthesize it if
4414 		 * we have no residual and an OK status.
4415 		 *
4416 		 */
4417 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4418 
4419 		rsp[2] = status;
4420 		if (tgt->resid) {
4421 			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
4422 			rsp[3] = htobe32(tgt->resid);
4423 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4424 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4425 #endif
4426 		}
4427 		if (status == SCSI_STATUS_CHECK_COND) {
4428 			int i;
4429 
4430 			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
4431 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4432 			if (sense_data) {
4433 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4434 			} else {
4435 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4436 				    "TION but no sense data?\n");
4437 				memset(&rsp, 0, MPT_SENSE_SIZE);
4438 			}
4439 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4440 				rsp[i] = htobe32(rsp[i]);
4441 			}
4442 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4443 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4444 #endif
4445 		}
4446 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4447 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4448 #endif
4449 		rsp[2] = htobe32(rsp[2]);
4450 	} else if (mpt->is_sas) {
4451 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4452 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4453 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4454 	} else {
4455 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4456 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4457 		tp->StatusCode = status;
4458 		tp->QueueTag = htole16(sp->Tag);
4459 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4460 	}
4461 
4462 	tp->ReplyWord = htole32(tgt->reply_desc);
4463 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4464 
4465 #ifdef	WE_CAN_USE_AUTO_REPOST
4466 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4467 #endif
4468 	if (status == SCSI_STATUS_OK && resplen == 0) {
4469 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4470 	} else {
4471 		tp->StatusDataSGE.u.Address32 = (uint32_t) paddr;
4472 		tp->StatusDataSGE.FlagsLength =
4473 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4474 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4475 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4476 			MPI_SGE_FLAGS_END_OF_LIST	|
4477 			MPI_SGE_FLAGS_END_OF_BUFFER;
4478 		tp->StatusDataSGE.FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
4479 		tp->StatusDataSGE.FlagsLength |= resplen;
4480 	}
4481 
4482 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4483 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4484 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4485 	    req->serno, tgt->resid);
4486 	if (ccb) {
4487 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4488 		ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz);
4489 	}
4490 	mpt_send_cmd(mpt, req);
4491 }
4492 
4493 static void
4494 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4495     tgt_resource_t *trtp, int init_id)
4496 {
4497 	struct ccb_immed_notify *inot;
4498 	mpt_tgt_state_t *tgt;
4499 
4500 	tgt = MPT_TGT_STATE(mpt, req);
4501 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4502 	if (inot == NULL) {
4503 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4504 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4505 		return;
4506 	}
4507 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4508 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4509 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4510 
4511 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4512 	inot->sense_len = 0;
4513 	memset(inot->message_args, 0, sizeof (inot->message_args));
4514 	inot->initiator_id = init_id;	/* XXX */
4515 
4516 	/*
4517 	 * This is a somewhat grotesque attempt to map from task management
4518 	 * to old style SCSI messages. God help us all.
4519 	 */
4520 	switch (fc) {
4521 	case MPT_ABORT_TASK_SET:
4522 		inot->message_args[0] = MSG_ABORT_TAG;
4523 		break;
4524 	case MPT_CLEAR_TASK_SET:
4525 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4526 		break;
4527 	case MPT_TARGET_RESET:
4528 		inot->message_args[0] = MSG_TARGET_RESET;
4529 		break;
4530 	case MPT_CLEAR_ACA:
4531 		inot->message_args[0] = MSG_CLEAR_ACA;
4532 		break;
4533 	case MPT_TERMINATE_TASK:
4534 		inot->message_args[0] = MSG_ABORT_TAG;
4535 		break;
4536 	default:
4537 		inot->message_args[0] = MSG_NOOP;
4538 		break;
4539 	}
4540 	tgt->ccb = (union ccb *) inot;
4541 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4542 	MPTLOCK_2_CAMLOCK(mpt);
4543 	xpt_done((union ccb *)inot);
4544 	CAMLOCK_2_MPTLOCK(mpt);
4545 }
4546 
4547 static void
4548 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4549 {
4550 	struct ccb_accept_tio *atiop;
4551 	lun_id_t lun;
4552 	int tag_action = 0;
4553 	mpt_tgt_state_t *tgt;
4554 	tgt_resource_t *trtp = NULL;
4555 	U8 *lunptr;
4556 	U8 *vbuf;
4557 	U16 itag;
4558 	U16 ioindex;
4559 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4560 	uint8_t *cdbp;
4561 
4562 	/*
4563 	 * First, DMA sync the received command-
4564 	 * which is in the *request* * phys area.
4565 	 *
4566 	 * XXX: We could optimize this for a range
4567 	 */
4568 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
4569 	    BUS_DMASYNC_POSTREAD);
4570 
4571 	/*
4572 	 * Stash info for the current command where we can get at it later.
4573 	 */
4574 	vbuf = req->req_vbuf;
4575 	vbuf += MPT_RQSL(mpt);
4576 
4577 	/*
4578 	 * Get our state pointer set up.
4579 	 */
4580 	tgt = MPT_TGT_STATE(mpt, req);
4581 	if (tgt->state != TGT_STATE_LOADED) {
4582 		mpt_tgt_dump_req_state(mpt, req);
4583 		panic("bad target state in mpt_scsi_tgt_atio");
4584 	}
4585 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4586 	tgt->state = TGT_STATE_IN_CAM;
4587 	tgt->reply_desc = reply_desc;
4588 	ioindex = GET_IO_INDEX(reply_desc);
4589 
4590 	if (mpt->is_fc) {
4591 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4592 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4593 		if (fc->FcpCntl[2]) {
4594 			/*
4595 			 * Task Management Request
4596 			 */
4597 			switch (fc->FcpCntl[2]) {
4598 			case 0x2:
4599 				fct = MPT_ABORT_TASK_SET;
4600 				break;
4601 			case 0x4:
4602 				fct = MPT_CLEAR_TASK_SET;
4603 				break;
4604 			case 0x20:
4605 				fct = MPT_TARGET_RESET;
4606 				break;
4607 			case 0x40:
4608 				fct = MPT_CLEAR_ACA;
4609 				break;
4610 			case 0x80:
4611 				fct = MPT_TERMINATE_TASK;
4612 				break;
4613 			default:
4614 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4615 				    fc->FcpCntl[2]);
4616 				mpt_scsi_tgt_status(mpt, 0, req,
4617 				    SCSI_STATUS_OK, 0);
4618 				return;
4619 			}
4620 		} else {
4621 			switch (fc->FcpCntl[1]) {
4622 			case 0:
4623 				tag_action = MSG_SIMPLE_Q_TAG;
4624 				break;
4625 			case 1:
4626 				tag_action = MSG_HEAD_OF_Q_TAG;
4627 				break;
4628 			case 2:
4629 				tag_action = MSG_ORDERED_Q_TAG;
4630 				break;
4631 			default:
4632 				/*
4633 				 * Bah. Ignore Untagged Queing and ACA
4634 				 */
4635 				tag_action = MSG_SIMPLE_Q_TAG;
4636 				break;
4637 			}
4638 		}
4639 		tgt->resid = be32toh(fc->FcpDl);
4640 		cdbp = fc->FcpCdb;
4641 		lunptr = fc->FcpLun;
4642 		itag = be16toh(fc->OptionalOxid);
4643 	} else if (mpt->is_sas) {
4644 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4645 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4646 		cdbp = ssp->CDB;
4647 		lunptr = ssp->LogicalUnitNumber;
4648 		itag = ssp->InitiatorTag;
4649 	} else {
4650 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4651 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4652 		cdbp = sp->CDB;
4653 		lunptr = sp->LogicalUnitNumber;
4654 		itag = sp->Tag;
4655 	}
4656 
4657 	/*
4658 	 * Generate a simple lun
4659 	 */
4660 	switch (lunptr[0] & 0xc0) {
4661 	case 0x40:
4662 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
4663 		break;
4664 	case 0:
4665 		lun = lunptr[1];
4666 		break;
4667 	default:
4668 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
4669 		lun = 0xffff;
4670 		break;
4671 	}
4672 
4673 	/*
4674 	 * Deal with non-enabled or bad luns here.
4675 	 */
4676 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4677 	    mpt->trt[lun].enabled == 0) {
4678 		if (mpt->twildcard) {
4679 			trtp = &mpt->trt_wildcard;
4680 		} else if (fct == MPT_NIL_TMT_VALUE) {
4681 			/*
4682 			 * In this case, we haven't got an upstream listener
4683 			 * for either a specific lun or wildcard luns. We
4684 			 * have to make some sensible response. For regular
4685 			 * inquiry, just return some NOT HERE inquiry data.
4686 			 * For VPD inquiry, report illegal field in cdb.
4687 			 * For REQUEST SENSE, just return NO SENSE data.
4688 			 * REPORT LUNS gets illegal command.
4689 			 * All other commands get 'no such device'.
4690 			 */
4691 
4692 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
4693 
4694 			mpt_prt(mpt, "CMD 0x%x to unmanaged lun %u\n",
4695 			    cdbp[0], lun);
4696 
4697 			memset(buf, 0, MPT_SENSE_SIZE);
4698 			cond = SCSI_STATUS_CHECK_COND;
4699 			buf[0] = 0xf0;
4700 			buf[2] = 0x5;
4701 			buf[7] = 0x8;
4702 			sp = buf;
4703 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4704 
4705 			switch (cdbp[0]) {
4706 			case INQUIRY:
4707 			{
4708 				static uint8_t iqd[8] = {
4709 				    0x7f, 0x0, 0x4, 0x12, 0x0
4710 				};
4711 				if (cdbp[1] != 0) {
4712 					buf[12] = 0x26;
4713 					buf[13] = 0x01;
4714 					break;
4715 				}
4716 				mpt_prt(mpt, "local inquiry\n");
4717 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4718 				    iqd, sizeof (iqd));
4719 				return;
4720 			}
4721 			case REQUEST_SENSE:
4722 			{
4723 				buf[2] = 0x0;
4724 				mpt_prt(mpt, "local request sense\n");
4725 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4726 				    buf, sizeof (buf));
4727 				return;
4728 			}
4729 			case REPORT_LUNS:
4730 				buf[12] = 0x26;
4731 				break;
4732 			default:
4733 				buf[12] = 0x25;
4734 				break;
4735 			}
4736 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
4737 			return;
4738 		}
4739 		/* otherwise, leave trtp NULL */
4740 	} else {
4741 		trtp = &mpt->trt[lun];
4742 	}
4743 
4744 	/*
4745 	 * Deal with any task management
4746 	 */
4747 	if (fct != MPT_NIL_TMT_VALUE) {
4748 		if (trtp == NULL) {
4749 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
4750 			    fct);
4751 			mpt_scsi_tgt_status(mpt, 0, req,
4752 			    SCSI_STATUS_OK, 0);
4753 		} else {
4754 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
4755 			    GET_INITIATOR_INDEX(reply_desc));
4756 		}
4757 		return;
4758 	}
4759 
4760 
4761 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
4762 	if (atiop == NULL) {
4763 		mpt_lprt(mpt, MPT_PRT_WARN,
4764 		    "no ATIOs for lun %u- sending back %s\n", lun,
4765 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
4766 		mpt_scsi_tgt_status(mpt, NULL, req,
4767 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
4768 		    NULL);
4769 		return;
4770 	}
4771 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
4772 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4773 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
4774 	atiop->ccb_h.ccb_mpt_ptr = mpt;
4775 	atiop->ccb_h.status = CAM_CDB_RECVD;
4776 	atiop->ccb_h.target_lun = lun;
4777 	atiop->sense_len = 0;
4778 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
4779 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
4780 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
4781 
4782 	/*
4783 	 * The tag we construct here allows us to find the
4784 	 * original request that the command came in with.
4785 	 *
4786 	 * This way we don't have to depend on anything but the
4787 	 * tag to find things when CCBs show back up from CAM.
4788 	 */
4789 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4790 	tgt->tag_id = atiop->tag_id;
4791 	if (tag_action) {
4792 		atiop->tag_action = tag_action;
4793 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
4794 	}
4795 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4796 		int i;
4797 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
4798 		    atiop->ccb_h.target_lun);
4799 		for (i = 0; i < atiop->cdb_len; i++) {
4800 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
4801 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
4802 		}
4803 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
4804 	    	    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
4805 	}
4806 
4807 	MPTLOCK_2_CAMLOCK(mpt);
4808 	xpt_done((union ccb *)atiop);
4809 	CAMLOCK_2_MPTLOCK(mpt);
4810 }
4811 
4812 static void
4813 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
4814 {
4815 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4816 
4817 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
4818 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
4819 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
4820 	    tgt->tag_id, tgt->state);
4821 }
4822 
4823 static void
4824 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
4825 {
4826 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
4827 	    req->index, req->index, req->state);
4828 	mpt_tgt_dump_tgt_state(mpt, req);
4829 }
4830 
4831 static int
4832 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
4833     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
4834 {
4835 	int dbg;
4836 	union ccb *ccb;
4837 	U16 status;
4838 
4839 	if (reply_frame == NULL) {
4840 		/*
4841 		 * Figure out what the state of the command is.
4842 		 */
4843 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4844 
4845 #ifdef	INVARIANTS
4846 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
4847 		if (tgt->req) {
4848 			mpt_req_not_spcl(mpt, tgt->req,
4849 			    "turbo scsi_tgt_reply associated req", __LINE__);
4850 		}
4851 #endif
4852 		switch(tgt->state) {
4853 		case TGT_STATE_LOADED:
4854 			/*
4855 			 * This is a new command starting.
4856 			 */
4857 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
4858 			break;
4859 		case TGT_STATE_MOVING_DATA:
4860 		{
4861 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4862 
4863 			ccb = tgt->ccb;
4864 			if (tgt->req == NULL) {
4865 				panic("mpt: turbo target reply with null "
4866 				    "associated request moving data");
4867 				/* NOTREACHED */
4868 			}
4869 			if (ccb == NULL) {
4870 				if (tgt->is_local == 0) {
4871 					panic("mpt: turbo target reply with "
4872 					    "null associated ccb moving data");
4873 					/* NOTREACHED */
4874 				}
4875 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4876 				    "TARGET_ASSIST local done\n");
4877 				TAILQ_REMOVE(&mpt->request_pending_list,
4878 				    tgt->req, links);
4879 				mpt_free_request(mpt, tgt->req);
4880 				tgt->req = NULL;
4881 				mpt_scsi_tgt_status(mpt, NULL, req,
4882 				    0, NULL);
4883 				return (TRUE);
4884 			}
4885 			tgt->ccb = NULL;
4886 			tgt->nxfers++;
4887 			untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
4888 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4889 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
4890 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
4891 			/*
4892 			 * Free the Target Assist Request
4893 			 */
4894 			KASSERT(tgt->req->ccb == ccb,
4895 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
4896 			    tgt->req->serno, tgt->req->ccb));
4897 			TAILQ_REMOVE(&mpt->request_pending_list,
4898 			    tgt->req, links);
4899 			mpt_free_request(mpt, tgt->req);
4900 			tgt->req = NULL;
4901 
4902 			/*
4903 			 * Do we need to send status now? That is, are
4904 			 * we done with all our data transfers?
4905 			 */
4906 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4907 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4908 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4909 				KASSERT(ccb->ccb_h.status,
4910 				    ("zero ccb sts at %d\n", __LINE__));
4911 				tgt->state = TGT_STATE_IN_CAM;
4912 				if (mpt->outofbeer) {
4913 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4914 					mpt->outofbeer = 0;
4915 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4916 				}
4917 				MPTLOCK_2_CAMLOCK(mpt);
4918 				xpt_done(ccb);
4919 				CAMLOCK_2_MPTLOCK(mpt);
4920 				break;
4921 			}
4922 			/*
4923 			 * Otherwise, send status (and sense)
4924 			 */
4925 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4926 				sp = sense;
4927 				memcpy(sp, &ccb->csio.sense_data,
4928 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
4929 			}
4930 			mpt_scsi_tgt_status(mpt, ccb, req,
4931 			    ccb->csio.scsi_status, sp);
4932 			break;
4933 		}
4934 		case TGT_STATE_SENDING_STATUS:
4935 		case TGT_STATE_MOVING_DATA_AND_STATUS:
4936 		{
4937 			int ioindex;
4938 			ccb = tgt->ccb;
4939 
4940 			if (tgt->req == NULL) {
4941 				panic("mpt: turbo target reply with null "
4942 				    "associated request sending status");
4943 				/* NOTREACHED */
4944 			}
4945 
4946 			if (ccb) {
4947 				tgt->ccb = NULL;
4948 				if (tgt->state ==
4949 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
4950 					tgt->nxfers++;
4951 				}
4952 				untimeout(mpt_timeout, ccb,
4953 				    ccb->ccb_h.timeout_ch);
4954 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4955 					ccb->ccb_h.status |= CAM_SENT_SENSE;
4956 				}
4957 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4958 				    "TARGET_STATUS tag %x sts %x flgs %x req "
4959 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
4960 				    ccb->ccb_h.flags, tgt->req);
4961 				/*
4962 				 * Free the Target Send Status Request
4963 				 */
4964 				KASSERT(tgt->req->ccb == ccb,
4965 				    ("tgt->req %p:%u tgt->req->ccb %p",
4966 				    tgt->req, tgt->req->serno, tgt->req->ccb));
4967 				/*
4968 				 * Notify CAM that we're done
4969 				 */
4970 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4971 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4972 				KASSERT(ccb->ccb_h.status,
4973 				    ("ZERO ccb sts at %d\n", __LINE__));
4974 				tgt->ccb = NULL;
4975 			} else {
4976 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4977 				    "TARGET_STATUS non-CAM for  req %p:%u\n",
4978 				    tgt->req, tgt->req->serno);
4979 			}
4980 			TAILQ_REMOVE(&mpt->request_pending_list,
4981 			    tgt->req, links);
4982 			mpt_free_request(mpt, tgt->req);
4983 			tgt->req = NULL;
4984 
4985 			/*
4986 			 * And re-post the Command Buffer.
4987 			 * This will reset the state.
4988 			 */
4989 			ioindex = GET_IO_INDEX(reply_desc);
4990 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4991 			tgt->is_local = 0;
4992 			mpt_post_target_command(mpt, req, ioindex);
4993 
4994 			/*
4995 			 * And post a done for anyone who cares
4996 			 */
4997 			if (ccb) {
4998 				if (mpt->outofbeer) {
4999 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5000 					mpt->outofbeer = 0;
5001 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5002 				}
5003 				MPTLOCK_2_CAMLOCK(mpt);
5004 				xpt_done(ccb);
5005 				CAMLOCK_2_MPTLOCK(mpt);
5006 			}
5007 			break;
5008 		}
5009 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
5010 			tgt->state = TGT_STATE_LOADED;
5011 			break;
5012 		default:
5013 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5014 			    "Reply Function\n", tgt->state);
5015 		}
5016 		return (TRUE);
5017 	}
5018 
5019 	status = le16toh(reply_frame->IOCStatus);
5020 	if (status != MPI_IOCSTATUS_SUCCESS) {
5021 		dbg = MPT_PRT_ERROR;
5022 	} else {
5023 		dbg = MPT_PRT_DEBUG1;
5024 	}
5025 
5026 	mpt_lprt(mpt, dbg,
5027 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5028 	     req, req->serno, reply_frame, reply_frame->Function, status);
5029 
5030 	switch (reply_frame->Function) {
5031 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5032 	{
5033 		mpt_tgt_state_t *tgt;
5034 #ifdef	INVARIANTS
5035 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5036 #endif
5037 		if (status != MPI_IOCSTATUS_SUCCESS) {
5038 			/*
5039 			 * XXX What to do?
5040 			 */
5041 			break;
5042 		}
5043 		tgt = MPT_TGT_STATE(mpt, req);
5044 		KASSERT(tgt->state == TGT_STATE_LOADING,
5045 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
5046 		mpt_assign_serno(mpt, req);
5047 		tgt->state = TGT_STATE_LOADED;
5048 		break;
5049 	}
5050 	case MPI_FUNCTION_TARGET_ASSIST:
5051 #ifdef	INVARIANTS
5052 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5053 #endif
5054 		mpt_prt(mpt, "target assist completion\n");
5055 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5056 		mpt_free_request(mpt, req);
5057 		break;
5058 	case MPI_FUNCTION_TARGET_STATUS_SEND:
5059 #ifdef	INVARIANTS
5060 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5061 #endif
5062 		mpt_prt(mpt, "status send completion\n");
5063 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5064 		mpt_free_request(mpt, req);
5065 		break;
5066 	case MPI_FUNCTION_TARGET_MODE_ABORT:
5067 	{
5068 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5069 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5070 		PTR_MSG_TARGET_MODE_ABORT abtp =
5071 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5072 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5073 #ifdef	INVARIANTS
5074 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5075 #endif
5076 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5077 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5078 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5079 		mpt_free_request(mpt, req);
5080 		break;
5081 	}
5082 	default:
5083 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5084 		    "0x%x\n", reply_frame->Function);
5085 		break;
5086 	}
5087 	return (TRUE);
5088 }
5089