xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC  adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  *
61  * Support from LSI-Logic has also gone a great deal toward making this a
62  * workable subsystem and is gratefully acknowledged.
63  */
64 /*-
65  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66  * Copyright (c) 2005, WHEEL Sp. z o.o.
67  * Copyright (c) 2004, 2005 Justin T. Gibbs
68  * All rights reserved.
69  *
70  * Redistribution and use in source and binary forms, with or without
71  * modification, are permitted provided that the following conditions are
72  * met:
73  * 1. Redistributions of source code must retain the above copyright
74  *    notice, this list of conditions and the following disclaimer.
75  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76  *    substantially similar to the "NO WARRANTY" disclaimer below
77  *    ("Disclaimer") and any redistribution must be conditioned upon including
78  *    a substantially similar Disclaimer requirement for further binary
79  *    redistribution.
80  * 3. Neither the names of the above listed copyright holders nor the names
81  *    of any contributors may be used to endorse or promote products derived
82  *    from this software without specific prior written permission.
83  *
84  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95  */
96 #include <sys/cdefs.h>
97 __FBSDID("$FreeBSD$");
98 
99 #include <dev/mpt/mpt.h>
100 #include <dev/mpt/mpt_cam.h>
101 #include <dev/mpt/mpt_raid.h>
102 
103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/mpt/mpilib/mpi_init.h"
105 #include "dev/mpt/mpilib/mpi_targ.h"
106 #include "dev/mpt/mpilib/mpi_fc.h"
107 #include "dev/mpt/mpilib/mpi_sas.h"
108 #if __FreeBSD_version >= 500000
109 #include <sys/sysctl.h>
110 #endif
111 #include <sys/callout.h>
112 #include <sys/kthread.h>
113 
114 #if __FreeBSD_version >= 700025
115 #ifndef	CAM_NEW_TRAN_CODE
116 #define	CAM_NEW_TRAN_CODE	1
117 #endif
118 #endif
119 
120 static void mpt_poll(struct cam_sim *);
121 static timeout_t mpt_timeout;
122 static void mpt_action(struct cam_sim *, union ccb *);
123 static int
124 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
125 static void mpt_setwidth(struct mpt_softc *, int, int);
126 static void mpt_setsync(struct mpt_softc *, int, int, int);
127 static int mpt_update_spi_config(struct mpt_softc *, int);
128 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
129 
130 static mpt_reply_handler_t mpt_scsi_reply_handler;
131 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
132 static mpt_reply_handler_t mpt_fc_els_reply_handler;
133 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
134 					MSG_DEFAULT_REPLY *);
135 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
136 static int mpt_fc_reset_link(struct mpt_softc *, int);
137 
138 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
139 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
140 static void mpt_recovery_thread(void *arg);
141 static void mpt_recover_commands(struct mpt_softc *mpt);
142 
143 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
144     u_int, u_int, u_int, int);
145 
146 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
147 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
148 static int mpt_add_els_buffers(struct mpt_softc *mpt);
149 static int mpt_add_target_commands(struct mpt_softc *mpt);
150 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
151 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
152 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
153 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
154 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
155 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
156     uint8_t, uint8_t const *);
157 static void
158 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
159     tgt_resource_t *, int);
160 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
161 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
162 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
163 static mpt_reply_handler_t mpt_sata_pass_reply_handler;
164 
165 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
166 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
167 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
168 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
169 
170 static mpt_probe_handler_t	mpt_cam_probe;
171 static mpt_attach_handler_t	mpt_cam_attach;
172 static mpt_enable_handler_t	mpt_cam_enable;
173 static mpt_ready_handler_t	mpt_cam_ready;
174 static mpt_event_handler_t	mpt_cam_event;
175 static mpt_reset_handler_t	mpt_cam_ioc_reset;
176 static mpt_detach_handler_t	mpt_cam_detach;
177 
178 static struct mpt_personality mpt_cam_personality =
179 {
180 	.name		= "mpt_cam",
181 	.probe		= mpt_cam_probe,
182 	.attach		= mpt_cam_attach,
183 	.enable		= mpt_cam_enable,
184 	.ready		= mpt_cam_ready,
185 	.event		= mpt_cam_event,
186 	.reset		= mpt_cam_ioc_reset,
187 	.detach		= mpt_cam_detach,
188 };
189 
190 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
191 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
192 
193 int mpt_enable_sata_wc = -1;
194 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
195 
196 int
197 mpt_cam_probe(struct mpt_softc *mpt)
198 {
199 	int role;
200 
201 	/*
202 	 * Only attach to nodes that support the initiator or target role
203 	 * (or want to) or have RAID physical devices that need CAM pass-thru
204 	 * support.
205 	 */
206 	if (mpt->do_cfg_role) {
207 		role = mpt->cfg_role;
208 	} else {
209 		role = mpt->role;
210 	}
211 	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
212 	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
213 		return (0);
214 	}
215 	return (ENODEV);
216 }
217 
218 int
219 mpt_cam_attach(struct mpt_softc *mpt)
220 {
221 	struct cam_devq *devq;
222 	mpt_handler_t	 handler;
223 	int		 maxq;
224 	int		 error;
225 
226 	MPT_LOCK(mpt);
227 	TAILQ_INIT(&mpt->request_timeout_list);
228 	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
229 	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
230 
231 	handler.reply_handler = mpt_scsi_reply_handler;
232 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
233 				     &scsi_io_handler_id);
234 	if (error != 0) {
235 		MPT_UNLOCK(mpt);
236 		goto cleanup;
237 	}
238 
239 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
240 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
241 				     &scsi_tmf_handler_id);
242 	if (error != 0) {
243 		MPT_UNLOCK(mpt);
244 		goto cleanup;
245 	}
246 
247 	/*
248 	 * If we're fibre channel and could support target mode, we register
249 	 * an ELS reply handler and give it resources.
250 	 */
251 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
252 		handler.reply_handler = mpt_fc_els_reply_handler;
253 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
254 		    &fc_els_handler_id);
255 		if (error != 0) {
256 			MPT_UNLOCK(mpt);
257 			goto cleanup;
258 		}
259 		if (mpt_add_els_buffers(mpt) == FALSE) {
260 			error = ENOMEM;
261 			MPT_UNLOCK(mpt);
262 			goto cleanup;
263 		}
264 		maxq -= mpt->els_cmds_allocated;
265 	}
266 
267 	/*
268 	 * If we support target mode, we register a reply handler for it,
269 	 * but don't add command resources until we actually enable target
270 	 * mode.
271 	 */
272 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
273 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
274 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
275 		    &mpt->scsi_tgt_handler_id);
276 		if (error != 0) {
277 			MPT_UNLOCK(mpt);
278 			goto cleanup;
279 		}
280 	}
281 
282 	if (mpt->is_sas) {
283 		handler.reply_handler = mpt_sata_pass_reply_handler;
284 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
285 		    &sata_pass_handler_id);
286 		if (error != 0) {
287 			MPT_UNLOCK(mpt);
288 			goto cleanup;
289 		}
290 	}
291 
292 	/*
293 	 * We keep one request reserved for timeout TMF requests.
294 	 */
295 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
296 	if (mpt->tmf_req == NULL) {
297 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
298 		error = ENOMEM;
299 		MPT_UNLOCK(mpt);
300 		goto cleanup;
301 	}
302 
303 	/*
304 	 * Mark the request as free even though not on the free list.
305 	 * There is only one TMF request allowed to be outstanding at
306 	 * a time and the TMF routines perform their own allocation
307 	 * tracking using the standard state flags.
308 	 */
309 	mpt->tmf_req->state = REQ_STATE_FREE;
310 	maxq--;
311 
312 	/*
313 	 * The rest of this is CAM foo, for which we need to drop our lock
314 	 */
315 	MPT_UNLOCK(mpt);
316 
317 	if (mpt_spawn_recovery_thread(mpt) != 0) {
318 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
319 		error = ENOMEM;
320 		goto cleanup;
321 	}
322 
323 	/*
324 	 * Create the device queue for our SIM(s).
325 	 */
326 	devq = cam_simq_alloc(maxq);
327 	if (devq == NULL) {
328 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
329 		error = ENOMEM;
330 		goto cleanup;
331 	}
332 
333 	/*
334 	 * Construct our SIM entry.
335 	 */
336 	mpt->sim =
337 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
338 	if (mpt->sim == NULL) {
339 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
340 		cam_simq_free(devq);
341 		error = ENOMEM;
342 		goto cleanup;
343 	}
344 
345 	/*
346 	 * Register exactly this bus.
347 	 */
348 	MPT_LOCK(mpt);
349 	if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
350 		mpt_prt(mpt, "Bus registration Failed!\n");
351 		error = ENOMEM;
352 		MPT_UNLOCK(mpt);
353 		goto cleanup;
354 	}
355 
356 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
357 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
358 		mpt_prt(mpt, "Unable to allocate Path!\n");
359 		error = ENOMEM;
360 		MPT_UNLOCK(mpt);
361 		goto cleanup;
362 	}
363 	MPT_UNLOCK(mpt);
364 
365 	/*
366 	 * Only register a second bus for RAID physical
367 	 * devices if the controller supports RAID.
368 	 */
369 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
370 		return (0);
371 	}
372 
373 	/*
374 	 * Create a "bus" to export all hidden disks to CAM.
375 	 */
376 	mpt->phydisk_sim =
377 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
378 	if (mpt->phydisk_sim == NULL) {
379 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
380 		error = ENOMEM;
381 		goto cleanup;
382 	}
383 
384 	/*
385 	 * Register this bus.
386 	 */
387 	MPT_LOCK(mpt);
388 	if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
389 	    CAM_SUCCESS) {
390 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
391 		error = ENOMEM;
392 		MPT_UNLOCK(mpt);
393 		goto cleanup;
394 	}
395 
396 	if (xpt_create_path(&mpt->phydisk_path, NULL,
397 	    cam_sim_path(mpt->phydisk_sim),
398 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
399 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
400 		error = ENOMEM;
401 		MPT_UNLOCK(mpt);
402 		goto cleanup;
403 	}
404 	MPT_UNLOCK(mpt);
405 	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
406 	return (0);
407 
408 cleanup:
409 	mpt_cam_detach(mpt);
410 	return (error);
411 }
412 
413 /*
414  * Read FC configuration information
415  */
416 static int
417 mpt_read_config_info_fc(struct mpt_softc *mpt)
418 {
419 	char *topology = NULL;
420 	int rv;
421 
422 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
423 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
424 	if (rv) {
425 		return (-1);
426 	}
427 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
428 		 mpt->mpt_fcport_page0.Header.PageVersion,
429 		 mpt->mpt_fcport_page0.Header.PageLength,
430 		 mpt->mpt_fcport_page0.Header.PageNumber,
431 		 mpt->mpt_fcport_page0.Header.PageType);
432 
433 
434 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
435 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
436 	if (rv) {
437 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
438 		return (-1);
439 	}
440 	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
441 
442 	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
443 
444 	switch (mpt->mpt_fcport_page0.Flags &
445 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
446 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
447 		mpt->mpt_fcport_speed = 0;
448 		topology = "<NO LOOP>";
449 		break;
450 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
451 		topology = "N-Port";
452 		break;
453 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
454 		topology = "NL-Port";
455 		break;
456 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
457 		topology = "F-Port";
458 		break;
459 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
460 		topology = "FL-Port";
461 		break;
462 	default:
463 		mpt->mpt_fcport_speed = 0;
464 		topology = "?";
465 		break;
466 	}
467 
468 	mpt_lprt(mpt, MPT_PRT_INFO,
469 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
470 	    "Speed %u-Gbit\n", topology,
471 	    mpt->mpt_fcport_page0.WWNN.High,
472 	    mpt->mpt_fcport_page0.WWNN.Low,
473 	    mpt->mpt_fcport_page0.WWPN.High,
474 	    mpt->mpt_fcport_page0.WWPN.Low,
475 	    mpt->mpt_fcport_speed);
476 #if __FreeBSD_version >= 500000
477 	MPT_UNLOCK(mpt);
478 	{
479 		struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
480 		struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
481 
482 		snprintf(mpt->scinfo.fc.wwnn,
483 		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
484 		    mpt->mpt_fcport_page0.WWNN.High,
485 		    mpt->mpt_fcport_page0.WWNN.Low);
486 
487 		snprintf(mpt->scinfo.fc.wwpn,
488 		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
489 		    mpt->mpt_fcport_page0.WWPN.High,
490 		    mpt->mpt_fcport_page0.WWPN.Low);
491 
492 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
493 		       "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
494 		       "World Wide Node Name");
495 
496 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
497 		       "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
498 		       "World Wide Port Name");
499 
500 	}
501 	MPT_LOCK(mpt);
502 #endif
503 	return (0);
504 }
505 
506 /*
507  * Set FC configuration information.
508  */
509 static int
510 mpt_set_initial_config_fc(struct mpt_softc *mpt)
511 {
512 
513 	CONFIG_PAGE_FC_PORT_1 fc;
514 	U32 fl;
515 	int r, doit = 0;
516 	int role;
517 
518 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
519 	    &fc.Header, FALSE, 5000);
520 	if (r) {
521 		mpt_prt(mpt, "failed to read FC page 1 header\n");
522 		return (mpt_fc_reset_link(mpt, 1));
523 	}
524 
525 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
526 	    &fc.Header, sizeof (fc), FALSE, 5000);
527 	if (r) {
528 		mpt_prt(mpt, "failed to read FC page 1\n");
529 		return (mpt_fc_reset_link(mpt, 1));
530 	}
531 	mpt2host_config_page_fc_port_1(&fc);
532 
533 	/*
534 	 * Check our flags to make sure we support the role we want.
535 	 */
536 	doit = 0;
537 	role = 0;
538 	fl = fc.Flags;
539 
540 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
541 		role |= MPT_ROLE_INITIATOR;
542 	}
543 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
544 		role |= MPT_ROLE_TARGET;
545 	}
546 
547 	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
548 
549 	if (mpt->do_cfg_role == 0) {
550 		role = mpt->cfg_role;
551 	} else {
552 		mpt->do_cfg_role = 0;
553 	}
554 
555 	if (role != mpt->cfg_role) {
556 		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
557 			if ((role & MPT_ROLE_INITIATOR) == 0) {
558 				mpt_prt(mpt, "adding initiator role\n");
559 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
560 				doit++;
561 			} else {
562 				mpt_prt(mpt, "keeping initiator role\n");
563 			}
564 		} else if (role & MPT_ROLE_INITIATOR) {
565 			mpt_prt(mpt, "removing initiator role\n");
566 			doit++;
567 		}
568 		if (mpt->cfg_role & MPT_ROLE_TARGET) {
569 			if ((role & MPT_ROLE_TARGET) == 0) {
570 				mpt_prt(mpt, "adding target role\n");
571 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
572 				doit++;
573 			} else {
574 				mpt_prt(mpt, "keeping target role\n");
575 			}
576 		} else if (role & MPT_ROLE_TARGET) {
577 			mpt_prt(mpt, "removing target role\n");
578 			doit++;
579 		}
580 		mpt->role = mpt->cfg_role;
581 	}
582 
583 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
584 		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
585 			mpt_prt(mpt, "adding OXID option\n");
586 			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
587 			doit++;
588 		}
589 	}
590 
591 	if (doit) {
592 		fc.Flags = fl;
593 		host2mpt_config_page_fc_port_1(&fc);
594 		r = mpt_write_cfg_page(mpt,
595 		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
596 		    sizeof(fc), FALSE, 5000);
597 		if (r != 0) {
598 			mpt_prt(mpt, "failed to update NVRAM with changes\n");
599 			return (0);
600 		}
601 		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
602 		    "effect until next reboot or IOC reset\n");
603 	}
604 	return (0);
605 }
606 
607 static int
608 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
609 {
610 	ConfigExtendedPageHeader_t hdr;
611 	struct mptsas_phyinfo *phyinfo;
612 	SasIOUnitPage0_t *buffer;
613 	int error, len, i;
614 
615 	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
616 				       0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
617 				       &hdr, 0, 10000);
618 	if (error)
619 		goto out;
620 	if (hdr.ExtPageLength == 0) {
621 		error = ENXIO;
622 		goto out;
623 	}
624 
625 	len = hdr.ExtPageLength * 4;
626 	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
627 	if (buffer == NULL) {
628 		error = ENOMEM;
629 		goto out;
630 	}
631 
632 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
633 				     0, &hdr, buffer, len, 0, 10000);
634 	if (error) {
635 		free(buffer, M_DEVBUF);
636 		goto out;
637 	}
638 
639 	portinfo->num_phys = buffer->NumPhys;
640 	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
641 	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
642 	if (portinfo->phy_info == NULL) {
643 		free(buffer, M_DEVBUF);
644 		error = ENOMEM;
645 		goto out;
646 	}
647 
648 	for (i = 0; i < portinfo->num_phys; i++) {
649 		phyinfo = &portinfo->phy_info[i];
650 		phyinfo->phy_num = i;
651 		phyinfo->port_id = buffer->PhyData[i].Port;
652 		phyinfo->negotiated_link_rate =
653 		    buffer->PhyData[i].NegotiatedLinkRate;
654 		phyinfo->handle =
655 		    le16toh(buffer->PhyData[i].ControllerDevHandle);
656 	}
657 
658 	free(buffer, M_DEVBUF);
659 out:
660 	return (error);
661 }
662 
663 static int
664 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
665 	uint32_t form, uint32_t form_specific)
666 {
667 	ConfigExtendedPageHeader_t hdr;
668 	SasPhyPage0_t *buffer;
669 	int error;
670 
671 	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
672 				       MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
673 				       0, 10000);
674 	if (error)
675 		goto out;
676 	if (hdr.ExtPageLength == 0) {
677 		error = ENXIO;
678 		goto out;
679 	}
680 
681 	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
682 	if (buffer == NULL) {
683 		error = ENOMEM;
684 		goto out;
685 	}
686 
687 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
688 				     form + form_specific, &hdr, buffer,
689 				     sizeof(SasPhyPage0_t), 0, 10000);
690 	if (error) {
691 		free(buffer, M_DEVBUF);
692 		goto out;
693 	}
694 
695 	phy_info->hw_link_rate = buffer->HwLinkRate;
696 	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
697 	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
698 	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
699 
700 	free(buffer, M_DEVBUF);
701 out:
702 	return (error);
703 }
704 
705 static int
706 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
707 	uint32_t form, uint32_t form_specific)
708 {
709 	ConfigExtendedPageHeader_t hdr;
710 	SasDevicePage0_t *buffer;
711 	uint64_t sas_address;
712 	int error = 0;
713 
714 	bzero(device_info, sizeof(*device_info));
715 	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
716 				       MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
717 				       &hdr, 0, 10000);
718 	if (error)
719 		goto out;
720 	if (hdr.ExtPageLength == 0) {
721 		error = ENXIO;
722 		goto out;
723 	}
724 
725 	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
726 	if (buffer == NULL) {
727 		error = ENOMEM;
728 		goto out;
729 	}
730 
731 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
732 				     form + form_specific, &hdr, buffer,
733 				     sizeof(SasDevicePage0_t), 0, 10000);
734 	if (error) {
735 		free(buffer, M_DEVBUF);
736 		goto out;
737 	}
738 
739 	device_info->dev_handle = le16toh(buffer->DevHandle);
740 	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
741 	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
742 	device_info->slot = le16toh(buffer->Slot);
743 	device_info->phy_num = buffer->PhyNum;
744 	device_info->physical_port = buffer->PhysicalPort;
745 	device_info->target_id = buffer->TargetID;
746 	device_info->bus = buffer->Bus;
747 	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
748 	device_info->sas_address = le64toh(sas_address);
749 	device_info->device_info = le32toh(buffer->DeviceInfo);
750 
751 	free(buffer, M_DEVBUF);
752 out:
753 	return (error);
754 }
755 
756 /*
757  * Read SAS configuration information. Nothing to do yet.
758  */
759 static int
760 mpt_read_config_info_sas(struct mpt_softc *mpt)
761 {
762 	struct mptsas_portinfo *portinfo;
763 	struct mptsas_phyinfo *phyinfo;
764 	int error, i;
765 
766 	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
767 	if (portinfo == NULL)
768 		return (ENOMEM);
769 
770 	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
771 	if (error) {
772 		free(portinfo, M_DEVBUF);
773 		return (0);
774 	}
775 
776 	for (i = 0; i < portinfo->num_phys; i++) {
777 		phyinfo = &portinfo->phy_info[i];
778 		error = mptsas_sas_phy_pg0(mpt, phyinfo,
779 		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
780 		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
781 		if (error)
782 			break;
783 		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
784 		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
785 		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
786 		    phyinfo->handle);
787 		if (error)
788 			break;
789 		phyinfo->identify.phy_num = phyinfo->phy_num = i;
790 		if (phyinfo->attached.dev_handle)
791 			error = mptsas_sas_device_pg0(mpt,
792 			    &phyinfo->attached,
793 			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
794 			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
795 			    phyinfo->attached.dev_handle);
796 		if (error)
797 			break;
798 	}
799 	mpt->sas_portinfo = portinfo;
800 	return (0);
801 }
802 
803 static void
804 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
805 	int enabled)
806 {
807 	SataPassthroughRequest_t	*pass;
808 	request_t *req;
809 	int error, status;
810 
811 	req = mpt_get_request(mpt, 0);
812 	if (req == NULL)
813 		return;
814 
815 	pass = req->req_vbuf;
816 	bzero(pass, sizeof(SataPassthroughRequest_t));
817 	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
818 	pass->TargetID = devinfo->target_id;
819 	pass->Bus = devinfo->bus;
820 	pass->PassthroughFlags = 0;
821 	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
822 	pass->DataLength = 0;
823 	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
824 	pass->CommandFIS[0] = 0x27;
825 	pass->CommandFIS[1] = 0x80;
826 	pass->CommandFIS[2] = 0xef;
827 	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
828 	pass->CommandFIS[7] = 0x40;
829 	pass->CommandFIS[15] = 0x08;
830 
831 	mpt_check_doorbell(mpt);
832 	mpt_send_cmd(mpt, req);
833 	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
834 			     10 * 1000);
835 	if (error) {
836 		mpt_free_request(mpt, req);
837 		printf("error %d sending passthrough\n", error);
838 		return;
839 	}
840 
841 	status = le16toh(req->IOCStatus);
842 	if (status != MPI_IOCSTATUS_SUCCESS) {
843 		mpt_free_request(mpt, req);
844 		printf("IOCSTATUS %d\n", status);
845 		return;
846 	}
847 
848 	mpt_free_request(mpt, req);
849 }
850 
851 /*
852  * Set SAS configuration information. Nothing to do yet.
853  */
854 static int
855 mpt_set_initial_config_sas(struct mpt_softc *mpt)
856 {
857 	struct mptsas_phyinfo *phyinfo;
858 	int i;
859 
860 	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
861 		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
862 			phyinfo = &mpt->sas_portinfo->phy_info[i];
863 			if (phyinfo->attached.dev_handle == 0)
864 				continue;
865 			if ((phyinfo->attached.device_info &
866 			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
867 				continue;
868 			if (bootverbose)
869 				device_printf(mpt->dev,
870 				    "%sabling SATA WC on phy %d\n",
871 				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
872 			mptsas_set_sata_wc(mpt, &phyinfo->attached,
873 					   mpt_enable_sata_wc);
874 		}
875 	}
876 
877 	return (0);
878 }
879 
880 static int
881 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
882  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
883 {
884 	if (req != NULL) {
885 
886 		if (reply_frame != NULL) {
887 			req->IOCStatus = le16toh(reply_frame->IOCStatus);
888 		}
889 		req->state &= ~REQ_STATE_QUEUED;
890 		req->state |= REQ_STATE_DONE;
891 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
892 		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
893 			wakeup(req);
894 		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
895 			/*
896 			 * Whew- we can free this request (late completion)
897 			 */
898 			mpt_free_request(mpt, req);
899 		}
900 	}
901 
902 	return (TRUE);
903 }
904 
905 /*
906  * Read SCSI configuration information
907  */
908 static int
909 mpt_read_config_info_spi(struct mpt_softc *mpt)
910 {
911 	int rv, i;
912 
913 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
914 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
915 	if (rv) {
916 		return (-1);
917 	}
918 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
919 	    mpt->mpt_port_page0.Header.PageVersion,
920 	    mpt->mpt_port_page0.Header.PageLength,
921 	    mpt->mpt_port_page0.Header.PageNumber,
922 	    mpt->mpt_port_page0.Header.PageType);
923 
924 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
925 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
926 	if (rv) {
927 		return (-1);
928 	}
929 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
930 	    mpt->mpt_port_page1.Header.PageVersion,
931 	    mpt->mpt_port_page1.Header.PageLength,
932 	    mpt->mpt_port_page1.Header.PageNumber,
933 	    mpt->mpt_port_page1.Header.PageType);
934 
935 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
936 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
937 	if (rv) {
938 		return (-1);
939 	}
940 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
941 	    mpt->mpt_port_page2.Header.PageVersion,
942 	    mpt->mpt_port_page2.Header.PageLength,
943 	    mpt->mpt_port_page2.Header.PageNumber,
944 	    mpt->mpt_port_page2.Header.PageType);
945 
946 	for (i = 0; i < 16; i++) {
947 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
948 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
949 		if (rv) {
950 			return (-1);
951 		}
952 		mpt_lprt(mpt, MPT_PRT_DEBUG,
953 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
954 		    mpt->mpt_dev_page0[i].Header.PageVersion,
955 		    mpt->mpt_dev_page0[i].Header.PageLength,
956 		    mpt->mpt_dev_page0[i].Header.PageNumber,
957 		    mpt->mpt_dev_page0[i].Header.PageType);
958 
959 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
960 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
961 		if (rv) {
962 			return (-1);
963 		}
964 		mpt_lprt(mpt, MPT_PRT_DEBUG,
965 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
966 		    mpt->mpt_dev_page1[i].Header.PageVersion,
967 		    mpt->mpt_dev_page1[i].Header.PageLength,
968 		    mpt->mpt_dev_page1[i].Header.PageNumber,
969 		    mpt->mpt_dev_page1[i].Header.PageType);
970 	}
971 
972 	/*
973 	 * At this point, we don't *have* to fail. As long as we have
974 	 * valid config header information, we can (barely) lurch
975 	 * along.
976 	 */
977 
978 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
979 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
980 	if (rv) {
981 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
982 	} else {
983 		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
984 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
985 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
986 		    mpt->mpt_port_page0.Capabilities,
987 		    mpt->mpt_port_page0.PhysicalInterface);
988 	}
989 
990 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
991 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
992 	if (rv) {
993 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
994 	} else {
995 		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
996 		mpt_lprt(mpt, MPT_PRT_DEBUG,
997 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
998 		    mpt->mpt_port_page1.Configuration,
999 		    mpt->mpt_port_page1.OnBusTimerValue);
1000 	}
1001 
1002 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
1003 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
1004 	if (rv) {
1005 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
1006 	} else {
1007 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1008 		    "Port Page 2: Flags %x Settings %x\n",
1009 		    mpt->mpt_port_page2.PortFlags,
1010 		    mpt->mpt_port_page2.PortSettings);
1011 		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
1012 		for (i = 0; i < 16; i++) {
1013 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1014 		  	    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1015 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1016 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1017 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1018 		}
1019 	}
1020 
1021 	for (i = 0; i < 16; i++) {
1022 		rv = mpt_read_cur_cfg_page(mpt, i,
1023 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1024 		    FALSE, 5000);
1025 		if (rv) {
1026 			mpt_prt(mpt,
1027 			    "cannot read SPI Target %d Device Page 0\n", i);
1028 			continue;
1029 		}
1030 		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
1031 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1032 		    "target %d page 0: Negotiated Params %x Information %x\n",
1033 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1034 		    mpt->mpt_dev_page0[i].Information);
1035 
1036 		rv = mpt_read_cur_cfg_page(mpt, i,
1037 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1038 		    FALSE, 5000);
1039 		if (rv) {
1040 			mpt_prt(mpt,
1041 			    "cannot read SPI Target %d Device Page 1\n", i);
1042 			continue;
1043 		}
1044 		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
1045 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1046 		    "target %d page 1: Requested Params %x Configuration %x\n",
1047 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
1048 		    mpt->mpt_dev_page1[i].Configuration);
1049 	}
1050 	return (0);
1051 }
1052 
1053 /*
1054  * Validate SPI configuration information.
1055  *
1056  * In particular, validate SPI Port Page 1.
1057  */
1058 static int
1059 mpt_set_initial_config_spi(struct mpt_softc *mpt)
1060 {
1061 	int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
1062 	int error;
1063 
1064 	mpt->mpt_disc_enable = 0xff;
1065 	mpt->mpt_tag_enable = 0;
1066 
1067 	if (mpt->mpt_port_page1.Configuration != pp1val) {
1068 		CONFIG_PAGE_SCSI_PORT_1 tmp;
1069 
1070 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1071 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1072 		tmp = mpt->mpt_port_page1;
1073 		tmp.Configuration = pp1val;
1074 		host2mpt_config_page_scsi_port_1(&tmp);
1075 		error = mpt_write_cur_cfg_page(mpt, 0,
1076 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1077 		if (error) {
1078 			return (-1);
1079 		}
1080 		error = mpt_read_cur_cfg_page(mpt, 0,
1081 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1082 		if (error) {
1083 			return (-1);
1084 		}
1085 		mpt2host_config_page_scsi_port_1(&tmp);
1086 		if (tmp.Configuration != pp1val) {
1087 			mpt_prt(mpt,
1088 			    "failed to reset SPI Port Page 1 Config value\n");
1089 			return (-1);
1090 		}
1091 		mpt->mpt_port_page1 = tmp;
1092 	}
1093 
1094 	/*
1095 	 * The purpose of this exercise is to get
1096 	 * all targets back to async/narrow.
1097 	 *
1098 	 * We skip this step if the BIOS has already negotiated
1099 	 * speeds with the targets.
1100 	 */
1101 	i = mpt->mpt_port_page2.PortSettings &
1102 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1103 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
1104 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1105 		    "honoring BIOS transfer negotiations\n");
1106 	} else {
1107 		for (i = 0; i < 16; i++) {
1108 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
1109 			mpt->mpt_dev_page1[i].Configuration = 0;
1110 			(void) mpt_update_spi_config(mpt, i);
1111 		}
1112 	}
1113 	return (0);
1114 }
1115 
1116 int
1117 mpt_cam_enable(struct mpt_softc *mpt)
1118 {
1119 	int error;
1120 
1121 	MPT_LOCK(mpt);
1122 
1123 	error = EIO;
1124 	if (mpt->is_fc) {
1125 		if (mpt_read_config_info_fc(mpt)) {
1126 			goto out;
1127 		}
1128 		if (mpt_set_initial_config_fc(mpt)) {
1129 			goto out;
1130 		}
1131 	} else if (mpt->is_sas) {
1132 		if (mpt_read_config_info_sas(mpt)) {
1133 			goto out;
1134 		}
1135 		if (mpt_set_initial_config_sas(mpt)) {
1136 			goto out;
1137 		}
1138 	} else if (mpt->is_spi) {
1139 		if (mpt_read_config_info_spi(mpt)) {
1140 			goto out;
1141 		}
1142 		if (mpt_set_initial_config_spi(mpt)) {
1143 			goto out;
1144 		}
1145 	}
1146 	error = 0;
1147 
1148 out:
1149 	MPT_UNLOCK(mpt);
1150 	return (error);
1151 }
1152 
1153 void
1154 mpt_cam_ready(struct mpt_softc *mpt)
1155 {
1156 	/*
1157 	 * If we're in target mode, hang out resources now
1158 	 * so we don't cause the world to hang talking to us.
1159 	 */
1160 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1161 		/*
1162 		 * Try to add some target command resources
1163 		 */
1164 		MPT_LOCK(mpt);
1165 		if (mpt_add_target_commands(mpt) == FALSE) {
1166 			mpt_prt(mpt, "failed to add target commands\n");
1167 		}
1168 		MPT_UNLOCK(mpt);
1169 	}
1170 	mpt->ready = 1;
1171 }
1172 
1173 void
1174 mpt_cam_detach(struct mpt_softc *mpt)
1175 {
1176 	mpt_handler_t handler;
1177 
1178 	MPT_LOCK(mpt);
1179 	mpt->ready = 0;
1180 	mpt_terminate_recovery_thread(mpt);
1181 
1182 	handler.reply_handler = mpt_scsi_reply_handler;
1183 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1184 			       scsi_io_handler_id);
1185 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
1186 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1187 			       scsi_tmf_handler_id);
1188 	handler.reply_handler = mpt_fc_els_reply_handler;
1189 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1190 			       fc_els_handler_id);
1191 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
1192 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1193 			       mpt->scsi_tgt_handler_id);
1194 	handler.reply_handler = mpt_sata_pass_reply_handler;
1195 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1196 			       sata_pass_handler_id);
1197 
1198 	if (mpt->tmf_req != NULL) {
1199 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1200 		mpt_free_request(mpt, mpt->tmf_req);
1201 		mpt->tmf_req = NULL;
1202 	}
1203 	if (mpt->sas_portinfo != NULL) {
1204 		free(mpt->sas_portinfo, M_DEVBUF);
1205 		mpt->sas_portinfo = NULL;
1206 	}
1207 	MPT_UNLOCK(mpt);
1208 
1209 	if (mpt->sim != NULL) {
1210 		xpt_free_path(mpt->path);
1211 		xpt_bus_deregister(cam_sim_path(mpt->sim));
1212 		cam_sim_free(mpt->sim, TRUE);
1213 		mpt->sim = NULL;
1214 	}
1215 
1216 	if (mpt->phydisk_sim != NULL) {
1217 		xpt_free_path(mpt->phydisk_path);
1218 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1219 		cam_sim_free(mpt->phydisk_sim, TRUE);
1220 		mpt->phydisk_sim = NULL;
1221 	}
1222 }
1223 
1224 /* This routine is used after a system crash to dump core onto the swap device.
1225  */
1226 static void
1227 mpt_poll(struct cam_sim *sim)
1228 {
1229 	struct mpt_softc *mpt;
1230 
1231 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
1232 	mpt_intr(mpt);
1233 }
1234 
1235 /*
1236  * Watchdog timeout routine for SCSI requests.
1237  */
1238 static void
1239 mpt_timeout(void *arg)
1240 {
1241 	union ccb	 *ccb;
1242 	struct mpt_softc *mpt;
1243 	request_t	 *req;
1244 
1245 	ccb = (union ccb *)arg;
1246 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1247 
1248 #if __FreeBSD_version < 500000
1249 	MPT_LOCK(mpt);
1250 #endif
1251 	MPT_LOCK_ASSERT(mpt);
1252 	req = ccb->ccb_h.ccb_req_ptr;
1253 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1254 	    req->serno, ccb, req->ccb);
1255 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
1256 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1257 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1258 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1259 		req->state |= REQ_STATE_TIMEDOUT;
1260 		mpt_wakeup_recovery_thread(mpt);
1261 	}
1262 #if __FreeBSD_version < 500000
1263 	MPT_UNLOCK(mpt);
1264 #endif
1265 }
1266 
1267 /*
1268  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
1269  *
1270  * Takes a list of physical segments and builds the SGL for SCSI IO command
1271  * and forwards the commard to the IOC after one last check that CAM has not
1272  * aborted the transaction.
1273  */
1274 static void
1275 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1276 {
1277 	request_t *req, *trq;
1278 	char *mpt_off;
1279 	union ccb *ccb;
1280 	struct mpt_softc *mpt;
1281 	int seg, first_lim;
1282 	uint32_t flags, nxt_off;
1283 	void *sglp = NULL;
1284 	MSG_REQUEST_HEADER *hdrp;
1285 	SGE_SIMPLE64 *se;
1286 	SGE_CHAIN64 *ce;
1287 	int istgt = 0;
1288 
1289 	req = (request_t *)arg;
1290 	ccb = req->ccb;
1291 
1292 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1293 	req = ccb->ccb_h.ccb_req_ptr;
1294 
1295 	hdrp = req->req_vbuf;
1296 	mpt_off = req->req_vbuf;
1297 
1298 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1299 		error = EFBIG;
1300 	}
1301 
1302 	if (error == 0) {
1303 		switch (hdrp->Function) {
1304 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1305 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1306 			istgt = 0;
1307 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1308 			break;
1309 		case MPI_FUNCTION_TARGET_ASSIST:
1310 			istgt = 1;
1311 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1312 			break;
1313 		default:
1314 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1315 			    hdrp->Function);
1316 			error = EINVAL;
1317 			break;
1318 		}
1319 	}
1320 
1321 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1322 		error = EFBIG;
1323 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1324 		    nseg, mpt->max_seg_cnt);
1325 	}
1326 
1327 bad:
1328 	if (error != 0) {
1329 		if (error != EFBIG && error != ENOMEM) {
1330 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1331 		}
1332 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1333 			cam_status status;
1334 			mpt_freeze_ccb(ccb);
1335 			if (error == EFBIG) {
1336 				status = CAM_REQ_TOO_BIG;
1337 			} else if (error == ENOMEM) {
1338 				if (mpt->outofbeer == 0) {
1339 					mpt->outofbeer = 1;
1340 					xpt_freeze_simq(mpt->sim, 1);
1341 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1342 					    "FREEZEQ\n");
1343 				}
1344 				status = CAM_REQUEUE_REQ;
1345 			} else {
1346 				status = CAM_REQ_CMP_ERR;
1347 			}
1348 			mpt_set_ccb_status(ccb, status);
1349 		}
1350 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1351 			request_t *cmd_req =
1352 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1353 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1354 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1355 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1356 		}
1357 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1358 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1359 		xpt_done(ccb);
1360 		CAMLOCK_2_MPTLOCK(mpt);
1361 		mpt_free_request(mpt, req);
1362 		MPTLOCK_2_CAMLOCK(mpt);
1363 		return;
1364 	}
1365 
1366 	/*
1367 	 * No data to transfer?
1368 	 * Just make a single simple SGL with zero length.
1369 	 */
1370 
1371 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1372 		int tidx = ((char *)sglp) - mpt_off;
1373 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1374 	}
1375 
1376 	if (nseg == 0) {
1377 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1378 		MPI_pSGE_SET_FLAGS(se1,
1379 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1380 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1381 		se1->FlagsLength = htole32(se1->FlagsLength);
1382 		goto out;
1383 	}
1384 
1385 
1386 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1387 	if (istgt == 0) {
1388 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1389 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1390 		}
1391 	} else {
1392 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1393 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1394 		}
1395 	}
1396 
1397 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1398 		bus_dmasync_op_t op;
1399 		if (istgt == 0) {
1400 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1401 				op = BUS_DMASYNC_PREREAD;
1402 			} else {
1403 				op = BUS_DMASYNC_PREWRITE;
1404 			}
1405 		} else {
1406 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1407 				op = BUS_DMASYNC_PREWRITE;
1408 			} else {
1409 				op = BUS_DMASYNC_PREREAD;
1410 			}
1411 		}
1412 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1413 	}
1414 
1415 	/*
1416 	 * Okay, fill in what we can at the end of the command frame.
1417 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1418 	 * the command frame.
1419 	 *
1420 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1421 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
1422 	 * that.
1423 	 */
1424 
1425 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1426 		first_lim = nseg;
1427 	} else {
1428 		/*
1429 		 * Leave room for CHAIN element
1430 		 */
1431 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1432 	}
1433 
1434 	se = (SGE_SIMPLE64 *) sglp;
1435 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1436 		uint32_t tf;
1437 
1438 		memset(se, 0, sizeof (*se));
1439 		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1440 		if (sizeof(bus_addr_t) > 4) {
1441 			se->Address.High =
1442 			    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
1443 		}
1444 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1445 		tf = flags;
1446 		if (seg == first_lim - 1) {
1447 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1448 		}
1449 		if (seg == nseg - 1) {
1450 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1451 				MPI_SGE_FLAGS_END_OF_BUFFER;
1452 		}
1453 		MPI_pSGE_SET_FLAGS(se, tf);
1454 		se->FlagsLength = htole32(se->FlagsLength);
1455 	}
1456 
1457 	if (seg == nseg) {
1458 		goto out;
1459 	}
1460 
1461 	/*
1462 	 * Tell the IOC where to find the first chain element.
1463 	 */
1464 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1465 	nxt_off = MPT_RQSL(mpt);
1466 	trq = req;
1467 
1468 	/*
1469 	 * Make up the rest of the data segments out of a chain element
1470 	 * (contiained in the current request frame) which points to
1471 	 * SIMPLE64 elements in the next request frame, possibly ending
1472 	 * with *another* chain element (if there's more).
1473 	 */
1474 	while (seg < nseg) {
1475 		int this_seg_lim;
1476 		uint32_t tf, cur_off;
1477 		bus_addr_t chain_list_addr;
1478 
1479 		/*
1480 		 * Point to the chain descriptor. Note that the chain
1481 		 * descriptor is at the end of the *previous* list (whether
1482 		 * chain or simple).
1483 		 */
1484 		ce = (SGE_CHAIN64 *) se;
1485 
1486 		/*
1487 		 * Before we change our current pointer, make  sure we won't
1488 		 * overflow the request area with this frame. Note that we
1489 		 * test against 'greater than' here as it's okay in this case
1490 		 * to have next offset be just outside the request area.
1491 		 */
1492 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1493 			nxt_off = MPT_REQUEST_AREA;
1494 			goto next_chain;
1495 		}
1496 
1497 		/*
1498 		 * Set our SGE element pointer to the beginning of the chain
1499 		 * list and update our next chain list offset.
1500 		 */
1501 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1502 		cur_off = nxt_off;
1503 		nxt_off += MPT_RQSL(mpt);
1504 
1505 		/*
1506 		 * Now initialized the chain descriptor.
1507 		 */
1508 		memset(ce, 0, sizeof (*ce));
1509 
1510 		/*
1511 		 * Get the physical address of the chain list.
1512 		 */
1513 		chain_list_addr = trq->req_pbuf;
1514 		chain_list_addr += cur_off;
1515 		if (sizeof (bus_addr_t) > 4) {
1516 			ce->Address.High =
1517 			    htole32(((uint64_t)chain_list_addr) >> 32);
1518 		}
1519 		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
1520 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1521 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1522 
1523 		/*
1524 		 * If we have more than a frame's worth of segments left,
1525 		 * set up the chain list to have the last element be another
1526 		 * chain descriptor.
1527 		 */
1528 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1529 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1530 			/*
1531 			 * The length of the chain is the length in bytes of the
1532 			 * number of segments plus the next chain element.
1533 			 *
1534 			 * The next chain descriptor offset is the length,
1535 			 * in words, of the number of segments.
1536 			 */
1537 			ce->Length = (this_seg_lim - seg) *
1538 			    sizeof (SGE_SIMPLE64);
1539 			ce->NextChainOffset = ce->Length >> 2;
1540 			ce->Length += sizeof (SGE_CHAIN64);
1541 		} else {
1542 			this_seg_lim = nseg;
1543 			ce->Length = (this_seg_lim - seg) *
1544 			    sizeof (SGE_SIMPLE64);
1545 		}
1546 		ce->Length = htole16(ce->Length);
1547 
1548 		/*
1549 		 * Fill in the chain list SGE elements with our segment data.
1550 		 *
1551 		 * If we're the last element in this chain list, set the last
1552 		 * element flag. If we're the completely last element period,
1553 		 * set the end of list and end of buffer flags.
1554 		 */
1555 		while (seg < this_seg_lim) {
1556 			memset(se, 0, sizeof (*se));
1557 			se->Address.Low = htole32(dm_segs->ds_addr &
1558 			    0xffffffff);
1559 			if (sizeof (bus_addr_t) > 4) {
1560 				se->Address.High =
1561 				    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
1562 			}
1563 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1564 			tf = flags;
1565 			if (seg ==  this_seg_lim - 1) {
1566 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1567 			}
1568 			if (seg == nseg - 1) {
1569 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1570 					MPI_SGE_FLAGS_END_OF_BUFFER;
1571 			}
1572 			MPI_pSGE_SET_FLAGS(se, tf);
1573 			se->FlagsLength = htole32(se->FlagsLength);
1574 			se++;
1575 			seg++;
1576 			dm_segs++;
1577 		}
1578 
1579     next_chain:
1580 		/*
1581 		 * If we have more segments to do and we've used up all of
1582 		 * the space in a request area, go allocate another one
1583 		 * and chain to that.
1584 		 */
1585 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1586 			request_t *nrq;
1587 
1588 			CAMLOCK_2_MPTLOCK(mpt);
1589 			nrq = mpt_get_request(mpt, FALSE);
1590 			MPTLOCK_2_CAMLOCK(mpt);
1591 
1592 			if (nrq == NULL) {
1593 				error = ENOMEM;
1594 				goto bad;
1595 			}
1596 
1597 			/*
1598 			 * Append the new request area on the tail of our list.
1599 			 */
1600 			if ((trq = req->chain) == NULL) {
1601 				req->chain = nrq;
1602 			} else {
1603 				while (trq->chain != NULL) {
1604 					trq = trq->chain;
1605 				}
1606 				trq->chain = nrq;
1607 			}
1608 			trq = nrq;
1609 			mpt_off = trq->req_vbuf;
1610 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1611 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1612 			}
1613 			nxt_off = 0;
1614 		}
1615 	}
1616 out:
1617 
1618 	/*
1619 	 * Last time we need to check if this CCB needs to be aborted.
1620 	 */
1621 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1622 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1623 			request_t *cmd_req =
1624 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1625 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1626 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1627 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1628 		}
1629 		mpt_prt(mpt,
1630 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1631 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1632 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1633 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1634 		}
1635 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1636 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1637 		xpt_done(ccb);
1638 		CAMLOCK_2_MPTLOCK(mpt);
1639 		mpt_free_request(mpt, req);
1640 		MPTLOCK_2_CAMLOCK(mpt);
1641 		return;
1642 	}
1643 
1644 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1645 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1646 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
1647 		    mpt_timeout, ccb);
1648 	}
1649 	if (mpt->verbose > MPT_PRT_DEBUG) {
1650 		int nc = 0;
1651 		mpt_print_request(req->req_vbuf);
1652 		for (trq = req->chain; trq; trq = trq->chain) {
1653 			printf("  Additional Chain Area %d\n", nc++);
1654 			mpt_dump_sgl(trq->req_vbuf, 0);
1655 		}
1656 	}
1657 
1658 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1659 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1660 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1661 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1662 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1663 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1664 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1665 		} else {
1666 			tgt->state = TGT_STATE_MOVING_DATA;
1667 		}
1668 #else
1669 		tgt->state = TGT_STATE_MOVING_DATA;
1670 #endif
1671 	}
1672 	CAMLOCK_2_MPTLOCK(mpt);
1673 	mpt_send_cmd(mpt, req);
1674 	MPTLOCK_2_CAMLOCK(mpt);
1675 }
1676 
1677 static void
1678 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1679 {
1680 	request_t *req, *trq;
1681 	char *mpt_off;
1682 	union ccb *ccb;
1683 	struct mpt_softc *mpt;
1684 	int seg, first_lim;
1685 	uint32_t flags, nxt_off;
1686 	void *sglp = NULL;
1687 	MSG_REQUEST_HEADER *hdrp;
1688 	SGE_SIMPLE32 *se;
1689 	SGE_CHAIN32 *ce;
1690 	int istgt = 0;
1691 
1692 	req = (request_t *)arg;
1693 	ccb = req->ccb;
1694 
1695 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1696 	req = ccb->ccb_h.ccb_req_ptr;
1697 
1698 	hdrp = req->req_vbuf;
1699 	mpt_off = req->req_vbuf;
1700 
1701 
1702 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1703 		error = EFBIG;
1704 	}
1705 
1706 	if (error == 0) {
1707 		switch (hdrp->Function) {
1708 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1709 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1710 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1711 			break;
1712 		case MPI_FUNCTION_TARGET_ASSIST:
1713 			istgt = 1;
1714 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1715 			break;
1716 		default:
1717 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1718 			    hdrp->Function);
1719 			error = EINVAL;
1720 			break;
1721 		}
1722 	}
1723 
1724 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1725 		error = EFBIG;
1726 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1727 		    nseg, mpt->max_seg_cnt);
1728 	}
1729 
1730 bad:
1731 	if (error != 0) {
1732 		if (error != EFBIG && error != ENOMEM) {
1733 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1734 		}
1735 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1736 			cam_status status;
1737 			mpt_freeze_ccb(ccb);
1738 			if (error == EFBIG) {
1739 				status = CAM_REQ_TOO_BIG;
1740 			} else if (error == ENOMEM) {
1741 				if (mpt->outofbeer == 0) {
1742 					mpt->outofbeer = 1;
1743 					xpt_freeze_simq(mpt->sim, 1);
1744 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1745 					    "FREEZEQ\n");
1746 				}
1747 				status = CAM_REQUEUE_REQ;
1748 			} else {
1749 				status = CAM_REQ_CMP_ERR;
1750 			}
1751 			mpt_set_ccb_status(ccb, status);
1752 		}
1753 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1754 			request_t *cmd_req =
1755 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1756 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1757 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1758 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1759 		}
1760 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1761 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1762 		xpt_done(ccb);
1763 		CAMLOCK_2_MPTLOCK(mpt);
1764 		mpt_free_request(mpt, req);
1765 		MPTLOCK_2_CAMLOCK(mpt);
1766 		return;
1767 	}
1768 
1769 	/*
1770 	 * No data to transfer?
1771 	 * Just make a single simple SGL with zero length.
1772 	 */
1773 
1774 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1775 		int tidx = ((char *)sglp) - mpt_off;
1776 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1777 	}
1778 
1779 	if (nseg == 0) {
1780 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1781 		MPI_pSGE_SET_FLAGS(se1,
1782 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1783 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1784 		se1->FlagsLength = htole32(se1->FlagsLength);
1785 		goto out;
1786 	}
1787 
1788 
1789 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1790 	if (istgt == 0) {
1791 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1792 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1793 		}
1794 	} else {
1795 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1796 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1797 		}
1798 	}
1799 
1800 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1801 		bus_dmasync_op_t op;
1802 		if (istgt) {
1803 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1804 				op = BUS_DMASYNC_PREREAD;
1805 			} else {
1806 				op = BUS_DMASYNC_PREWRITE;
1807 			}
1808 		} else {
1809 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1810 				op = BUS_DMASYNC_PREWRITE;
1811 			} else {
1812 				op = BUS_DMASYNC_PREREAD;
1813 			}
1814 		}
1815 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1816 	}
1817 
1818 	/*
1819 	 * Okay, fill in what we can at the end of the command frame.
1820 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1821 	 * the command frame.
1822 	 *
1823 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1824 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1825 	 * that.
1826 	 */
1827 
1828 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1829 		first_lim = nseg;
1830 	} else {
1831 		/*
1832 		 * Leave room for CHAIN element
1833 		 */
1834 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1835 	}
1836 
1837 	se = (SGE_SIMPLE32 *) sglp;
1838 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1839 		uint32_t tf;
1840 
1841 		memset(se, 0,sizeof (*se));
1842 		se->Address = htole32(dm_segs->ds_addr);
1843 
1844 
1845 
1846 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1847 		tf = flags;
1848 		if (seg == first_lim - 1) {
1849 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1850 		}
1851 		if (seg == nseg - 1) {
1852 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1853 				MPI_SGE_FLAGS_END_OF_BUFFER;
1854 		}
1855 		MPI_pSGE_SET_FLAGS(se, tf);
1856 		se->FlagsLength = htole32(se->FlagsLength);
1857 	}
1858 
1859 	if (seg == nseg) {
1860 		goto out;
1861 	}
1862 
1863 	/*
1864 	 * Tell the IOC where to find the first chain element.
1865 	 */
1866 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1867 	nxt_off = MPT_RQSL(mpt);
1868 	trq = req;
1869 
1870 	/*
1871 	 * Make up the rest of the data segments out of a chain element
1872 	 * (contiained in the current request frame) which points to
1873 	 * SIMPLE32 elements in the next request frame, possibly ending
1874 	 * with *another* chain element (if there's more).
1875 	 */
1876 	while (seg < nseg) {
1877 		int this_seg_lim;
1878 		uint32_t tf, cur_off;
1879 		bus_addr_t chain_list_addr;
1880 
1881 		/*
1882 		 * Point to the chain descriptor. Note that the chain
1883 		 * descriptor is at the end of the *previous* list (whether
1884 		 * chain or simple).
1885 		 */
1886 		ce = (SGE_CHAIN32 *) se;
1887 
1888 		/*
1889 		 * Before we change our current pointer, make  sure we won't
1890 		 * overflow the request area with this frame. Note that we
1891 		 * test against 'greater than' here as it's okay in this case
1892 		 * to have next offset be just outside the request area.
1893 		 */
1894 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1895 			nxt_off = MPT_REQUEST_AREA;
1896 			goto next_chain;
1897 		}
1898 
1899 		/*
1900 		 * Set our SGE element pointer to the beginning of the chain
1901 		 * list and update our next chain list offset.
1902 		 */
1903 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1904 		cur_off = nxt_off;
1905 		nxt_off += MPT_RQSL(mpt);
1906 
1907 		/*
1908 		 * Now initialized the chain descriptor.
1909 		 */
1910 		memset(ce, 0, sizeof (*ce));
1911 
1912 		/*
1913 		 * Get the physical address of the chain list.
1914 		 */
1915 		chain_list_addr = trq->req_pbuf;
1916 		chain_list_addr += cur_off;
1917 
1918 
1919 
1920 		ce->Address = htole32(chain_list_addr);
1921 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1922 
1923 
1924 		/*
1925 		 * If we have more than a frame's worth of segments left,
1926 		 * set up the chain list to have the last element be another
1927 		 * chain descriptor.
1928 		 */
1929 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1930 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1931 			/*
1932 			 * The length of the chain is the length in bytes of the
1933 			 * number of segments plus the next chain element.
1934 			 *
1935 			 * The next chain descriptor offset is the length,
1936 			 * in words, of the number of segments.
1937 			 */
1938 			ce->Length = (this_seg_lim - seg) *
1939 			    sizeof (SGE_SIMPLE32);
1940 			ce->NextChainOffset = ce->Length >> 2;
1941 			ce->Length += sizeof (SGE_CHAIN32);
1942 		} else {
1943 			this_seg_lim = nseg;
1944 			ce->Length = (this_seg_lim - seg) *
1945 			    sizeof (SGE_SIMPLE32);
1946 		}
1947 		ce->Length = htole16(ce->Length);
1948 
1949 		/*
1950 		 * Fill in the chain list SGE elements with our segment data.
1951 		 *
1952 		 * If we're the last element in this chain list, set the last
1953 		 * element flag. If we're the completely last element period,
1954 		 * set the end of list and end of buffer flags.
1955 		 */
1956 		while (seg < this_seg_lim) {
1957 			memset(se, 0, sizeof (*se));
1958 			se->Address = htole32(dm_segs->ds_addr);
1959 
1960 
1961 
1962 
1963 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1964 			tf = flags;
1965 			if (seg ==  this_seg_lim - 1) {
1966 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1967 			}
1968 			if (seg == nseg - 1) {
1969 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1970 					MPI_SGE_FLAGS_END_OF_BUFFER;
1971 			}
1972 			MPI_pSGE_SET_FLAGS(se, tf);
1973 			se->FlagsLength = htole32(se->FlagsLength);
1974 			se++;
1975 			seg++;
1976 			dm_segs++;
1977 		}
1978 
1979     next_chain:
1980 		/*
1981 		 * If we have more segments to do and we've used up all of
1982 		 * the space in a request area, go allocate another one
1983 		 * and chain to that.
1984 		 */
1985 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1986 			request_t *nrq;
1987 
1988 			CAMLOCK_2_MPTLOCK(mpt);
1989 			nrq = mpt_get_request(mpt, FALSE);
1990 			MPTLOCK_2_CAMLOCK(mpt);
1991 
1992 			if (nrq == NULL) {
1993 				error = ENOMEM;
1994 				goto bad;
1995 			}
1996 
1997 			/*
1998 			 * Append the new request area on the tail of our list.
1999 			 */
2000 			if ((trq = req->chain) == NULL) {
2001 				req->chain = nrq;
2002 			} else {
2003 				while (trq->chain != NULL) {
2004 					trq = trq->chain;
2005 				}
2006 				trq->chain = nrq;
2007 			}
2008 			trq = nrq;
2009 			mpt_off = trq->req_vbuf;
2010 			if (mpt->verbose >= MPT_PRT_DEBUG) {
2011 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
2012 			}
2013 			nxt_off = 0;
2014 		}
2015 	}
2016 out:
2017 
2018 	/*
2019 	 * Last time we need to check if this CCB needs to be aborted.
2020 	 */
2021 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2022 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2023 			request_t *cmd_req =
2024 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2025 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2026 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2027 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2028 		}
2029 		mpt_prt(mpt,
2030 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2031 		    ccb->ccb_h.status & CAM_STATUS_MASK);
2032 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2033 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2034 		}
2035 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2036 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2037 		xpt_done(ccb);
2038 		CAMLOCK_2_MPTLOCK(mpt);
2039 		mpt_free_request(mpt, req);
2040 		MPTLOCK_2_CAMLOCK(mpt);
2041 		return;
2042 	}
2043 
2044 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2045 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2046 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
2047 		    mpt_timeout, ccb);
2048 	}
2049 	if (mpt->verbose > MPT_PRT_DEBUG) {
2050 		int nc = 0;
2051 		mpt_print_request(req->req_vbuf);
2052 		for (trq = req->chain; trq; trq = trq->chain) {
2053 			printf("  Additional Chain Area %d\n", nc++);
2054 			mpt_dump_sgl(trq->req_vbuf, 0);
2055 		}
2056 	}
2057 
2058 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2059 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2060 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2061 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
2062 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2063 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2064 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2065 		} else {
2066 			tgt->state = TGT_STATE_MOVING_DATA;
2067 		}
2068 #else
2069 		tgt->state = TGT_STATE_MOVING_DATA;
2070 #endif
2071 	}
2072 	CAMLOCK_2_MPTLOCK(mpt);
2073 	mpt_send_cmd(mpt, req);
2074 	MPTLOCK_2_CAMLOCK(mpt);
2075 }
2076 
2077 static void
2078 mpt_start(struct cam_sim *sim, union ccb *ccb)
2079 {
2080 	request_t *req;
2081 	struct mpt_softc *mpt;
2082 	MSG_SCSI_IO_REQUEST *mpt_req;
2083 	struct ccb_scsiio *csio = &ccb->csio;
2084 	struct ccb_hdr *ccbh = &ccb->ccb_h;
2085 	bus_dmamap_callback_t *cb;
2086 	target_id_t tgt;
2087 	int raid_passthru;
2088 
2089 	/* Get the pointer for the physical addapter */
2090 	mpt = ccb->ccb_h.ccb_mpt_ptr;
2091 	raid_passthru = (sim == mpt->phydisk_sim);
2092 
2093 	CAMLOCK_2_MPTLOCK(mpt);
2094 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2095 		if (mpt->outofbeer == 0) {
2096 			mpt->outofbeer = 1;
2097 			xpt_freeze_simq(mpt->sim, 1);
2098 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2099 		}
2100 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2101 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2102 		MPTLOCK_2_CAMLOCK(mpt);
2103 		xpt_done(ccb);
2104 		return;
2105 	}
2106 #ifdef	INVARIANTS
2107 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2108 #endif
2109 	MPTLOCK_2_CAMLOCK(mpt);
2110 
2111 	if (sizeof (bus_addr_t) > 4) {
2112 		cb = mpt_execute_req_a64;
2113 	} else {
2114 		cb = mpt_execute_req;
2115 	}
2116 
2117 	/*
2118 	 * Link the ccb and the request structure so we can find
2119 	 * the other knowing either the request or the ccb
2120 	 */
2121 	req->ccb = ccb;
2122 	ccb->ccb_h.ccb_req_ptr = req;
2123 
2124 	/* Now we build the command for the IOC */
2125 	mpt_req = req->req_vbuf;
2126 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2127 
2128 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2129 	if (raid_passthru) {
2130 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2131 		CAMLOCK_2_MPTLOCK(mpt);
2132 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2133 			MPTLOCK_2_CAMLOCK(mpt);
2134 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2135 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2136 			xpt_done(ccb);
2137 			return;
2138 		}
2139 		MPTLOCK_2_CAMLOCK(mpt);
2140 		mpt_req->Bus = 0;	/* we never set bus here */
2141 	} else {
2142 		tgt = ccb->ccb_h.target_id;
2143 		mpt_req->Bus = 0;	/* XXX */
2144 
2145 	}
2146 	mpt_req->SenseBufferLength =
2147 		(csio->sense_len < MPT_SENSE_SIZE) ?
2148 		 csio->sense_len : MPT_SENSE_SIZE;
2149 
2150 	/*
2151 	 * We use the message context to find the request structure when we
2152 	 * Get the command completion interrupt from the IOC.
2153 	 */
2154 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2155 
2156 	/* Which physical device to do the I/O on */
2157 	mpt_req->TargetID = tgt;
2158 
2159 	/* We assume a single level LUN type */
2160 	if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2161 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2162 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2163 	} else {
2164 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2165 	}
2166 
2167 	/* Set the direction of the transfer */
2168 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2169 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2170 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2171 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2172 	} else {
2173 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2174 	}
2175 
2176 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2177 		switch(ccb->csio.tag_action) {
2178 		case MSG_HEAD_OF_Q_TAG:
2179 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2180 			break;
2181 		case MSG_ACA_TASK:
2182 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2183 			break;
2184 		case MSG_ORDERED_Q_TAG:
2185 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2186 			break;
2187 		case MSG_SIMPLE_Q_TAG:
2188 		default:
2189 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2190 			break;
2191 		}
2192 	} else {
2193 		if (mpt->is_fc || mpt->is_sas) {
2194 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2195 		} else {
2196 			/* XXX No such thing for a target doing packetized. */
2197 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2198 		}
2199 	}
2200 
2201 	if (mpt->is_spi) {
2202 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2203 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2204 		}
2205 	}
2206 	mpt_req->Control = htole32(mpt_req->Control);
2207 
2208 	/* Copy the scsi command block into place */
2209 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2210 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2211 	} else {
2212 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2213 	}
2214 
2215 	mpt_req->CDBLength = csio->cdb_len;
2216 	mpt_req->DataLength = htole32(csio->dxfer_len);
2217 	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2218 
2219 	/*
2220 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2221 	 */
2222 	if (mpt->verbose == MPT_PRT_DEBUG) {
2223 		U32 df;
2224 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2225 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2226 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2227 		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2228 		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2229 			mpt_prtc(mpt, "(%s %u byte%s ",
2230 			    (df == MPI_SCSIIO_CONTROL_READ)?
2231 			    "read" : "write",  csio->dxfer_len,
2232 			    (csio->dxfer_len == 1)? ")" : "s)");
2233 		}
2234 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2235 		    ccb->ccb_h.target_lun, req, req->serno);
2236 	}
2237 
2238 	/*
2239 	 * If we have any data to send with this command map it into bus space.
2240 	 */
2241 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2242 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
2243 			/*
2244 			 * We've been given a pointer to a single buffer.
2245 			 */
2246 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
2247 				/*
2248 				 * Virtual address that needs to translated into
2249 				 * one or more physical address ranges.
2250 				 */
2251 				int error;
2252 				int s = splsoftvm();
2253 				error = bus_dmamap_load(mpt->buffer_dmat,
2254 				    req->dmap, csio->data_ptr, csio->dxfer_len,
2255 				    cb, req, 0);
2256 				splx(s);
2257 				if (error == EINPROGRESS) {
2258 					/*
2259 					 * So as to maintain ordering,
2260 					 * freeze the controller queue
2261 					 * until our mapping is
2262 					 * returned.
2263 					 */
2264 					xpt_freeze_simq(mpt->sim, 1);
2265 					ccbh->status |= CAM_RELEASE_SIMQ;
2266 				}
2267 			} else {
2268 				/*
2269 				 * We have been given a pointer to single
2270 				 * physical buffer.
2271 				 */
2272 				struct bus_dma_segment seg;
2273 				seg.ds_addr =
2274 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
2275 				seg.ds_len = csio->dxfer_len;
2276 				(*cb)(req, &seg, 1, 0);
2277 			}
2278 		} else {
2279 			/*
2280 			 * We have been given a list of addresses.
2281 			 * This case could be easily supported but they are not
2282 			 * currently generated by the CAM subsystem so there
2283 			 * is no point in wasting the time right now.
2284 			 */
2285 			struct bus_dma_segment *segs;
2286 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
2287 				(*cb)(req, NULL, 0, EFAULT);
2288 			} else {
2289 				/* Just use the segments provided */
2290 				segs = (struct bus_dma_segment *)csio->data_ptr;
2291 				(*cb)(req, segs, csio->sglist_cnt, 0);
2292 			}
2293 		}
2294 	} else {
2295 		(*cb)(req, NULL, 0, 0);
2296 	}
2297 }
2298 
2299 static int
2300 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2301     int sleep_ok)
2302 {
2303 	int   error;
2304 	uint16_t status;
2305 	uint8_t response;
2306 
2307 	error = mpt_scsi_send_tmf(mpt,
2308 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2309 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2310 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2311 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2312 	    0,	/* XXX How do I get the channel ID? */
2313 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2314 	    lun != CAM_LUN_WILDCARD ? lun : 0,
2315 	    0, sleep_ok);
2316 
2317 	if (error != 0) {
2318 		/*
2319 		 * mpt_scsi_send_tmf hard resets on failure, so no
2320 		 * need to do so here.
2321 		 */
2322 		mpt_prt(mpt,
2323 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2324 		return (EIO);
2325 	}
2326 
2327 	/* Wait for bus reset to be processed by the IOC. */
2328 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2329 	    REQ_STATE_DONE, sleep_ok, 5000);
2330 
2331 	status = le16toh(mpt->tmf_req->IOCStatus);
2332 	response = mpt->tmf_req->ResponseCode;
2333 	mpt->tmf_req->state = REQ_STATE_FREE;
2334 
2335 	if (error) {
2336 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2337 		    "Resetting controller.\n");
2338 		mpt_reset(mpt, TRUE);
2339 		return (ETIMEDOUT);
2340 	}
2341 
2342 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2343 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2344 		    "Resetting controller.\n", status);
2345 		mpt_reset(mpt, TRUE);
2346 		return (EIO);
2347 	}
2348 
2349 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2350 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2351 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2352 		    "Resetting controller.\n", response);
2353 		mpt_reset(mpt, TRUE);
2354 		return (EIO);
2355 	}
2356 	return (0);
2357 }
2358 
2359 static int
2360 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2361 {
2362 	int r = 0;
2363 	request_t *req;
2364 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2365 
2366  	req = mpt_get_request(mpt, FALSE);
2367 	if (req == NULL) {
2368 		return (ENOMEM);
2369 	}
2370 	fc = req->req_vbuf;
2371 	memset(fc, 0, sizeof(*fc));
2372 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2373 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2374 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2375 	mpt_send_cmd(mpt, req);
2376 	if (dowait) {
2377 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2378 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2379 		if (r == 0) {
2380 			mpt_free_request(mpt, req);
2381 		}
2382 	}
2383 	return (r);
2384 }
2385 
2386 static int
2387 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2388 	      MSG_EVENT_NOTIFY_REPLY *msg)
2389 {
2390 	uint32_t data0, data1;
2391 
2392 	data0 = le32toh(msg->Data[0]);
2393 	data1 = le32toh(msg->Data[1]);
2394 	switch(msg->Event & 0xFF) {
2395 	case MPI_EVENT_UNIT_ATTENTION:
2396 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2397 		    (data0 >> 8) & 0xff, data0 & 0xff);
2398 		break;
2399 
2400 	case MPI_EVENT_IOC_BUS_RESET:
2401 		/* We generated a bus reset */
2402 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2403 		    (data0 >> 8) & 0xff);
2404 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2405 		break;
2406 
2407 	case MPI_EVENT_EXT_BUS_RESET:
2408 		/* Someone else generated a bus reset */
2409 		mpt_prt(mpt, "External Bus Reset Detected\n");
2410 		/*
2411 		 * These replies don't return EventData like the MPI
2412 		 * spec says they do
2413 		 */
2414 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2415 		break;
2416 
2417 	case MPI_EVENT_RESCAN:
2418 #if __FreeBSD_version >= 600000
2419 	{
2420 		union ccb *ccb;
2421 		uint32_t pathid;
2422 		/*
2423 		 * In general this means a device has been added to the loop.
2424 		 */
2425 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2426 		if (mpt->ready == 0) {
2427 			break;
2428 		}
2429 		if (mpt->phydisk_sim) {
2430 			pathid = cam_sim_path(mpt->phydisk_sim);
2431 		} else {
2432 			pathid = cam_sim_path(mpt->sim);
2433 		}
2434 		MPTLOCK_2_CAMLOCK(mpt);
2435 		/*
2436 		 * Allocate a CCB, create a wildcard path for this bus,
2437 		 * and schedule a rescan.
2438 		 */
2439 		ccb = xpt_alloc_ccb_nowait();
2440 		if (ccb == NULL) {
2441 			mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2442 			CAMLOCK_2_MPTLOCK(mpt);
2443 			break;
2444 		}
2445 
2446 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2447 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2448 			CAMLOCK_2_MPTLOCK(mpt);
2449 			mpt_prt(mpt, "unable to create path for rescan\n");
2450 			xpt_free_ccb(ccb);
2451 			break;
2452 		}
2453 		xpt_rescan(ccb);
2454 		CAMLOCK_2_MPTLOCK(mpt);
2455 		break;
2456 	}
2457 #else
2458 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2459 		break;
2460 #endif
2461 	case MPI_EVENT_LINK_STATUS_CHANGE:
2462 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2463 		    (data1 >> 8) & 0xff,
2464 		    ((data0 & 0xff) == 0)?  "Failed" : "Active");
2465 		break;
2466 
2467 	case MPI_EVENT_LOOP_STATE_CHANGE:
2468 		switch ((data0 >> 16) & 0xff) {
2469 		case 0x01:
2470 			mpt_prt(mpt,
2471 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2472 			    "(Loop Initialization)\n",
2473 			    (data1 >> 8) & 0xff,
2474 			    (data0 >> 8) & 0xff,
2475 			    (data0     ) & 0xff);
2476 			switch ((data0 >> 8) & 0xff) {
2477 			case 0xF7:
2478 				if ((data0 & 0xff) == 0xF7) {
2479 					mpt_prt(mpt, "Device needs AL_PA\n");
2480 				} else {
2481 					mpt_prt(mpt, "Device %02x doesn't like "
2482 					    "FC performance\n",
2483 					    data0 & 0xFF);
2484 				}
2485 				break;
2486 			case 0xF8:
2487 				if ((data0 & 0xff) == 0xF7) {
2488 					mpt_prt(mpt, "Device had loop failure "
2489 					    "at its receiver prior to acquiring"
2490 					    " AL_PA\n");
2491 				} else {
2492 					mpt_prt(mpt, "Device %02x detected loop"
2493 					    " failure at its receiver\n",
2494 					    data0 & 0xFF);
2495 				}
2496 				break;
2497 			default:
2498 				mpt_prt(mpt, "Device %02x requests that device "
2499 				    "%02x reset itself\n",
2500 				    data0 & 0xFF,
2501 				    (data0 >> 8) & 0xFF);
2502 				break;
2503 			}
2504 			break;
2505 		case 0x02:
2506 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2507 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2508 			    (data1 >> 8) & 0xff, /* Port */
2509 			    (data0 >>  8) & 0xff, /* Character 3 */
2510 			    (data0      ) & 0xff  /* Character 4 */);
2511 			break;
2512 		case 0x03:
2513 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2514 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2515 			    (data1 >> 8) & 0xff, /* Port */
2516 			    (data0 >> 8) & 0xff, /* Character 3 */
2517 			    (data0     ) & 0xff  /* Character 4 */);
2518 			break;
2519 		default:
2520 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2521 			    "FC event (%02x %02x %02x)\n",
2522 			    (data1 >> 8) & 0xff, /* Port */
2523 			    (data0 >> 16) & 0xff, /* Event */
2524 			    (data0 >>  8) & 0xff, /* Character 3 */
2525 			    (data0      ) & 0xff  /* Character 4 */);
2526 		}
2527 		break;
2528 
2529 	case MPI_EVENT_LOGOUT:
2530 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2531 		    (data1 >> 8) & 0xff, data0);
2532 		break;
2533 	case MPI_EVENT_QUEUE_FULL:
2534 	{
2535 		struct cam_sim *sim;
2536 		struct cam_path *tmppath;
2537 		struct ccb_relsim crs;
2538 		PTR_EVENT_DATA_QUEUE_FULL pqf;
2539 		lun_id_t lun_id;
2540 
2541 		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2542 		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2543 		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2544 		    "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2545 		if (mpt->phydisk_sim) {
2546 			sim = mpt->phydisk_sim;
2547 		} else {
2548 			sim = mpt->sim;
2549 		}
2550 		MPTLOCK_2_CAMLOCK(mpt);
2551 		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2552 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2553 			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2554 				mpt_prt(mpt, "unable to create a path to send "
2555 				    "XPT_REL_SIMQ");
2556 				CAMLOCK_2_MPTLOCK(mpt);
2557 				break;
2558 			}
2559 			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2560 			crs.ccb_h.func_code = XPT_REL_SIMQ;
2561 			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2562 			crs.openings = pqf->CurrentDepth - 1;
2563 			xpt_action((union ccb *)&crs);
2564 			if (crs.ccb_h.status != CAM_REQ_CMP) {
2565 				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2566 			}
2567 			xpt_free_path(tmppath);
2568 		}
2569 		CAMLOCK_2_MPTLOCK(mpt);
2570 		break;
2571 	}
2572 	case MPI_EVENT_EVENT_CHANGE:
2573 	case MPI_EVENT_INTEGRATED_RAID:
2574 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2575 	case MPI_EVENT_SAS_SES:
2576 		break;
2577 	default:
2578 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2579 		    msg->Event & 0xFF);
2580 		return (0);
2581 	}
2582 	return (1);
2583 }
2584 
2585 /*
2586  * Reply path for all SCSI I/O requests, called from our
2587  * interrupt handler by extracting our handler index from
2588  * the MsgContext field of the reply from the IOC.
2589  *
2590  * This routine is optimized for the common case of a
2591  * completion without error.  All exception handling is
2592  * offloaded to non-inlined helper routines to minimize
2593  * cache footprint.
2594  */
2595 static int
2596 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2597     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2598 {
2599 	MSG_SCSI_IO_REQUEST *scsi_req;
2600 	union ccb *ccb;
2601 
2602 	if (req->state == REQ_STATE_FREE) {
2603 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2604 		return (TRUE);
2605 	}
2606 
2607 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2608 	ccb = req->ccb;
2609 	if (ccb == NULL) {
2610 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2611 		    req, req->serno);
2612 		return (TRUE);
2613 	}
2614 
2615 	mpt_req_untimeout(req, mpt_timeout, ccb);
2616 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2617 
2618 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2619 		bus_dmasync_op_t op;
2620 
2621 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2622 			op = BUS_DMASYNC_POSTREAD;
2623 		else
2624 			op = BUS_DMASYNC_POSTWRITE;
2625 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2626 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2627 	}
2628 
2629 	if (reply_frame == NULL) {
2630 		/*
2631 		 * Context only reply, completion without error status.
2632 		 */
2633 		ccb->csio.resid = 0;
2634 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2635 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2636 	} else {
2637 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2638 	}
2639 
2640 	if (mpt->outofbeer) {
2641 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2642 		mpt->outofbeer = 0;
2643 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2644 	}
2645 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2646 		struct scsi_inquiry_data *iq =
2647 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2648 		if (scsi_req->Function ==
2649 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2650 			/*
2651 			 * Fake out the device type so that only the
2652 			 * pass-thru device will attach.
2653 			 */
2654 			iq->device &= ~0x1F;
2655 			iq->device |= T_NODEVICE;
2656 		}
2657 	}
2658 	if (mpt->verbose == MPT_PRT_DEBUG) {
2659 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2660 		    req, req->serno);
2661 	}
2662 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2663 	MPTLOCK_2_CAMLOCK(mpt);
2664 	xpt_done(ccb);
2665 	CAMLOCK_2_MPTLOCK(mpt);
2666 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2667 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2668 	} else {
2669 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2670 		    req, req->serno);
2671 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2672 	}
2673 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2674 	    ("CCB req needed wakeup"));
2675 #ifdef	INVARIANTS
2676 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2677 #endif
2678 	mpt_free_request(mpt, req);
2679 	return (TRUE);
2680 }
2681 
2682 static int
2683 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2684     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2685 {
2686 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2687 
2688 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2689 #ifdef	INVARIANTS
2690 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2691 #endif
2692 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2693 	/* Record IOC Status and Response Code of TMF for any waiters. */
2694 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2695 	req->ResponseCode = tmf_reply->ResponseCode;
2696 
2697 	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2698 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2699 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2700 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2701 		req->state |= REQ_STATE_DONE;
2702 		wakeup(req);
2703 	} else {
2704 		mpt->tmf_req->state = REQ_STATE_FREE;
2705 	}
2706 	return (TRUE);
2707 }
2708 
2709 /*
2710  * XXX: Move to definitions file
2711  */
2712 #define	ELS	0x22
2713 #define	FC4LS	0x32
2714 #define	ABTS	0x81
2715 #define	BA_ACC	0x84
2716 
2717 #define	LS_RJT	0x01
2718 #define	LS_ACC	0x02
2719 #define	PLOGI	0x03
2720 #define	LOGO	0x05
2721 #define SRR	0x14
2722 #define PRLI	0x20
2723 #define PRLO	0x21
2724 #define ADISC	0x52
2725 #define RSCN	0x61
2726 
2727 static void
2728 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2729     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2730 {
2731 	uint32_t fl;
2732 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2733 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2734 
2735 	/*
2736 	 * We are going to reuse the ELS request to send this response back.
2737 	 */
2738 	rsp = &tmp;
2739 	memset(rsp, 0, sizeof(*rsp));
2740 
2741 #ifdef	USE_IMMEDIATE_LINK_DATA
2742 	/*
2743 	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2744 	 */
2745 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2746 #endif
2747 	rsp->RspLength = length;
2748 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2749 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2750 
2751 	/*
2752 	 * Copy over information from the original reply frame to
2753 	 * it's correct place in the response.
2754 	 */
2755 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2756 
2757 	/*
2758 	 * And now copy back the temporary area to the original frame.
2759 	 */
2760 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2761 	rsp = req->req_vbuf;
2762 
2763 #ifdef	USE_IMMEDIATE_LINK_DATA
2764 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2765 #else
2766 {
2767 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2768 	bus_addr_t paddr = req->req_pbuf;
2769 	paddr += MPT_RQSL(mpt);
2770 
2771 	fl =
2772 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2773 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2774 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2775 		MPI_SGE_FLAGS_END_OF_LIST	|
2776 		MPI_SGE_FLAGS_END_OF_BUFFER;
2777 	fl <<= MPI_SGE_FLAGS_SHIFT;
2778 	fl |= (length);
2779 	se->FlagsLength = htole32(fl);
2780 	se->Address = htole32((uint32_t) paddr);
2781 }
2782 #endif
2783 
2784 	/*
2785 	 * Send it on...
2786 	 */
2787 	mpt_send_cmd(mpt, req);
2788 }
2789 
2790 static int
2791 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2792     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2793 {
2794 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2795 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2796 	U8 rctl;
2797 	U8 type;
2798 	U8 cmd;
2799 	U16 status = le16toh(reply_frame->IOCStatus);
2800 	U32 *elsbuf;
2801 	int ioindex;
2802 	int do_refresh = TRUE;
2803 
2804 #ifdef	INVARIANTS
2805 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2806 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2807 	    req, req->serno, rp->Function));
2808 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2809 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2810 	} else {
2811 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2812 	}
2813 #endif
2814 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2815 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2816 	    req, req->serno, reply_frame, reply_frame->Function);
2817 
2818 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2819 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2820 		    status, reply_frame->Function);
2821 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2822 			/*
2823 			 * XXX: to get around shutdown issue
2824 			 */
2825 			mpt->disabled = 1;
2826 			return (TRUE);
2827 		}
2828 		return (TRUE);
2829 	}
2830 
2831 	/*
2832 	 * If the function of a link service response, we recycle the
2833 	 * response to be a refresh for a new link service request.
2834 	 *
2835 	 * The request pointer is bogus in this case and we have to fetch
2836 	 * it based upon the TransactionContext.
2837 	 */
2838 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2839 		/* Freddie Uncle Charlie Katie */
2840 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2841 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2842 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2843 				break;
2844 			}
2845 
2846 		KASSERT(ioindex < mpt->els_cmds_allocated,
2847 		    ("can't find my mommie!"));
2848 
2849 		/* remove from active list as we're going to re-post it */
2850 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2851 		req->state &= ~REQ_STATE_QUEUED;
2852 		req->state |= REQ_STATE_DONE;
2853 		mpt_fc_post_els(mpt, req, ioindex);
2854 		return (TRUE);
2855 	}
2856 
2857 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2858 		/* remove from active list as we're done */
2859 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2860 		req->state &= ~REQ_STATE_QUEUED;
2861 		req->state |= REQ_STATE_DONE;
2862 		if (req->state & REQ_STATE_TIMEDOUT) {
2863 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2864 			    "Sync Primitive Send Completed After Timeout\n");
2865 			mpt_free_request(mpt, req);
2866 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2867 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2868 			    "Async Primitive Send Complete\n");
2869 			mpt_free_request(mpt, req);
2870 		} else {
2871 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2872 			    "Sync Primitive Send Complete- Waking Waiter\n");
2873 			wakeup(req);
2874 		}
2875 		return (TRUE);
2876 	}
2877 
2878 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2879 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2880 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2881 		    rp->MsgLength, rp->MsgFlags);
2882 		return (TRUE);
2883 	}
2884 
2885 	if (rp->MsgLength <= 5) {
2886 		/*
2887 		 * This is just a ack of an original ELS buffer post
2888 		 */
2889 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2890 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2891 		return (TRUE);
2892 	}
2893 
2894 
2895 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2896 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2897 
2898 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2899 	cmd = be32toh(elsbuf[0]) >> 24;
2900 
2901 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2902 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2903 		return (TRUE);
2904 	}
2905 
2906 	ioindex = le32toh(rp->TransactionContext);
2907 	req = mpt->els_cmd_ptrs[ioindex];
2908 
2909 	if (rctl == ELS && type == 1) {
2910 		switch (cmd) {
2911 		case PRLI:
2912 			/*
2913 			 * Send back a PRLI ACC
2914 			 */
2915 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2916 			    le32toh(rp->Wwn.PortNameHigh),
2917 			    le32toh(rp->Wwn.PortNameLow));
2918 			elsbuf[0] = htobe32(0x02100014);
2919 			elsbuf[1] |= htobe32(0x00000100);
2920 			elsbuf[4] = htobe32(0x00000002);
2921 			if (mpt->role & MPT_ROLE_TARGET)
2922 				elsbuf[4] |= htobe32(0x00000010);
2923 			if (mpt->role & MPT_ROLE_INITIATOR)
2924 				elsbuf[4] |= htobe32(0x00000020);
2925 			/* remove from active list as we're done */
2926 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2927 			req->state &= ~REQ_STATE_QUEUED;
2928 			req->state |= REQ_STATE_DONE;
2929 			mpt_fc_els_send_response(mpt, req, rp, 20);
2930 			do_refresh = FALSE;
2931 			break;
2932 		case PRLO:
2933 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2934 			elsbuf[0] = htobe32(0x02100014);
2935 			elsbuf[1] = htobe32(0x08000100);
2936 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2937 			    le32toh(rp->Wwn.PortNameHigh),
2938 			    le32toh(rp->Wwn.PortNameLow));
2939 			/* remove from active list as we're done */
2940 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2941 			req->state &= ~REQ_STATE_QUEUED;
2942 			req->state |= REQ_STATE_DONE;
2943 			mpt_fc_els_send_response(mpt, req, rp, 20);
2944 			do_refresh = FALSE;
2945 			break;
2946 		default:
2947 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2948 			break;
2949 		}
2950 	} else if (rctl == ABTS && type == 0) {
2951 		uint16_t rx_id = le16toh(rp->Rxid);
2952 		uint16_t ox_id = le16toh(rp->Oxid);
2953 		request_t *tgt_req = NULL;
2954 
2955 		mpt_prt(mpt,
2956 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2957 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2958 		    le32toh(rp->Wwn.PortNameLow));
2959 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2960 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2961 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2962 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2963 		} else {
2964 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2965 		}
2966 		if (tgt_req) {
2967 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2968 			union ccb *ccb = tgt->ccb;
2969 			uint32_t ct_id;
2970 
2971 			/*
2972 			 * Check to make sure we have the correct command
2973 			 * The reply descriptor in the target state should
2974 			 * should contain an IoIndex that should match the
2975 			 * RX_ID.
2976 			 *
2977 			 * It'd be nice to have OX_ID to crosscheck with
2978 			 * as well.
2979 			 */
2980 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2981 
2982 			if (ct_id != rx_id) {
2983 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2984 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2985 				    rx_id, ct_id);
2986 				goto skip;
2987 			}
2988 
2989 			ccb = tgt->ccb;
2990 			if (ccb) {
2991 				mpt_prt(mpt,
2992 				    "CCB (%p): lun %u flags %x status %x\n",
2993 				    ccb, ccb->ccb_h.target_lun,
2994 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2995 			}
2996 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2997 			    "%x nxfers %x\n", tgt->state,
2998 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2999 			    tgt->nxfers);
3000   skip:
3001 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
3002 				mpt_prt(mpt, "unable to start TargetAbort\n");
3003 			}
3004 		} else {
3005 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
3006 		}
3007 		memset(elsbuf, 0, 5 * (sizeof (U32)));
3008 		elsbuf[0] = htobe32(0);
3009 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
3010 		elsbuf[2] = htobe32(0x000ffff);
3011 		/*
3012 		 * Dork with the reply frame so that the reponse to it
3013 		 * will be correct.
3014 		 */
3015 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3016 		/* remove from active list as we're done */
3017 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3018 		req->state &= ~REQ_STATE_QUEUED;
3019 		req->state |= REQ_STATE_DONE;
3020 		mpt_fc_els_send_response(mpt, req, rp, 12);
3021 		do_refresh = FALSE;
3022 	} else {
3023 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3024 	}
3025 	if (do_refresh == TRUE) {
3026 		/* remove from active list as we're done */
3027 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3028 		req->state &= ~REQ_STATE_QUEUED;
3029 		req->state |= REQ_STATE_DONE;
3030 		mpt_fc_post_els(mpt, req, ioindex);
3031 	}
3032 	return (TRUE);
3033 }
3034 
3035 /*
3036  * Clean up all SCSI Initiator personality state in response
3037  * to a controller reset.
3038  */
3039 static void
3040 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3041 {
3042 	/*
3043 	 * The pending list is already run down by
3044 	 * the generic handler.  Perform the same
3045 	 * operation on the timed out request list.
3046 	 */
3047 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3048 				   MPI_IOCSTATUS_INVALID_STATE);
3049 
3050 	/*
3051 	 * XXX: We need to repost ELS and Target Command Buffers?
3052 	 */
3053 
3054 	/*
3055 	 * Inform the XPT that a bus reset has occurred.
3056 	 */
3057 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
3058 }
3059 
3060 /*
3061  * Parse additional completion information in the reply
3062  * frame for SCSI I/O requests.
3063  */
3064 static int
3065 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3066 			     MSG_DEFAULT_REPLY *reply_frame)
3067 {
3068 	union ccb *ccb;
3069 	MSG_SCSI_IO_REPLY *scsi_io_reply;
3070 	u_int ioc_status;
3071 	u_int sstate;
3072 
3073 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3074 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3075 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3076 		("MPT SCSI I/O Handler called with incorrect reply type"));
3077 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3078 		("MPT SCSI I/O Handler called with continuation reply"));
3079 
3080 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3081 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
3082 	ioc_status &= MPI_IOCSTATUS_MASK;
3083 	sstate = scsi_io_reply->SCSIState;
3084 
3085 	ccb = req->ccb;
3086 	ccb->csio.resid =
3087 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3088 
3089 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3090 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3091 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3092 		ccb->csio.sense_resid =
3093 		    ccb->csio.sense_len - le32toh(scsi_io_reply->SenseCount);
3094 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3095 		    min(ccb->csio.sense_len,
3096 		    le32toh(scsi_io_reply->SenseCount)));
3097 	}
3098 
3099 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3100 		/*
3101 		 * Tag messages rejected, but non-tagged retry
3102 		 * was successful.
3103 XXXX
3104 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3105 		 */
3106 	}
3107 
3108 	switch(ioc_status) {
3109 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3110 		/*
3111 		 * XXX
3112 		 * Linux driver indicates that a zero
3113 		 * transfer length with this error code
3114 		 * indicates a CRC error.
3115 		 *
3116 		 * No need to swap the bytes for checking
3117 		 * against zero.
3118 		 */
3119 		if (scsi_io_reply->TransferCount == 0) {
3120 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3121 			break;
3122 		}
3123 		/* FALLTHROUGH */
3124 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3125 	case MPI_IOCSTATUS_SUCCESS:
3126 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3127 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3128 			/*
3129 			 * Status was never returned for this transaction.
3130 			 */
3131 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3132 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3133 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3134 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3135 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3136 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3137 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3138 
3139 			/* XXX Handle SPI-Packet and FCP-2 reponse info. */
3140 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3141 		} else
3142 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3143 		break;
3144 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3145 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3146 		break;
3147 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3148 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3149 		break;
3150 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3151 		/*
3152 		 * Since selection timeouts and "device really not
3153 		 * there" are grouped into this error code, report
3154 		 * selection timeout.  Selection timeouts are
3155 		 * typically retried before giving up on the device
3156 		 * whereas "device not there" errors are considered
3157 		 * unretryable.
3158 		 */
3159 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3160 		break;
3161 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3162 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3163 		break;
3164 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3165 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3166 		break;
3167 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3168 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3169 		break;
3170 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3171 		ccb->ccb_h.status = CAM_UA_TERMIO;
3172 		break;
3173 	case MPI_IOCSTATUS_INVALID_STATE:
3174 		/*
3175 		 * The IOC has been reset.  Emulate a bus reset.
3176 		 */
3177 		/* FALLTHROUGH */
3178 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3179 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3180 		break;
3181 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3182 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3183 		/*
3184 		 * Don't clobber any timeout status that has
3185 		 * already been set for this transaction.  We
3186 		 * want the SCSI layer to be able to differentiate
3187 		 * between the command we aborted due to timeout
3188 		 * and any innocent bystanders.
3189 		 */
3190 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3191 			break;
3192 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3193 		break;
3194 
3195 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3196 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3197 		break;
3198 	case MPI_IOCSTATUS_BUSY:
3199 		mpt_set_ccb_status(ccb, CAM_BUSY);
3200 		break;
3201 	case MPI_IOCSTATUS_INVALID_FUNCTION:
3202 	case MPI_IOCSTATUS_INVALID_SGL:
3203 	case MPI_IOCSTATUS_INTERNAL_ERROR:
3204 	case MPI_IOCSTATUS_INVALID_FIELD:
3205 	default:
3206 		/* XXX
3207 		 * Some of the above may need to kick
3208 		 * of a recovery action!!!!
3209 		 */
3210 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3211 		break;
3212 	}
3213 
3214 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3215 		mpt_freeze_ccb(ccb);
3216 	}
3217 
3218 	return (TRUE);
3219 }
3220 
3221 static void
3222 mpt_action(struct cam_sim *sim, union ccb *ccb)
3223 {
3224 	struct mpt_softc *mpt;
3225 	struct ccb_trans_settings *cts;
3226 	target_id_t tgt;
3227 	lun_id_t lun;
3228 	int raid_passthru;
3229 
3230 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3231 
3232 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
3233 	raid_passthru = (sim == mpt->phydisk_sim);
3234 	MPT_LOCK_ASSERT(mpt);
3235 
3236 	tgt = ccb->ccb_h.target_id;
3237 	lun = ccb->ccb_h.target_lun;
3238 	if (raid_passthru &&
3239 	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
3240 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
3241 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
3242 		CAMLOCK_2_MPTLOCK(mpt);
3243 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3244 			MPTLOCK_2_CAMLOCK(mpt);
3245 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3246 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3247 			xpt_done(ccb);
3248 			return;
3249 		}
3250 		MPTLOCK_2_CAMLOCK(mpt);
3251 	}
3252 	ccb->ccb_h.ccb_mpt_ptr = mpt;
3253 
3254 	switch (ccb->ccb_h.func_code) {
3255 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
3256 		/*
3257 		 * Do a couple of preliminary checks...
3258 		 */
3259 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3260 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3261 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3262 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3263 				break;
3264 			}
3265 		}
3266 		/* Max supported CDB length is 16 bytes */
3267 		/* XXX Unless we implement the new 32byte message type */
3268 		if (ccb->csio.cdb_len >
3269 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3270 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3271 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3272 			break;
3273 		}
3274 #ifdef	MPT_TEST_MULTIPATH
3275 		if (mpt->failure_id == ccb->ccb_h.target_id) {
3276 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3277 			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3278 			break;
3279 		}
3280 #endif
3281 		ccb->csio.scsi_status = SCSI_STATUS_OK;
3282 		mpt_start(sim, ccb);
3283 		return;
3284 
3285 	case XPT_RESET_BUS:
3286 		if (raid_passthru) {
3287 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3288 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3289 			break;
3290 		}
3291 	case XPT_RESET_DEV:
3292 		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3293 			if (bootverbose) {
3294 				xpt_print(ccb->ccb_h.path, "reset bus\n");
3295 			}
3296 		} else {
3297 			xpt_print(ccb->ccb_h.path, "reset device\n");
3298 		}
3299 		CAMLOCK_2_MPTLOCK(mpt);
3300 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3301 		MPTLOCK_2_CAMLOCK(mpt);
3302 
3303 		/*
3304 		 * mpt_bus_reset is always successful in that it
3305 		 * will fall back to a hard reset should a bus
3306 		 * reset attempt fail.
3307 		 */
3308 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3309 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3310 		break;
3311 
3312 	case XPT_ABORT:
3313 	{
3314 		union ccb *accb = ccb->cab.abort_ccb;
3315 		CAMLOCK_2_MPTLOCK(mpt);
3316 		switch (accb->ccb_h.func_code) {
3317 		case XPT_ACCEPT_TARGET_IO:
3318 		case XPT_IMMED_NOTIFY:
3319 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3320 			break;
3321 		case XPT_CONT_TARGET_IO:
3322 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3323 			ccb->ccb_h.status = CAM_UA_ABORT;
3324 			break;
3325 		case XPT_SCSI_IO:
3326 			ccb->ccb_h.status = CAM_UA_ABORT;
3327 			break;
3328 		default:
3329 			ccb->ccb_h.status = CAM_REQ_INVALID;
3330 			break;
3331 		}
3332 		MPTLOCK_2_CAMLOCK(mpt);
3333 		break;
3334 	}
3335 
3336 #ifdef	CAM_NEW_TRAN_CODE
3337 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3338 #else
3339 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3340 #endif
3341 #define	DP_DISC_ENABLE	0x1
3342 #define	DP_DISC_DISABL	0x2
3343 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
3344 
3345 #define	DP_TQING_ENABLE	0x4
3346 #define	DP_TQING_DISABL	0x8
3347 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
3348 
3349 #define	DP_WIDE		0x10
3350 #define	DP_NARROW	0x20
3351 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
3352 
3353 #define	DP_SYNC		0x40
3354 
3355 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
3356 	{
3357 #ifdef	CAM_NEW_TRAN_CODE
3358 		struct ccb_trans_settings_scsi *scsi;
3359 		struct ccb_trans_settings_spi *spi;
3360 #endif
3361 		uint8_t dval;
3362 		u_int period;
3363 		u_int offset;
3364 		int i, j;
3365 
3366 		cts = &ccb->cts;
3367 
3368 		if (mpt->is_fc || mpt->is_sas) {
3369 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3370 			break;
3371 		}
3372 
3373 #ifdef	CAM_NEW_TRAN_CODE
3374 		scsi = &cts->proto_specific.scsi;
3375 		spi = &cts->xport_specific.spi;
3376 
3377 		/*
3378 		 * We can be called just to valid transport and proto versions
3379 		 */
3380 		if (scsi->valid == 0 && spi->valid == 0) {
3381 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3382 			break;
3383 		}
3384 #endif
3385 
3386 		/*
3387 		 * Skip attempting settings on RAID volume disks.
3388 		 * Other devices on the bus get the normal treatment.
3389 		 */
3390 		if (mpt->phydisk_sim && raid_passthru == 0 &&
3391 		    mpt_is_raid_volume(mpt, tgt) != 0) {
3392 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3393 			    "no transfer settings for RAID vols\n");
3394 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3395 			break;
3396 		}
3397 
3398 		i = mpt->mpt_port_page2.PortSettings &
3399 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3400 		j = mpt->mpt_port_page2.PortFlags &
3401 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3402 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3403 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3404 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
3405 			    "honoring BIOS transfer negotiations\n");
3406 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3407 			break;
3408 		}
3409 
3410 		dval = 0;
3411 		period = 0;
3412 		offset = 0;
3413 
3414 #ifndef	CAM_NEW_TRAN_CODE
3415 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3416 			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3417 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3418 		}
3419 
3420 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3421 			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3422 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3423 		}
3424 
3425 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3426 			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3427 		}
3428 
3429 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3430 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3431 			dval |= DP_SYNC;
3432 			period = cts->sync_period;
3433 			offset = cts->sync_offset;
3434 		}
3435 #else
3436 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3437 			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3438 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3439 		}
3440 
3441 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3442 			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3443 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3444 		}
3445 
3446 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3447 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3448 			    DP_WIDE : DP_NARROW;
3449 		}
3450 
3451 		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3452 			dval |= DP_SYNC;
3453 			offset = spi->sync_offset;
3454 		} else {
3455 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3456 			    &mpt->mpt_dev_page1[tgt];
3457 			offset = ptr->RequestedParameters;
3458 			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3459 	    		offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3460 		}
3461 		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3462 			dval |= DP_SYNC;
3463 			period = spi->sync_period;
3464 		} else {
3465 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3466 			    &mpt->mpt_dev_page1[tgt];
3467 			period = ptr->RequestedParameters;
3468 			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3469 	    		period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3470 		}
3471 #endif
3472 		CAMLOCK_2_MPTLOCK(mpt);
3473 		if (dval & DP_DISC_ENABLE) {
3474 			mpt->mpt_disc_enable |= (1 << tgt);
3475 		} else if (dval & DP_DISC_DISABL) {
3476 			mpt->mpt_disc_enable &= ~(1 << tgt);
3477 		}
3478 		if (dval & DP_TQING_ENABLE) {
3479 			mpt->mpt_tag_enable |= (1 << tgt);
3480 		} else if (dval & DP_TQING_DISABL) {
3481 			mpt->mpt_tag_enable &= ~(1 << tgt);
3482 		}
3483 		if (dval & DP_WIDTH) {
3484 			mpt_setwidth(mpt, tgt, 1);
3485 		}
3486 		if (dval & DP_SYNC) {
3487 			mpt_setsync(mpt, tgt, period, offset);
3488 		}
3489 		if (dval == 0) {
3490 			MPTLOCK_2_CAMLOCK(mpt);
3491 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3492 			break;
3493 		}
3494 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3495 		    "set [%d]: 0x%x period 0x%x offset %d\n",
3496 		    tgt, dval, period, offset);
3497 		if (mpt_update_spi_config(mpt, tgt)) {
3498 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3499 		} else {
3500 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3501 		}
3502 		MPTLOCK_2_CAMLOCK(mpt);
3503 		break;
3504 	}
3505 	case XPT_GET_TRAN_SETTINGS:
3506 	{
3507 #ifdef	CAM_NEW_TRAN_CODE
3508 		struct ccb_trans_settings_scsi *scsi;
3509 		cts = &ccb->cts;
3510 		cts->protocol = PROTO_SCSI;
3511 		if (mpt->is_fc) {
3512 			struct ccb_trans_settings_fc *fc =
3513 			    &cts->xport_specific.fc;
3514 			cts->protocol_version = SCSI_REV_SPC;
3515 			cts->transport = XPORT_FC;
3516 			cts->transport_version = 0;
3517 			fc->valid = CTS_FC_VALID_SPEED;
3518 			fc->bitrate = 100000;
3519 		} else if (mpt->is_sas) {
3520 			struct ccb_trans_settings_sas *sas =
3521 			    &cts->xport_specific.sas;
3522 			cts->protocol_version = SCSI_REV_SPC2;
3523 			cts->transport = XPORT_SAS;
3524 			cts->transport_version = 0;
3525 			sas->valid = CTS_SAS_VALID_SPEED;
3526 			sas->bitrate = 300000;
3527 		} else {
3528 			cts->protocol_version = SCSI_REV_2;
3529 			cts->transport = XPORT_SPI;
3530 			cts->transport_version = 2;
3531 			if (mpt_get_spi_settings(mpt, cts) != 0) {
3532 				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3533 				break;
3534 			}
3535 		}
3536 		scsi = &cts->proto_specific.scsi;
3537 		scsi->valid = CTS_SCSI_VALID_TQ;
3538 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3539 #else
3540 		cts = &ccb->cts;
3541 		if (mpt->is_fc) {
3542 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3543 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3544 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3545 		} else if (mpt->is_sas) {
3546 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3547 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3548 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3549 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
3550 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3551 			break;
3552 		}
3553 #endif
3554 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3555 		break;
3556 	}
3557 	case XPT_CALC_GEOMETRY:
3558 	{
3559 		struct ccb_calc_geometry *ccg;
3560 
3561 		ccg = &ccb->ccg;
3562 		if (ccg->block_size == 0) {
3563 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3564 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3565 			break;
3566 		}
3567 		mpt_calc_geometry(ccg, /*extended*/1);
3568 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3569 		break;
3570 	}
3571 	case XPT_PATH_INQ:		/* Path routing inquiry */
3572 	{
3573 		struct ccb_pathinq *cpi = &ccb->cpi;
3574 
3575 		cpi->version_num = 1;
3576 		cpi->target_sprt = 0;
3577 		cpi->hba_eng_cnt = 0;
3578 		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3579 		/*
3580 		 * FC cards report MAX_DEVICES of 512, but
3581 		 * the MSG_SCSI_IO_REQUEST target id field
3582 		 * is only 8 bits. Until we fix the driver
3583 		 * to support 'channels' for bus overflow,
3584 		 * just limit it.
3585 		 */
3586 		if (cpi->max_target > 255) {
3587 			cpi->max_target = 255;
3588 		}
3589 
3590 		/*
3591 		 * VMware ESX reports > 16 devices and then dies when we probe.
3592 		 */
3593 		if (mpt->is_spi && cpi->max_target > 15) {
3594 			cpi->max_target = 15;
3595 		}
3596 		if (mpt->is_spi)
3597 			cpi->max_lun = 7;
3598 		else
3599 			cpi->max_lun = MPT_MAX_LUNS;
3600 		cpi->initiator_id = mpt->mpt_ini_id;
3601 		cpi->bus_id = cam_sim_bus(sim);
3602 
3603 		/*
3604 		 * The base speed is the speed of the underlying connection.
3605 		 */
3606 #ifdef	CAM_NEW_TRAN_CODE
3607 		cpi->protocol = PROTO_SCSI;
3608 		if (mpt->is_fc) {
3609 			cpi->hba_misc = PIM_NOBUSRESET;
3610 			cpi->base_transfer_speed = 100000;
3611 			cpi->hba_inquiry = PI_TAG_ABLE;
3612 			cpi->transport = XPORT_FC;
3613 			cpi->transport_version = 0;
3614 			cpi->protocol_version = SCSI_REV_SPC;
3615 		} else if (mpt->is_sas) {
3616 			cpi->hba_misc = PIM_NOBUSRESET;
3617 			cpi->base_transfer_speed = 300000;
3618 			cpi->hba_inquiry = PI_TAG_ABLE;
3619 			cpi->transport = XPORT_SAS;
3620 			cpi->transport_version = 0;
3621 			cpi->protocol_version = SCSI_REV_SPC2;
3622 		} else {
3623 			cpi->hba_misc = PIM_SEQSCAN;
3624 			cpi->base_transfer_speed = 3300;
3625 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3626 			cpi->transport = XPORT_SPI;
3627 			cpi->transport_version = 2;
3628 			cpi->protocol_version = SCSI_REV_2;
3629 		}
3630 #else
3631 		if (mpt->is_fc) {
3632 			cpi->hba_misc = PIM_NOBUSRESET;
3633 			cpi->base_transfer_speed = 100000;
3634 			cpi->hba_inquiry = PI_TAG_ABLE;
3635 		} else if (mpt->is_sas) {
3636 			cpi->hba_misc = PIM_NOBUSRESET;
3637 			cpi->base_transfer_speed = 300000;
3638 			cpi->hba_inquiry = PI_TAG_ABLE;
3639 		} else {
3640 			cpi->hba_misc = PIM_SEQSCAN;
3641 			cpi->base_transfer_speed = 3300;
3642 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3643 		}
3644 #endif
3645 
3646 		/*
3647 		 * We give our fake RAID passhtru bus a width that is MaxVolumes
3648 		 * wide and restrict it to one lun.
3649 		 */
3650 		if (raid_passthru) {
3651 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3652 			cpi->initiator_id = cpi->max_target + 1;
3653 			cpi->max_lun = 0;
3654 		}
3655 
3656 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3657 			cpi->hba_misc |= PIM_NOINITIATOR;
3658 		}
3659 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3660 			cpi->target_sprt =
3661 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3662 		} else {
3663 			cpi->target_sprt = 0;
3664 		}
3665 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3666 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3667 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3668 		cpi->unit_number = cam_sim_unit(sim);
3669 		cpi->ccb_h.status = CAM_REQ_CMP;
3670 		break;
3671 	}
3672 	case XPT_EN_LUN:		/* Enable LUN as a target */
3673 	{
3674 		int result;
3675 
3676 		CAMLOCK_2_MPTLOCK(mpt);
3677 		if (ccb->cel.enable)
3678 			result = mpt_enable_lun(mpt,
3679 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3680 		else
3681 			result = mpt_disable_lun(mpt,
3682 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3683 		MPTLOCK_2_CAMLOCK(mpt);
3684 		if (result == 0) {
3685 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3686 		} else {
3687 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3688 		}
3689 		break;
3690 	}
3691 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3692 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3693 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3694 	{
3695 		tgt_resource_t *trtp;
3696 		lun_id_t lun = ccb->ccb_h.target_lun;
3697 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3698 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3699 		ccb->ccb_h.flags = 0;
3700 
3701 		if (lun == CAM_LUN_WILDCARD) {
3702 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3703 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3704 				break;
3705 			}
3706 			trtp = &mpt->trt_wildcard;
3707 		} else if (lun >= MPT_MAX_LUNS) {
3708 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3709 			break;
3710 		} else {
3711 			trtp = &mpt->trt[lun];
3712 		}
3713 		CAMLOCK_2_MPTLOCK(mpt);
3714 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3715 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3716 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3717 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3718 			    sim_links.stqe);
3719 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3720 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3721 			    "Put FREE INOT lun %d\n", lun);
3722 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3723 			    sim_links.stqe);
3724 		} else {
3725 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3726 		}
3727 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3728 		MPTLOCK_2_CAMLOCK(mpt);
3729 		return;
3730 	}
3731 	case XPT_CONT_TARGET_IO:
3732 		CAMLOCK_2_MPTLOCK(mpt);
3733 		mpt_target_start_io(mpt, ccb);
3734 		MPTLOCK_2_CAMLOCK(mpt);
3735 		return;
3736 
3737 	default:
3738 		ccb->ccb_h.status = CAM_REQ_INVALID;
3739 		break;
3740 	}
3741 	xpt_done(ccb);
3742 }
3743 
3744 static int
3745 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3746 {
3747 #ifdef	CAM_NEW_TRAN_CODE
3748 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3749 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3750 #endif
3751 	target_id_t tgt;
3752 	uint32_t dval, pval, oval;
3753 	int rv;
3754 
3755 	if (IS_CURRENT_SETTINGS(cts) == 0) {
3756 		tgt = cts->ccb_h.target_id;
3757 	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3758 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3759 			return (-1);
3760 		}
3761 	} else {
3762 		tgt = cts->ccb_h.target_id;
3763 	}
3764 
3765 	/*
3766 	 * We aren't looking at Port Page 2 BIOS settings here-
3767 	 * sometimes these have been known to be bogus XXX.
3768 	 *
3769 	 * For user settings, we pick the max from port page 0
3770 	 *
3771 	 * For current settings we read the current settings out from
3772 	 * device page 0 for that target.
3773 	 */
3774 	if (IS_CURRENT_SETTINGS(cts)) {
3775 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3776 		dval = 0;
3777 
3778 		CAMLOCK_2_MPTLOCK(mpt);
3779 		tmp = mpt->mpt_dev_page0[tgt];
3780 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3781 		    sizeof(tmp), FALSE, 5000);
3782 		if (rv) {
3783 			MPTLOCK_2_CAMLOCK(mpt);
3784 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3785 			return (rv);
3786 		}
3787 		mpt2host_config_page_scsi_device_0(&tmp);
3788 
3789 		MPTLOCK_2_CAMLOCK(mpt);
3790 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3791 		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3792 		    tmp.NegotiatedParameters, tmp.Information);
3793 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3794 		    DP_WIDE : DP_NARROW;
3795 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3796 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3797 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3798 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3799 		oval = tmp.NegotiatedParameters;
3800 		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3801 		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3802 		pval = tmp.NegotiatedParameters;
3803 		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3804 		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3805 		mpt->mpt_dev_page0[tgt] = tmp;
3806 	} else {
3807 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3808 		oval = mpt->mpt_port_page0.Capabilities;
3809 		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3810 		pval = mpt->mpt_port_page0.Capabilities;
3811 		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3812 	}
3813 
3814 #ifndef	CAM_NEW_TRAN_CODE
3815 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3816 	cts->valid = 0;
3817 	cts->sync_period = pval;
3818 	cts->sync_offset = oval;
3819 	cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3820 	cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3821 	cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3822 	if (dval & DP_WIDE) {
3823 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3824 	} else {
3825 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3826 	}
3827 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3828 		cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3829 		if (dval & DP_DISC_ENABLE) {
3830 			cts->flags |= CCB_TRANS_DISC_ENB;
3831 		}
3832 		if (dval & DP_TQING_ENABLE) {
3833 			cts->flags |= CCB_TRANS_TAG_ENB;
3834 		}
3835 	}
3836 #else
3837 	spi->valid = 0;
3838 	scsi->valid = 0;
3839 	spi->flags = 0;
3840 	scsi->flags = 0;
3841 	spi->sync_offset = oval;
3842 	spi->sync_period = pval;
3843 	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3844 	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3845 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3846 	if (dval & DP_WIDE) {
3847 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3848 	} else {
3849 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3850 	}
3851 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3852 		scsi->valid = CTS_SCSI_VALID_TQ;
3853 		if (dval & DP_TQING_ENABLE) {
3854 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3855 		}
3856 		spi->valid |= CTS_SPI_VALID_DISC;
3857 		if (dval & DP_DISC_ENABLE) {
3858 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3859 		}
3860 	}
3861 #endif
3862 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3863 	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3864 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3865 	return (0);
3866 }
3867 
3868 static void
3869 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3870 {
3871 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3872 
3873 	ptr = &mpt->mpt_dev_page1[tgt];
3874 	if (onoff) {
3875 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3876 	} else {
3877 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3878 	}
3879 }
3880 
3881 static void
3882 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3883 {
3884 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3885 
3886 	ptr = &mpt->mpt_dev_page1[tgt];
3887 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3888 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3889 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3890 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3891 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3892 	if (period == 0) {
3893 		return;
3894 	}
3895 	ptr->RequestedParameters |=
3896 	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3897 	ptr->RequestedParameters |=
3898 	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3899 	if (period < 0xa) {
3900 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3901 	}
3902 	if (period < 0x9) {
3903 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3904 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3905 	}
3906 }
3907 
3908 static int
3909 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3910 {
3911 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3912 	int rv;
3913 
3914 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3915 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3916 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3917 	tmp = mpt->mpt_dev_page1[tgt];
3918 	host2mpt_config_page_scsi_device_1(&tmp);
3919 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3920 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3921 	if (rv) {
3922 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3923 		return (-1);
3924 	}
3925 	return (0);
3926 }
3927 
3928 static void
3929 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3930 {
3931 #if __FreeBSD_version >= 500000
3932 	cam_calc_geometry(ccg, extended);
3933 #else
3934 	uint32_t size_mb;
3935 	uint32_t secs_per_cylinder;
3936 
3937 	if (ccg->block_size == 0) {
3938 		ccg->ccb_h.status = CAM_REQ_INVALID;
3939 		return;
3940 	}
3941 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3942 	if (size_mb > 1024 && extended) {
3943 		ccg->heads = 255;
3944 		ccg->secs_per_track = 63;
3945 	} else {
3946 		ccg->heads = 64;
3947 		ccg->secs_per_track = 32;
3948 	}
3949 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3950 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3951 	ccg->ccb_h.status = CAM_REQ_CMP;
3952 #endif
3953 }
3954 
3955 /****************************** Timeout Recovery ******************************/
3956 static int
3957 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3958 {
3959 	int error;
3960 
3961 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3962 	    &mpt->recovery_thread, /*flags*/0,
3963 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3964 	return (error);
3965 }
3966 
3967 static void
3968 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3969 {
3970 	if (mpt->recovery_thread == NULL) {
3971 		return;
3972 	}
3973 	mpt->shutdwn_recovery = 1;
3974 	wakeup(mpt);
3975 	/*
3976 	 * Sleep on a slightly different location
3977 	 * for this interlock just for added safety.
3978 	 */
3979 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3980 }
3981 
3982 static void
3983 mpt_recovery_thread(void *arg)
3984 {
3985 	struct mpt_softc *mpt;
3986 
3987 	mpt = (struct mpt_softc *)arg;
3988 	MPT_LOCK(mpt);
3989 	for (;;) {
3990 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3991 			if (mpt->shutdwn_recovery == 0) {
3992 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3993 			}
3994 		}
3995 		if (mpt->shutdwn_recovery != 0) {
3996 			break;
3997 		}
3998 		mpt_recover_commands(mpt);
3999 	}
4000 	mpt->recovery_thread = NULL;
4001 	wakeup(&mpt->recovery_thread);
4002 	MPT_UNLOCK(mpt);
4003 	mpt_kthread_exit(0);
4004 }
4005 
4006 static int
4007 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
4008     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
4009 {
4010 	MSG_SCSI_TASK_MGMT *tmf_req;
4011 	int		    error;
4012 
4013 	/*
4014 	 * Wait for any current TMF request to complete.
4015 	 * We're only allowed to issue one TMF at a time.
4016 	 */
4017 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
4018 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
4019 	if (error != 0) {
4020 		mpt_reset(mpt, TRUE);
4021 		return (ETIMEDOUT);
4022 	}
4023 
4024 	mpt_assign_serno(mpt, mpt->tmf_req);
4025 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
4026 
4027 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
4028 	memset(tmf_req, 0, sizeof(*tmf_req));
4029 	tmf_req->TargetID = target;
4030 	tmf_req->Bus = channel;
4031 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4032 	tmf_req->TaskType = type;
4033 	tmf_req->MsgFlags = flags;
4034 	tmf_req->MsgContext =
4035 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
4036 	if (lun > MPT_MAX_LUNS) {
4037 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4038 		tmf_req->LUN[1] = lun & 0xff;
4039 	} else {
4040 		tmf_req->LUN[1] = lun;
4041 	}
4042 	tmf_req->TaskMsgContext = abort_ctx;
4043 
4044 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4045 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
4046 	    mpt->tmf_req->serno, tmf_req->MsgContext);
4047 	if (mpt->verbose > MPT_PRT_DEBUG) {
4048 		mpt_print_request(tmf_req);
4049 	}
4050 
4051 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4052 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4053 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4054 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4055 	if (error != MPT_OK) {
4056 		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4057 		mpt->tmf_req->state = REQ_STATE_FREE;
4058 		mpt_reset(mpt, TRUE);
4059 	}
4060 	return (error);
4061 }
4062 
4063 /*
4064  * When a command times out, it is placed on the requeust_timeout_list
4065  * and we wake our recovery thread.  The MPT-Fusion architecture supports
4066  * only a single TMF operation at a time, so we serially abort/bdr, etc,
4067  * the timedout transactions.  The next TMF is issued either by the
4068  * completion handler of the current TMF waking our recovery thread,
4069  * or the TMF timeout handler causing a hard reset sequence.
4070  */
4071 static void
4072 mpt_recover_commands(struct mpt_softc *mpt)
4073 {
4074 	request_t	   *req;
4075 	union ccb	   *ccb;
4076 	int		    error;
4077 
4078 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4079 		/*
4080 		 * No work to do- leave.
4081 		 */
4082 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4083 		return;
4084 	}
4085 
4086 	/*
4087 	 * Flush any commands whose completion coincides with their timeout.
4088 	 */
4089 	mpt_intr(mpt);
4090 
4091 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4092 		/*
4093 		 * The timedout commands have already
4094 		 * completed.  This typically means
4095 		 * that either the timeout value was on
4096 		 * the hairy edge of what the device
4097 		 * requires or - more likely - interrupts
4098 		 * are not happening.
4099 		 */
4100 		mpt_prt(mpt, "Timedout requests already complete. "
4101 		    "Interrupts may not be functioning.\n");
4102 		mpt_enable_ints(mpt);
4103 		return;
4104 	}
4105 
4106 	/*
4107 	 * We have no visibility into the current state of the
4108 	 * controller, so attempt to abort the commands in the
4109 	 * order they timed-out. For initiator commands, we
4110 	 * depend on the reply handler pulling requests off
4111 	 * the timeout list.
4112 	 */
4113 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4114 		uint16_t status;
4115 		uint8_t response;
4116 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4117 
4118 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4119 		    req, req->serno, hdrp->Function);
4120 		ccb = req->ccb;
4121 		if (ccb == NULL) {
4122 			mpt_prt(mpt, "null ccb in timed out request. "
4123 			    "Resetting Controller.\n");
4124 			mpt_reset(mpt, TRUE);
4125 			continue;
4126 		}
4127 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4128 
4129 		/*
4130 		 * Check to see if this is not an initiator command and
4131 		 * deal with it differently if it is.
4132 		 */
4133 		switch (hdrp->Function) {
4134 		case MPI_FUNCTION_SCSI_IO_REQUEST:
4135 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4136 			break;
4137 		default:
4138 			/*
4139 			 * XXX: FIX ME: need to abort target assists...
4140 			 */
4141 			mpt_prt(mpt, "just putting it back on the pend q\n");
4142 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4143 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4144 			    links);
4145 			continue;
4146 		}
4147 
4148 		error = mpt_scsi_send_tmf(mpt,
4149 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4150 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4151 		    htole32(req->index | scsi_io_handler_id), TRUE);
4152 
4153 		if (error != 0) {
4154 			/*
4155 			 * mpt_scsi_send_tmf hard resets on failure, so no
4156 			 * need to do so here.  Our queue should be emptied
4157 			 * by the hard reset.
4158 			 */
4159 			continue;
4160 		}
4161 
4162 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4163 		    REQ_STATE_DONE, TRUE, 500);
4164 
4165 		status = le16toh(mpt->tmf_req->IOCStatus);
4166 		response = mpt->tmf_req->ResponseCode;
4167 		mpt->tmf_req->state = REQ_STATE_FREE;
4168 
4169 		if (error != 0) {
4170 			/*
4171 			 * If we've errored out,, reset the controller.
4172 			 */
4173 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4174 			    "Resetting controller\n");
4175 			mpt_reset(mpt, TRUE);
4176 			continue;
4177 		}
4178 
4179 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4180 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4181 			    "Resetting controller.\n", status);
4182 			mpt_reset(mpt, TRUE);
4183 			continue;
4184 		}
4185 
4186 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4187 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4188 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4189 			    "Resetting controller.\n", response);
4190 			mpt_reset(mpt, TRUE);
4191 			continue;
4192 		}
4193 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4194 	}
4195 }
4196 
4197 /************************ Target Mode Support ****************************/
4198 static void
4199 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4200 {
4201 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4202 	PTR_SGE_TRANSACTION32 tep;
4203 	PTR_SGE_SIMPLE32 se;
4204 	bus_addr_t paddr;
4205 	uint32_t fl;
4206 
4207 	paddr = req->req_pbuf;
4208 	paddr += MPT_RQSL(mpt);
4209 
4210 	fc = req->req_vbuf;
4211 	memset(fc, 0, MPT_REQUEST_AREA);
4212 	fc->BufferCount = 1;
4213 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4214 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
4215 
4216 	/*
4217 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
4218 	 * consist of a TE SGL element (with details length of zero)
4219 	 * followe by a SIMPLE SGL element which holds the address
4220 	 * of the buffer.
4221 	 */
4222 
4223 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4224 
4225 	tep->ContextSize = 4;
4226 	tep->Flags = 0;
4227 	tep->TransactionContext[0] = htole32(ioindex);
4228 
4229 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4230 	fl =
4231 		MPI_SGE_FLAGS_HOST_TO_IOC	|
4232 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4233 		MPI_SGE_FLAGS_LAST_ELEMENT	|
4234 		MPI_SGE_FLAGS_END_OF_LIST	|
4235 		MPI_SGE_FLAGS_END_OF_BUFFER;
4236 	fl <<= MPI_SGE_FLAGS_SHIFT;
4237 	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4238 	se->FlagsLength = htole32(fl);
4239 	se->Address = htole32((uint32_t) paddr);
4240 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4241 	    "add ELS index %d ioindex %d for %p:%u\n",
4242 	    req->index, ioindex, req, req->serno);
4243 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4244 	    ("mpt_fc_post_els: request not locked"));
4245 	mpt_send_cmd(mpt, req);
4246 }
4247 
4248 static void
4249 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4250 {
4251 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4252 	PTR_CMD_BUFFER_DESCRIPTOR cb;
4253 	bus_addr_t paddr;
4254 
4255 	paddr = req->req_pbuf;
4256 	paddr += MPT_RQSL(mpt);
4257 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4258 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4259 
4260 	fc = req->req_vbuf;
4261 	fc->BufferCount = 1;
4262 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4263 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4264 
4265 	cb = &fc->Buffer[0];
4266 	cb->IoIndex = htole16(ioindex);
4267 	cb->u.PhysicalAddress32 = htole32((U32) paddr);
4268 
4269 	mpt_check_doorbell(mpt);
4270 	mpt_send_cmd(mpt, req);
4271 }
4272 
4273 static int
4274 mpt_add_els_buffers(struct mpt_softc *mpt)
4275 {
4276 	int i;
4277 
4278 	if (mpt->is_fc == 0) {
4279 		return (TRUE);
4280 	}
4281 
4282 	if (mpt->els_cmds_allocated) {
4283 		return (TRUE);
4284 	}
4285 
4286 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4287 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4288 
4289 	if (mpt->els_cmd_ptrs == NULL) {
4290 		return (FALSE);
4291 	}
4292 
4293 	/*
4294 	 * Feed the chip some ELS buffer resources
4295 	 */
4296 	for (i = 0; i < MPT_MAX_ELS; i++) {
4297 		request_t *req = mpt_get_request(mpt, FALSE);
4298 		if (req == NULL) {
4299 			break;
4300 		}
4301 		req->state |= REQ_STATE_LOCKED;
4302 		mpt->els_cmd_ptrs[i] = req;
4303 		mpt_fc_post_els(mpt, req, i);
4304 	}
4305 
4306 	if (i == 0) {
4307 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
4308 		free(mpt->els_cmd_ptrs, M_DEVBUF);
4309 		mpt->els_cmd_ptrs = NULL;
4310 		return (FALSE);
4311 	}
4312 	if (i != MPT_MAX_ELS) {
4313 		mpt_lprt(mpt, MPT_PRT_INFO,
4314 		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
4315 	}
4316 	mpt->els_cmds_allocated = i;
4317 	return(TRUE);
4318 }
4319 
4320 static int
4321 mpt_add_target_commands(struct mpt_softc *mpt)
4322 {
4323 	int i, max;
4324 
4325 	if (mpt->tgt_cmd_ptrs) {
4326 		return (TRUE);
4327 	}
4328 
4329 	max = MPT_MAX_REQUESTS(mpt) >> 1;
4330 	if (max > mpt->mpt_max_tgtcmds) {
4331 		max = mpt->mpt_max_tgtcmds;
4332 	}
4333 	mpt->tgt_cmd_ptrs =
4334 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4335 	if (mpt->tgt_cmd_ptrs == NULL) {
4336 		mpt_prt(mpt,
4337 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
4338 		return (FALSE);
4339 	}
4340 
4341 	for (i = 0; i < max; i++) {
4342 		request_t *req;
4343 
4344 		req = mpt_get_request(mpt, FALSE);
4345 		if (req == NULL) {
4346 			break;
4347 		}
4348 		req->state |= REQ_STATE_LOCKED;
4349 		mpt->tgt_cmd_ptrs[i] = req;
4350 		mpt_post_target_command(mpt, req, i);
4351 	}
4352 
4353 
4354 	if (i == 0) {
4355 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4356 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4357 		mpt->tgt_cmd_ptrs = NULL;
4358 		return (FALSE);
4359 	}
4360 
4361 	mpt->tgt_cmds_allocated = i;
4362 
4363 	if (i < max) {
4364 		mpt_lprt(mpt, MPT_PRT_INFO,
4365 		    "added %d of %d target bufs\n", i, max);
4366 	}
4367 	return (i);
4368 }
4369 
4370 static int
4371 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4372 {
4373 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4374 		mpt->twildcard = 1;
4375 	} else if (lun >= MPT_MAX_LUNS) {
4376 		return (EINVAL);
4377 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4378 		return (EINVAL);
4379 	}
4380 	if (mpt->tenabled == 0) {
4381 		if (mpt->is_fc) {
4382 			(void) mpt_fc_reset_link(mpt, 0);
4383 		}
4384 		mpt->tenabled = 1;
4385 	}
4386 	if (lun == CAM_LUN_WILDCARD) {
4387 		mpt->trt_wildcard.enabled = 1;
4388 	} else {
4389 		mpt->trt[lun].enabled = 1;
4390 	}
4391 	return (0);
4392 }
4393 
4394 static int
4395 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4396 {
4397 	int i;
4398 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4399 		mpt->twildcard = 0;
4400 	} else if (lun >= MPT_MAX_LUNS) {
4401 		return (EINVAL);
4402 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4403 		return (EINVAL);
4404 	}
4405 	if (lun == CAM_LUN_WILDCARD) {
4406 		mpt->trt_wildcard.enabled = 0;
4407 	} else {
4408 		mpt->trt[lun].enabled = 0;
4409 	}
4410 	for (i = 0; i < MPT_MAX_LUNS; i++) {
4411 		if (mpt->trt[lun].enabled) {
4412 			break;
4413 		}
4414 	}
4415 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4416 		if (mpt->is_fc) {
4417 			(void) mpt_fc_reset_link(mpt, 0);
4418 		}
4419 		mpt->tenabled = 0;
4420 	}
4421 	return (0);
4422 }
4423 
4424 /*
4425  * Called with MPT lock held
4426  */
4427 static void
4428 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4429 {
4430 	struct ccb_scsiio *csio = &ccb->csio;
4431 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4432 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4433 
4434 	switch (tgt->state) {
4435 	case TGT_STATE_IN_CAM:
4436 		break;
4437 	case TGT_STATE_MOVING_DATA:
4438 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4439 		xpt_freeze_simq(mpt->sim, 1);
4440 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4441 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4442 		MPTLOCK_2_CAMLOCK(mpt);
4443 		xpt_done(ccb);
4444 		CAMLOCK_2_MPTLOCK(mpt);
4445 		return;
4446 	default:
4447 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4448 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4449 		mpt_tgt_dump_req_state(mpt, cmd_req);
4450 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4451 		MPTLOCK_2_CAMLOCK(mpt);
4452 		xpt_done(ccb);
4453 		CAMLOCK_2_MPTLOCK(mpt);
4454 		return;
4455 	}
4456 
4457 	if (csio->dxfer_len) {
4458 		bus_dmamap_callback_t *cb;
4459 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4460 		request_t *req;
4461 
4462 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4463 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
4464 
4465 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4466 			if (mpt->outofbeer == 0) {
4467 				mpt->outofbeer = 1;
4468 				xpt_freeze_simq(mpt->sim, 1);
4469 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4470 			}
4471 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4472 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4473 			MPTLOCK_2_CAMLOCK(mpt);
4474 			xpt_done(ccb);
4475 			CAMLOCK_2_MPTLOCK(mpt);
4476 			return;
4477 		}
4478 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4479 		if (sizeof (bus_addr_t) > 4) {
4480 			cb = mpt_execute_req_a64;
4481 		} else {
4482 			cb = mpt_execute_req;
4483 		}
4484 
4485 		req->ccb = ccb;
4486 		ccb->ccb_h.ccb_req_ptr = req;
4487 
4488 		/*
4489 		 * Record the currently active ccb and the
4490 		 * request for it in our target state area.
4491 		 */
4492 		tgt->ccb = ccb;
4493 		tgt->req = req;
4494 
4495 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4496 		ta = req->req_vbuf;
4497 
4498 		if (mpt->is_sas) {
4499 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4500 			     cmd_req->req_vbuf;
4501 			ta->QueueTag = ssp->InitiatorTag;
4502 		} else if (mpt->is_spi) {
4503 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4504 			     cmd_req->req_vbuf;
4505 			ta->QueueTag = sp->Tag;
4506 		}
4507 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4508 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4509 		ta->ReplyWord = htole32(tgt->reply_desc);
4510 		if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
4511 			ta->LUN[0] =
4512 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4513 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4514 		} else {
4515 			ta->LUN[1] = csio->ccb_h.target_lun;
4516 		}
4517 
4518 		ta->RelativeOffset = tgt->bytes_xfered;
4519 		ta->DataLength = ccb->csio.dxfer_len;
4520 		if (ta->DataLength > tgt->resid) {
4521 			ta->DataLength = tgt->resid;
4522 		}
4523 
4524 		/*
4525 		 * XXX Should be done after data transfer completes?
4526 		 */
4527 		tgt->resid -= csio->dxfer_len;
4528 		tgt->bytes_xfered += csio->dxfer_len;
4529 
4530 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4531 			ta->TargetAssistFlags |=
4532 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4533 		}
4534 
4535 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4536 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4537 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4538 			ta->TargetAssistFlags |=
4539 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4540 		}
4541 #endif
4542 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4543 
4544 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4545 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4546 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4547 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4548 
4549 		MPTLOCK_2_CAMLOCK(mpt);
4550 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4551 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4552 				int error;
4553 				int s = splsoftvm();
4554 				error = bus_dmamap_load(mpt->buffer_dmat,
4555 				    req->dmap, csio->data_ptr, csio->dxfer_len,
4556 				    cb, req, 0);
4557 				splx(s);
4558 				if (error == EINPROGRESS) {
4559 					xpt_freeze_simq(mpt->sim, 1);
4560 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4561 				}
4562 			} else {
4563 				/*
4564 				 * We have been given a pointer to single
4565 				 * physical buffer.
4566 				 */
4567 				struct bus_dma_segment seg;
4568 				seg.ds_addr = (bus_addr_t)
4569 				    (vm_offset_t)csio->data_ptr;
4570 				seg.ds_len = csio->dxfer_len;
4571 				(*cb)(req, &seg, 1, 0);
4572 			}
4573 		} else {
4574 			/*
4575 			 * We have been given a list of addresses.
4576 			 * This case could be easily supported but they are not
4577 			 * currently generated by the CAM subsystem so there
4578 			 * is no point in wasting the time right now.
4579 			 */
4580 			struct bus_dma_segment *sgs;
4581 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4582 				(*cb)(req, NULL, 0, EFAULT);
4583 			} else {
4584 				/* Just use the segments provided */
4585 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4586 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4587 			}
4588 		}
4589 		CAMLOCK_2_MPTLOCK(mpt);
4590 	} else {
4591 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4592 
4593 		/*
4594 		 * XXX: I don't know why this seems to happen, but
4595 		 * XXX: completing the CCB seems to make things happy.
4596 		 * XXX: This seems to happen if the initiator requests
4597 		 * XXX: enough data that we have to do multiple CTIOs.
4598 		 */
4599 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4600 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4601 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4602 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4603 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4604 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4605 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4606 			MPTLOCK_2_CAMLOCK(mpt);
4607 			xpt_done(ccb);
4608 			CAMLOCK_2_MPTLOCK(mpt);
4609 			return;
4610 		}
4611 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4612 			sp = sense;
4613 			memcpy(sp, &csio->sense_data,
4614 			   min(csio->sense_len, MPT_SENSE_SIZE));
4615 		}
4616 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4617 	}
4618 }
4619 
4620 static void
4621 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4622     uint32_t lun, int send, uint8_t *data, size_t length)
4623 {
4624 	mpt_tgt_state_t *tgt;
4625 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4626 	SGE_SIMPLE32 *se;
4627 	uint32_t flags;
4628 	uint8_t *dptr;
4629 	bus_addr_t pptr;
4630 	request_t *req;
4631 
4632 	/*
4633 	 * We enter with resid set to the data load for the command.
4634 	 */
4635 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4636 	if (length == 0 || tgt->resid == 0) {
4637 		tgt->resid = 0;
4638 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4639 		return;
4640 	}
4641 
4642 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4643 		mpt_prt(mpt, "out of resources- dropping local response\n");
4644 		return;
4645 	}
4646 	tgt->is_local = 1;
4647 
4648 
4649 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4650 	ta = req->req_vbuf;
4651 
4652 	if (mpt->is_sas) {
4653 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4654 		ta->QueueTag = ssp->InitiatorTag;
4655 	} else if (mpt->is_spi) {
4656 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4657 		ta->QueueTag = sp->Tag;
4658 	}
4659 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4660 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4661 	ta->ReplyWord = htole32(tgt->reply_desc);
4662 	if (lun > MPT_MAX_LUNS) {
4663 		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4664 		ta->LUN[1] = lun & 0xff;
4665 	} else {
4666 		ta->LUN[1] = lun;
4667 	}
4668 	ta->RelativeOffset = 0;
4669 	ta->DataLength = length;
4670 
4671 	dptr = req->req_vbuf;
4672 	dptr += MPT_RQSL(mpt);
4673 	pptr = req->req_pbuf;
4674 	pptr += MPT_RQSL(mpt);
4675 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4676 
4677 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4678 	memset(se, 0,sizeof (*se));
4679 
4680 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4681 	if (send) {
4682 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4683 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4684 	}
4685 	se->Address = pptr;
4686 	MPI_pSGE_SET_LENGTH(se, length);
4687 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4688 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4689 	MPI_pSGE_SET_FLAGS(se, flags);
4690 
4691 	tgt->ccb = NULL;
4692 	tgt->req = req;
4693 	tgt->resid -= length;
4694 	tgt->bytes_xfered = length;
4695 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4696 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4697 #else
4698 	tgt->state = TGT_STATE_MOVING_DATA;
4699 #endif
4700 	mpt_send_cmd(mpt, req);
4701 }
4702 
4703 /*
4704  * Abort queued up CCBs
4705  */
4706 static cam_status
4707 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4708 {
4709 	struct mpt_hdr_stailq *lp;
4710 	struct ccb_hdr *srch;
4711 	int found = 0;
4712 	union ccb *accb = ccb->cab.abort_ccb;
4713 	tgt_resource_t *trtp;
4714 
4715 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4716 
4717 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4718 		trtp = &mpt->trt_wildcard;
4719 	} else {
4720 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4721 	}
4722 
4723 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4724 		lp = &trtp->atios;
4725 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4726 		lp = &trtp->inots;
4727 	} else {
4728 		return (CAM_REQ_INVALID);
4729 	}
4730 
4731 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4732 		if (srch == &accb->ccb_h) {
4733 			found = 1;
4734 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4735 			break;
4736 		}
4737 	}
4738 	if (found) {
4739 		accb->ccb_h.status = CAM_REQ_ABORTED;
4740 		xpt_done(accb);
4741 		return (CAM_REQ_CMP);
4742 	}
4743 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
4744 	return (CAM_PATH_INVALID);
4745 }
4746 
4747 /*
4748  * Ask the MPT to abort the current target command
4749  */
4750 static int
4751 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4752 {
4753 	int error;
4754 	request_t *req;
4755 	PTR_MSG_TARGET_MODE_ABORT abtp;
4756 
4757 	req = mpt_get_request(mpt, FALSE);
4758 	if (req == NULL) {
4759 		return (-1);
4760 	}
4761 	abtp = req->req_vbuf;
4762 	memset(abtp, 0, sizeof (*abtp));
4763 
4764 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4765 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4766 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4767 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4768 	error = 0;
4769 	if (mpt->is_fc || mpt->is_sas) {
4770 		mpt_send_cmd(mpt, req);
4771 	} else {
4772 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4773 	}
4774 	return (error);
4775 }
4776 
4777 /*
4778  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4779  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4780  * FC929 to set bogus FC_RSP fields (nonzero residuals
4781  * but w/o RESID fields set). This causes QLogic initiators
4782  * to think maybe that a frame was lost.
4783  *
4784  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4785  * we use allocated requests to do TARGET_ASSIST and we
4786  * need to know when to release them.
4787  */
4788 
4789 static void
4790 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4791     uint8_t status, uint8_t const *sense_data)
4792 {
4793 	uint8_t *cmd_vbuf;
4794 	mpt_tgt_state_t *tgt;
4795 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4796 	request_t *req;
4797 	bus_addr_t paddr;
4798 	int resplen = 0;
4799 	uint32_t fl;
4800 
4801 	cmd_vbuf = cmd_req->req_vbuf;
4802 	cmd_vbuf += MPT_RQSL(mpt);
4803 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4804 
4805 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4806 		if (mpt->outofbeer == 0) {
4807 			mpt->outofbeer = 1;
4808 			xpt_freeze_simq(mpt->sim, 1);
4809 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4810 		}
4811 		if (ccb) {
4812 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4813 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4814 			MPTLOCK_2_CAMLOCK(mpt);
4815 			xpt_done(ccb);
4816 			CAMLOCK_2_MPTLOCK(mpt);
4817 		} else {
4818 			mpt_prt(mpt,
4819 			    "could not allocate status request- dropping\n");
4820 		}
4821 		return;
4822 	}
4823 	req->ccb = ccb;
4824 	if (ccb) {
4825 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4826 		ccb->ccb_h.ccb_req_ptr = req;
4827 	}
4828 
4829 	/*
4830 	 * Record the currently active ccb, if any, and the
4831 	 * request for it in our target state area.
4832 	 */
4833 	tgt->ccb = ccb;
4834 	tgt->req = req;
4835 	tgt->state = TGT_STATE_SENDING_STATUS;
4836 
4837 	tp = req->req_vbuf;
4838 	paddr = req->req_pbuf;
4839 	paddr += MPT_RQSL(mpt);
4840 
4841 	memset(tp, 0, sizeof (*tp));
4842 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4843 	if (mpt->is_fc) {
4844 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4845 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4846 		uint8_t *sts_vbuf;
4847 		uint32_t *rsp;
4848 
4849 		sts_vbuf = req->req_vbuf;
4850 		sts_vbuf += MPT_RQSL(mpt);
4851 		rsp = (uint32_t *) sts_vbuf;
4852 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4853 
4854 		/*
4855 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4856 		 * It has to be big-endian in memory and is organized
4857 		 * in 32 bit words, which are much easier to deal with
4858 		 * as words which are swizzled as needed.
4859 		 *
4860 		 * All we're filling here is the FC_RSP payload.
4861 		 * We may just have the chip synthesize it if
4862 		 * we have no residual and an OK status.
4863 		 *
4864 		 */
4865 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4866 
4867 		rsp[2] = status;
4868 		if (tgt->resid) {
4869 			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
4870 			rsp[3] = htobe32(tgt->resid);
4871 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4872 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4873 #endif
4874 		}
4875 		if (status == SCSI_STATUS_CHECK_COND) {
4876 			int i;
4877 
4878 			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
4879 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4880 			if (sense_data) {
4881 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4882 			} else {
4883 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4884 				    "TION but no sense data?\n");
4885 				memset(&rsp, 0, MPT_SENSE_SIZE);
4886 			}
4887 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4888 				rsp[i] = htobe32(rsp[i]);
4889 			}
4890 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4891 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4892 #endif
4893 		}
4894 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4895 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4896 #endif
4897 		rsp[2] = htobe32(rsp[2]);
4898 	} else if (mpt->is_sas) {
4899 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4900 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4901 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4902 	} else {
4903 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4904 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4905 		tp->StatusCode = status;
4906 		tp->QueueTag = htole16(sp->Tag);
4907 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4908 	}
4909 
4910 	tp->ReplyWord = htole32(tgt->reply_desc);
4911 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4912 
4913 #ifdef	WE_CAN_USE_AUTO_REPOST
4914 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4915 #endif
4916 	if (status == SCSI_STATUS_OK && resplen == 0) {
4917 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4918 	} else {
4919 		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4920 		fl =
4921 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4922 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4923 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4924 			MPI_SGE_FLAGS_END_OF_LIST	|
4925 			MPI_SGE_FLAGS_END_OF_BUFFER;
4926 		fl <<= MPI_SGE_FLAGS_SHIFT;
4927 		fl |= resplen;
4928 		tp->StatusDataSGE.FlagsLength = htole32(fl);
4929 	}
4930 
4931 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4932 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4933 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4934 	    req->serno, tgt->resid);
4935 	if (ccb) {
4936 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4937 		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
4938 	}
4939 	mpt_send_cmd(mpt, req);
4940 }
4941 
4942 static void
4943 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4944     tgt_resource_t *trtp, int init_id)
4945 {
4946 	struct ccb_immed_notify *inot;
4947 	mpt_tgt_state_t *tgt;
4948 
4949 	tgt = MPT_TGT_STATE(mpt, req);
4950 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4951 	if (inot == NULL) {
4952 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4953 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4954 		return;
4955 	}
4956 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4957 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4958 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4959 
4960 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4961 	inot->sense_len = 0;
4962 	memset(inot->message_args, 0, sizeof (inot->message_args));
4963 	inot->initiator_id = init_id;	/* XXX */
4964 
4965 	/*
4966 	 * This is a somewhat grotesque attempt to map from task management
4967 	 * to old style SCSI messages. God help us all.
4968 	 */
4969 	switch (fc) {
4970 	case MPT_ABORT_TASK_SET:
4971 		inot->message_args[0] = MSG_ABORT_TAG;
4972 		break;
4973 	case MPT_CLEAR_TASK_SET:
4974 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4975 		break;
4976 	case MPT_TARGET_RESET:
4977 		inot->message_args[0] = MSG_TARGET_RESET;
4978 		break;
4979 	case MPT_CLEAR_ACA:
4980 		inot->message_args[0] = MSG_CLEAR_ACA;
4981 		break;
4982 	case MPT_TERMINATE_TASK:
4983 		inot->message_args[0] = MSG_ABORT_TAG;
4984 		break;
4985 	default:
4986 		inot->message_args[0] = MSG_NOOP;
4987 		break;
4988 	}
4989 	tgt->ccb = (union ccb *) inot;
4990 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4991 	MPTLOCK_2_CAMLOCK(mpt);
4992 	xpt_done((union ccb *)inot);
4993 	CAMLOCK_2_MPTLOCK(mpt);
4994 }
4995 
4996 static void
4997 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4998 {
4999 	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
5000 	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
5001 	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
5002 	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
5003 	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
5004 	     '0',  '0',  '0',  '1'
5005 	};
5006 	struct ccb_accept_tio *atiop;
5007 	lun_id_t lun;
5008 	int tag_action = 0;
5009 	mpt_tgt_state_t *tgt;
5010 	tgt_resource_t *trtp = NULL;
5011 	U8 *lunptr;
5012 	U8 *vbuf;
5013 	U16 itag;
5014 	U16 ioindex;
5015 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
5016 	uint8_t *cdbp;
5017 
5018 	/*
5019 	 * First, DMA sync the received command-
5020 	 * which is in the *request* * phys area.
5021 	 *
5022 	 * XXX: We could optimize this for a range
5023 	 */
5024 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
5025 	    BUS_DMASYNC_POSTREAD);
5026 
5027 	/*
5028 	 * Stash info for the current command where we can get at it later.
5029 	 */
5030 	vbuf = req->req_vbuf;
5031 	vbuf += MPT_RQSL(mpt);
5032 
5033 	/*
5034 	 * Get our state pointer set up.
5035 	 */
5036 	tgt = MPT_TGT_STATE(mpt, req);
5037 	if (tgt->state != TGT_STATE_LOADED) {
5038 		mpt_tgt_dump_req_state(mpt, req);
5039 		panic("bad target state in mpt_scsi_tgt_atio");
5040 	}
5041 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
5042 	tgt->state = TGT_STATE_IN_CAM;
5043 	tgt->reply_desc = reply_desc;
5044 	ioindex = GET_IO_INDEX(reply_desc);
5045 	if (mpt->verbose >= MPT_PRT_DEBUG) {
5046 		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
5047 		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
5048 		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
5049 		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
5050 	}
5051 	if (mpt->is_fc) {
5052 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
5053 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
5054 		if (fc->FcpCntl[2]) {
5055 			/*
5056 			 * Task Management Request
5057 			 */
5058 			switch (fc->FcpCntl[2]) {
5059 			case 0x2:
5060 				fct = MPT_ABORT_TASK_SET;
5061 				break;
5062 			case 0x4:
5063 				fct = MPT_CLEAR_TASK_SET;
5064 				break;
5065 			case 0x20:
5066 				fct = MPT_TARGET_RESET;
5067 				break;
5068 			case 0x40:
5069 				fct = MPT_CLEAR_ACA;
5070 				break;
5071 			case 0x80:
5072 				fct = MPT_TERMINATE_TASK;
5073 				break;
5074 			default:
5075 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
5076 				    fc->FcpCntl[2]);
5077 				mpt_scsi_tgt_status(mpt, 0, req,
5078 				    SCSI_STATUS_OK, 0);
5079 				return;
5080 			}
5081 		} else {
5082 			switch (fc->FcpCntl[1]) {
5083 			case 0:
5084 				tag_action = MSG_SIMPLE_Q_TAG;
5085 				break;
5086 			case 1:
5087 				tag_action = MSG_HEAD_OF_Q_TAG;
5088 				break;
5089 			case 2:
5090 				tag_action = MSG_ORDERED_Q_TAG;
5091 				break;
5092 			default:
5093 				/*
5094 				 * Bah. Ignore Untagged Queing and ACA
5095 				 */
5096 				tag_action = MSG_SIMPLE_Q_TAG;
5097 				break;
5098 			}
5099 		}
5100 		tgt->resid = be32toh(fc->FcpDl);
5101 		cdbp = fc->FcpCdb;
5102 		lunptr = fc->FcpLun;
5103 		itag = be16toh(fc->OptionalOxid);
5104 	} else if (mpt->is_sas) {
5105 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
5106 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
5107 		cdbp = ssp->CDB;
5108 		lunptr = ssp->LogicalUnitNumber;
5109 		itag = ssp->InitiatorTag;
5110 	} else {
5111 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
5112 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
5113 		cdbp = sp->CDB;
5114 		lunptr = sp->LogicalUnitNumber;
5115 		itag = sp->Tag;
5116 	}
5117 
5118 	/*
5119 	 * Generate a simple lun
5120 	 */
5121 	switch (lunptr[0] & 0xc0) {
5122 	case 0x40:
5123 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
5124 		break;
5125 	case 0:
5126 		lun = lunptr[1];
5127 		break;
5128 	default:
5129 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
5130 		lun = 0xffff;
5131 		break;
5132 	}
5133 
5134 	/*
5135 	 * Deal with non-enabled or bad luns here.
5136 	 */
5137 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
5138 	    mpt->trt[lun].enabled == 0) {
5139 		if (mpt->twildcard) {
5140 			trtp = &mpt->trt_wildcard;
5141 		} else if (fct == MPT_NIL_TMT_VALUE) {
5142 			/*
5143 			 * In this case, we haven't got an upstream listener
5144 			 * for either a specific lun or wildcard luns. We
5145 			 * have to make some sensible response. For regular
5146 			 * inquiry, just return some NOT HERE inquiry data.
5147 			 * For VPD inquiry, report illegal field in cdb.
5148 			 * For REQUEST SENSE, just return NO SENSE data.
5149 			 * REPORT LUNS gets illegal command.
5150 			 * All other commands get 'no such device'.
5151 			 */
5152 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
5153 			size_t len;
5154 
5155 			memset(buf, 0, MPT_SENSE_SIZE);
5156 			cond = SCSI_STATUS_CHECK_COND;
5157 			buf[0] = 0xf0;
5158 			buf[2] = 0x5;
5159 			buf[7] = 0x8;
5160 			sp = buf;
5161 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5162 
5163 			switch (cdbp[0]) {
5164 			case INQUIRY:
5165 			{
5166 				if (cdbp[1] != 0) {
5167 					buf[12] = 0x26;
5168 					buf[13] = 0x01;
5169 					break;
5170 				}
5171 				len = min(tgt->resid, cdbp[4]);
5172 				len = min(len, sizeof (null_iqd));
5173 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5174 				    "local inquiry %ld bytes\n", (long) len);
5175 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5176 				    null_iqd, len);
5177 				return;
5178 			}
5179 			case REQUEST_SENSE:
5180 			{
5181 				buf[2] = 0x0;
5182 				len = min(tgt->resid, cdbp[4]);
5183 				len = min(len, sizeof (buf));
5184 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5185 				    "local reqsense %ld bytes\n", (long) len);
5186 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5187 				    buf, len);
5188 				return;
5189 			}
5190 			case REPORT_LUNS:
5191 				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
5192 				buf[12] = 0x26;
5193 				return;
5194 			default:
5195 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5196 				    "CMD 0x%x to unmanaged lun %u\n",
5197 				    cdbp[0], lun);
5198 				buf[12] = 0x25;
5199 				break;
5200 			}
5201 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
5202 			return;
5203 		}
5204 		/* otherwise, leave trtp NULL */
5205 	} else {
5206 		trtp = &mpt->trt[lun];
5207 	}
5208 
5209 	/*
5210 	 * Deal with any task management
5211 	 */
5212 	if (fct != MPT_NIL_TMT_VALUE) {
5213 		if (trtp == NULL) {
5214 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
5215 			    fct);
5216 			mpt_scsi_tgt_status(mpt, 0, req,
5217 			    SCSI_STATUS_OK, 0);
5218 		} else {
5219 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5220 			    GET_INITIATOR_INDEX(reply_desc));
5221 		}
5222 		return;
5223 	}
5224 
5225 
5226 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5227 	if (atiop == NULL) {
5228 		mpt_lprt(mpt, MPT_PRT_WARN,
5229 		    "no ATIOs for lun %u- sending back %s\n", lun,
5230 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
5231 		mpt_scsi_tgt_status(mpt, NULL, req,
5232 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5233 		    NULL);
5234 		return;
5235 	}
5236 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5237 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
5238 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
5239 	atiop->ccb_h.ccb_mpt_ptr = mpt;
5240 	atiop->ccb_h.status = CAM_CDB_RECVD;
5241 	atiop->ccb_h.target_lun = lun;
5242 	atiop->sense_len = 0;
5243 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
5244 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
5245 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
5246 
5247 	/*
5248 	 * The tag we construct here allows us to find the
5249 	 * original request that the command came in with.
5250 	 *
5251 	 * This way we don't have to depend on anything but the
5252 	 * tag to find things when CCBs show back up from CAM.
5253 	 */
5254 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5255 	tgt->tag_id = atiop->tag_id;
5256 	if (tag_action) {
5257 		atiop->tag_action = tag_action;
5258 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
5259 	}
5260 	if (mpt->verbose >= MPT_PRT_DEBUG) {
5261 		int i;
5262 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
5263 		    atiop->ccb_h.target_lun);
5264 		for (i = 0; i < atiop->cdb_len; i++) {
5265 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
5266 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
5267 		}
5268 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
5269 	    	    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
5270 	}
5271 
5272 	MPTLOCK_2_CAMLOCK(mpt);
5273 	xpt_done((union ccb *)atiop);
5274 	CAMLOCK_2_MPTLOCK(mpt);
5275 }
5276 
5277 static void
5278 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
5279 {
5280 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5281 
5282 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
5283 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
5284 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
5285 	    tgt->tag_id, tgt->state);
5286 }
5287 
5288 static void
5289 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
5290 {
5291 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
5292 	    req->index, req->index, req->state);
5293 	mpt_tgt_dump_tgt_state(mpt, req);
5294 }
5295 
5296 static int
5297 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
5298     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
5299 {
5300 	int dbg;
5301 	union ccb *ccb;
5302 	U16 status;
5303 
5304 	if (reply_frame == NULL) {
5305 		/*
5306 		 * Figure out what the state of the command is.
5307 		 */
5308 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5309 
5310 #ifdef	INVARIANTS
5311 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
5312 		if (tgt->req) {
5313 			mpt_req_not_spcl(mpt, tgt->req,
5314 			    "turbo scsi_tgt_reply associated req", __LINE__);
5315 		}
5316 #endif
5317 		switch(tgt->state) {
5318 		case TGT_STATE_LOADED:
5319 			/*
5320 			 * This is a new command starting.
5321 			 */
5322 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
5323 			break;
5324 		case TGT_STATE_MOVING_DATA:
5325 		{
5326 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
5327 
5328 			ccb = tgt->ccb;
5329 			if (tgt->req == NULL) {
5330 				panic("mpt: turbo target reply with null "
5331 				    "associated request moving data");
5332 				/* NOTREACHED */
5333 			}
5334 			if (ccb == NULL) {
5335 				if (tgt->is_local == 0) {
5336 					panic("mpt: turbo target reply with "
5337 					    "null associated ccb moving data");
5338 					/* NOTREACHED */
5339 				}
5340 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5341 				    "TARGET_ASSIST local done\n");
5342 				TAILQ_REMOVE(&mpt->request_pending_list,
5343 				    tgt->req, links);
5344 				mpt_free_request(mpt, tgt->req);
5345 				tgt->req = NULL;
5346 				mpt_scsi_tgt_status(mpt, NULL, req,
5347 				    0, NULL);
5348 				return (TRUE);
5349 			}
5350 			tgt->ccb = NULL;
5351 			tgt->nxfers++;
5352 			mpt_req_untimeout(req, mpt_timeout, ccb);
5353 			mpt_lprt(mpt, MPT_PRT_DEBUG,
5354 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5355 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5356 			/*
5357 			 * Free the Target Assist Request
5358 			 */
5359 			KASSERT(tgt->req->ccb == ccb,
5360 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
5361 			    tgt->req->serno, tgt->req->ccb));
5362 			TAILQ_REMOVE(&mpt->request_pending_list,
5363 			    tgt->req, links);
5364 			mpt_free_request(mpt, tgt->req);
5365 			tgt->req = NULL;
5366 
5367 			/*
5368 			 * Do we need to send status now? That is, are
5369 			 * we done with all our data transfers?
5370 			 */
5371 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
5372 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5373 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5374 				KASSERT(ccb->ccb_h.status,
5375 				    ("zero ccb sts at %d\n", __LINE__));
5376 				tgt->state = TGT_STATE_IN_CAM;
5377 				if (mpt->outofbeer) {
5378 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5379 					mpt->outofbeer = 0;
5380 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5381 				}
5382 				MPTLOCK_2_CAMLOCK(mpt);
5383 				xpt_done(ccb);
5384 				CAMLOCK_2_MPTLOCK(mpt);
5385 				break;
5386 			}
5387 			/*
5388 			 * Otherwise, send status (and sense)
5389 			 */
5390 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5391 				sp = sense;
5392 				memcpy(sp, &ccb->csio.sense_data,
5393 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
5394 			}
5395 			mpt_scsi_tgt_status(mpt, ccb, req,
5396 			    ccb->csio.scsi_status, sp);
5397 			break;
5398 		}
5399 		case TGT_STATE_SENDING_STATUS:
5400 		case TGT_STATE_MOVING_DATA_AND_STATUS:
5401 		{
5402 			int ioindex;
5403 			ccb = tgt->ccb;
5404 
5405 			if (tgt->req == NULL) {
5406 				panic("mpt: turbo target reply with null "
5407 				    "associated request sending status");
5408 				/* NOTREACHED */
5409 			}
5410 
5411 			if (ccb) {
5412 				tgt->ccb = NULL;
5413 				if (tgt->state ==
5414 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
5415 					tgt->nxfers++;
5416 				}
5417 				mpt_req_untimeout(req, mpt_timeout, ccb);
5418 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5419 					ccb->ccb_h.status |= CAM_SENT_SENSE;
5420 				}
5421 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5422 				    "TARGET_STATUS tag %x sts %x flgs %x req "
5423 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5424 				    ccb->ccb_h.flags, tgt->req);
5425 				/*
5426 				 * Free the Target Send Status Request
5427 				 */
5428 				KASSERT(tgt->req->ccb == ccb,
5429 				    ("tgt->req %p:%u tgt->req->ccb %p",
5430 				    tgt->req, tgt->req->serno, tgt->req->ccb));
5431 				/*
5432 				 * Notify CAM that we're done
5433 				 */
5434 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5435 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5436 				KASSERT(ccb->ccb_h.status,
5437 				    ("ZERO ccb sts at %d\n", __LINE__));
5438 				tgt->ccb = NULL;
5439 			} else {
5440 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5441 				    "TARGET_STATUS non-CAM for  req %p:%u\n",
5442 				    tgt->req, tgt->req->serno);
5443 			}
5444 			TAILQ_REMOVE(&mpt->request_pending_list,
5445 			    tgt->req, links);
5446 			mpt_free_request(mpt, tgt->req);
5447 			tgt->req = NULL;
5448 
5449 			/*
5450 			 * And re-post the Command Buffer.
5451 			 * This will reset the state.
5452 			 */
5453 			ioindex = GET_IO_INDEX(reply_desc);
5454 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5455 			tgt->is_local = 0;
5456 			mpt_post_target_command(mpt, req, ioindex);
5457 
5458 			/*
5459 			 * And post a done for anyone who cares
5460 			 */
5461 			if (ccb) {
5462 				if (mpt->outofbeer) {
5463 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5464 					mpt->outofbeer = 0;
5465 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5466 				}
5467 				MPTLOCK_2_CAMLOCK(mpt);
5468 				xpt_done(ccb);
5469 				CAMLOCK_2_MPTLOCK(mpt);
5470 			}
5471 			break;
5472 		}
5473 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
5474 			tgt->state = TGT_STATE_LOADED;
5475 			break;
5476 		default:
5477 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5478 			    "Reply Function\n", tgt->state);
5479 		}
5480 		return (TRUE);
5481 	}
5482 
5483 	status = le16toh(reply_frame->IOCStatus);
5484 	if (status != MPI_IOCSTATUS_SUCCESS) {
5485 		dbg = MPT_PRT_ERROR;
5486 	} else {
5487 		dbg = MPT_PRT_DEBUG1;
5488 	}
5489 
5490 	mpt_lprt(mpt, dbg,
5491 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5492 	     req, req->serno, reply_frame, reply_frame->Function, status);
5493 
5494 	switch (reply_frame->Function) {
5495 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5496 	{
5497 		mpt_tgt_state_t *tgt;
5498 #ifdef	INVARIANTS
5499 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5500 #endif
5501 		if (status != MPI_IOCSTATUS_SUCCESS) {
5502 			/*
5503 			 * XXX What to do?
5504 			 */
5505 			break;
5506 		}
5507 		tgt = MPT_TGT_STATE(mpt, req);
5508 		KASSERT(tgt->state == TGT_STATE_LOADING,
5509 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
5510 		mpt_assign_serno(mpt, req);
5511 		tgt->state = TGT_STATE_LOADED;
5512 		break;
5513 	}
5514 	case MPI_FUNCTION_TARGET_ASSIST:
5515 #ifdef	INVARIANTS
5516 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5517 #endif
5518 		mpt_prt(mpt, "target assist completion\n");
5519 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5520 		mpt_free_request(mpt, req);
5521 		break;
5522 	case MPI_FUNCTION_TARGET_STATUS_SEND:
5523 #ifdef	INVARIANTS
5524 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5525 #endif
5526 		mpt_prt(mpt, "status send completion\n");
5527 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5528 		mpt_free_request(mpt, req);
5529 		break;
5530 	case MPI_FUNCTION_TARGET_MODE_ABORT:
5531 	{
5532 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5533 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5534 		PTR_MSG_TARGET_MODE_ABORT abtp =
5535 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5536 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5537 #ifdef	INVARIANTS
5538 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5539 #endif
5540 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5541 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5542 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5543 		mpt_free_request(mpt, req);
5544 		break;
5545 	}
5546 	default:
5547 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5548 		    "0x%x\n", reply_frame->Function);
5549 		break;
5550 	}
5551 	return (TRUE);
5552 }
5553