xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision 70e0bbedef95258a4dadc996d641a9bebd3f107d)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  *
61  * Support from LSI-Logic has also gone a great deal toward making this a
62  * workable subsystem and is gratefully acknowledged.
63  */
64 /*-
65  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66  * Copyright (c) 2005, WHEEL Sp. z o.o.
67  * Copyright (c) 2004, 2005 Justin T. Gibbs
68  * All rights reserved.
69  *
70  * Redistribution and use in source and binary forms, with or without
71  * modification, are permitted provided that the following conditions are
72  * met:
73  * 1. Redistributions of source code must retain the above copyright
74  *    notice, this list of conditions and the following disclaimer.
75  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76  *    substantially similar to the "NO WARRANTY" disclaimer below
77  *    ("Disclaimer") and any redistribution must be conditioned upon including
78  *    a substantially similar Disclaimer requirement for further binary
79  *    redistribution.
80  * 3. Neither the names of the above listed copyright holders nor the names
81  *    of any contributors may be used to endorse or promote products derived
82  *    from this software without specific prior written permission.
83  *
84  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95  */
96 #include <sys/cdefs.h>
97 __FBSDID("$FreeBSD$");
98 
99 #include <dev/mpt/mpt.h>
100 #include <dev/mpt/mpt_cam.h>
101 #include <dev/mpt/mpt_raid.h>
102 
103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/mpt/mpilib/mpi_init.h"
105 #include "dev/mpt/mpilib/mpi_targ.h"
106 #include "dev/mpt/mpilib/mpi_fc.h"
107 #include "dev/mpt/mpilib/mpi_sas.h"
108 #if __FreeBSD_version >= 500000
109 #include <sys/sysctl.h>
110 #endif
111 #include <sys/callout.h>
112 #include <sys/kthread.h>
113 
114 #if __FreeBSD_version >= 700025
115 #ifndef	CAM_NEW_TRAN_CODE
116 #define	CAM_NEW_TRAN_CODE	1
117 #endif
118 #endif
119 
120 static void mpt_poll(struct cam_sim *);
121 static timeout_t mpt_timeout;
122 static void mpt_action(struct cam_sim *, union ccb *);
123 static int
124 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
125 static void mpt_setwidth(struct mpt_softc *, int, int);
126 static void mpt_setsync(struct mpt_softc *, int, int, int);
127 static int mpt_update_spi_config(struct mpt_softc *, int);
128 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
129 
130 static mpt_reply_handler_t mpt_scsi_reply_handler;
131 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
132 static mpt_reply_handler_t mpt_fc_els_reply_handler;
133 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
134 					MSG_DEFAULT_REPLY *);
135 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
136 static int mpt_fc_reset_link(struct mpt_softc *, int);
137 
138 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
139 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
140 static void mpt_recovery_thread(void *arg);
141 static void mpt_recover_commands(struct mpt_softc *mpt);
142 
143 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
144     u_int, u_int, u_int, int);
145 
146 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
147 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
148 static int mpt_add_els_buffers(struct mpt_softc *mpt);
149 static int mpt_add_target_commands(struct mpt_softc *mpt);
150 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
151 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
152 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
153 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
154 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
155 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
156     uint8_t, uint8_t const *);
157 static void
158 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
159     tgt_resource_t *, int);
160 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
161 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
162 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
163 static mpt_reply_handler_t mpt_sata_pass_reply_handler;
164 
165 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
166 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
167 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
168 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
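/*
 * These handler IDs get OR'd into a request's MsgContext (see, e.g.,
 * the SATA passthrough setup in mptsas_set_sata_wc()) so that the
 * interrupt path can route each reply frame back to its owner.
 */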
169 
170 static mpt_probe_handler_t	mpt_cam_probe;
171 static mpt_attach_handler_t	mpt_cam_attach;
172 static mpt_enable_handler_t	mpt_cam_enable;
173 static mpt_ready_handler_t	mpt_cam_ready;
174 static mpt_event_handler_t	mpt_cam_event;
175 static mpt_reset_handler_t	mpt_cam_ioc_reset;
176 static mpt_detach_handler_t	mpt_cam_detach;
177 
178 static struct mpt_personality mpt_cam_personality =
179 {
180 	.name		= "mpt_cam",
181 	.probe		= mpt_cam_probe,
182 	.attach		= mpt_cam_attach,
183 	.enable		= mpt_cam_enable,
184 	.ready		= mpt_cam_ready,
185 	.event		= mpt_cam_event,
186 	.reset		= mpt_cam_ioc_reset,
187 	.detach		= mpt_cam_detach,
188 };
189 
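/*
 * Register this personality with the core mpt(4) driver; SI_ORDER_SECOND
 * sequences its initialization after lower-ordered personalities (the
 * core personality registers earlier).
 */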
190 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
191 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
192 
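/*
 * Loader tunable controlling SATA write caching behind SAS adapters;
 * it is consumed in mpt_set_initial_config_sas().  The default of -1
 * leaves each drive's setting alone, 0 forcibly disables the write
 * cache and 1 forcibly enables it.  For example, a loader.conf(5)
 * line such as
 *	hw.mpt.enable_sata_wc="1"
 * would turn write caching on for every attached SATA device.
 */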
193 int mpt_enable_sata_wc = -1;
194 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
195 
196 static int
197 mpt_cam_probe(struct mpt_softc *mpt)
198 {
199 	int role;
200 
201 	/*
202 	 * Only attach to nodes that support the initiator or target role
203 	 * (or want to) or have RAID physical devices that need CAM pass-thru
204 	 * support.
205 	 */
206 	if (mpt->do_cfg_role) {
207 		role = mpt->cfg_role;
208 	} else {
209 		role = mpt->role;
210 	}
211 	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
212 	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
213 		return (0);
214 	}
215 	return (ENODEV);
216 }
217 
218 static int
219 mpt_cam_attach(struct mpt_softc *mpt)
220 {
221 	struct cam_devq *devq;
222 	mpt_handler_t	 handler;
223 	int		 maxq;
224 	int		 error;
225 
226 	MPT_LOCK(mpt);
227 	TAILQ_INIT(&mpt->request_timeout_list);
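	/*
	 * Cap the SIM queue depth at the lesser of the IOC's advertised
	 * credits and our request pool size; resources reserved below
	 * (ELS buffers, the dedicated TMF request) are subtracted from
	 * this before the SIMQ is allocated.
	 */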
228 	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
229 	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
230 
231 	handler.reply_handler = mpt_scsi_reply_handler;
232 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
233 				     &scsi_io_handler_id);
234 	if (error != 0) {
235 		MPT_UNLOCK(mpt);
236 		goto cleanup;
237 	}
238 
239 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
240 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
241 				     &scsi_tmf_handler_id);
242 	if (error != 0) {
243 		MPT_UNLOCK(mpt);
244 		goto cleanup;
245 	}
246 
247 	/*
248 	 * If we're fibre channel and could support target mode, we register
249 	 * an ELS reply handler and give it resources.
250 	 */
251 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
252 		handler.reply_handler = mpt_fc_els_reply_handler;
253 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
254 		    &fc_els_handler_id);
255 		if (error != 0) {
256 			MPT_UNLOCK(mpt);
257 			goto cleanup;
258 		}
259 		if (mpt_add_els_buffers(mpt) == FALSE) {
260 			error = ENOMEM;
261 			MPT_UNLOCK(mpt);
262 			goto cleanup;
263 		}
264 		maxq -= mpt->els_cmds_allocated;
265 	}
266 
267 	/*
268 	 * If we support target mode, we register a reply handler for it,
269 	 * but don't add command resources until we actually enable target
270 	 * mode.
271 	 */
272 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
273 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
274 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
275 		    &mpt->scsi_tgt_handler_id);
276 		if (error != 0) {
277 			MPT_UNLOCK(mpt);
278 			goto cleanup;
279 		}
280 	}
281 
282 	if (mpt->is_sas) {
283 		handler.reply_handler = mpt_sata_pass_reply_handler;
284 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
285 		    &sata_pass_handler_id);
286 		if (error != 0) {
287 			MPT_UNLOCK(mpt);
288 			goto cleanup;
289 		}
290 	}
291 
292 	/*
293 	 * We keep one request reserved for timeout TMF requests.
294 	 */
295 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
296 	if (mpt->tmf_req == NULL) {
297 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
298 		error = ENOMEM;
299 		MPT_UNLOCK(mpt);
300 		goto cleanup;
301 	}
302 
303 	/*
304 	 * Mark the request as free even though not on the free list.
305 	 * There is only one TMF request allowed to be outstanding at
306 	 * a time and the TMF routines perform their own allocation
307 	 * tracking using the standard state flags.
308 	 */
309 	mpt->tmf_req->state = REQ_STATE_FREE;
310 	maxq--;
311 
312 	/*
313 	 * The rest of this is CAM foo, for which we need to drop our lock
314 	 */
315 	MPT_UNLOCK(mpt);
316 
317 	if (mpt_spawn_recovery_thread(mpt) != 0) {
318 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
319 		error = ENOMEM;
320 		goto cleanup;
321 	}
322 
323 	/*
324 	 * Create the device queue for our SIM(s).
325 	 */
326 	devq = cam_simq_alloc(maxq);
327 	if (devq == NULL) {
328 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
329 		error = ENOMEM;
330 		goto cleanup;
331 	}
332 
333 	/*
334 	 * Construct our SIM entry.
335 	 */
336 	mpt->sim =
337 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
338 	if (mpt->sim == NULL) {
339 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
340 		cam_simq_free(devq);
341 		error = ENOMEM;
342 		goto cleanup;
343 	}
344 
345 	/*
346 	 * Register exactly this bus.
347 	 */
348 	MPT_LOCK(mpt);
349 	if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
350 		mpt_prt(mpt, "Bus registration Failed!\n");
351 		error = ENOMEM;
352 		MPT_UNLOCK(mpt);
353 		goto cleanup;
354 	}
355 
356 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
357 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
358 		mpt_prt(mpt, "Unable to allocate Path!\n");
359 		error = ENOMEM;
360 		MPT_UNLOCK(mpt);
361 		goto cleanup;
362 	}
363 	MPT_UNLOCK(mpt);
364 
365 	/*
366 	 * Only register a second bus for RAID physical
367 	 * devices if the controller supports RAID.
368 	 */
369 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
370 		return (0);
371 	}
372 
373 	/*
374 	 * Create a "bus" to export all hidden disks to CAM.
375 	 */
376 	mpt->phydisk_sim =
377 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
378 	if (mpt->phydisk_sim == NULL) {
379 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
380 		error = ENOMEM;
381 		goto cleanup;
382 	}
383 
384 	/*
385 	 * Register this bus.
386 	 */
387 	MPT_LOCK(mpt);
388 	if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
389 	    CAM_SUCCESS) {
390 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
391 		error = ENOMEM;
392 		MPT_UNLOCK(mpt);
393 		goto cleanup;
394 	}
395 
396 	if (xpt_create_path(&mpt->phydisk_path, NULL,
397 	    cam_sim_path(mpt->phydisk_sim),
398 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
399 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
400 		error = ENOMEM;
401 		MPT_UNLOCK(mpt);
402 		goto cleanup;
403 	}
404 	MPT_UNLOCK(mpt);
405 	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
406 	return (0);
407 
408 cleanup:
409 	mpt_cam_detach(mpt);
410 	return (error);
411 }
412 
413 /*
414  * Read FC configuration information.
415  */
416 static int
417 mpt_read_config_info_fc(struct mpt_softc *mpt)
418 {
419 	char *topology = NULL;
420 	int rv;
421 
422 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
423 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
424 	if (rv) {
425 		return (-1);
426 	}
427 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
428 		 mpt->mpt_fcport_page0.Header.PageVersion,
429 		 mpt->mpt_fcport_page0.Header.PageLength,
430 		 mpt->mpt_fcport_page0.Header.PageNumber,
431 		 mpt->mpt_fcport_page0.Header.PageType);
432 
433 
434 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
435 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
436 	if (rv) {
437 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
438 		return (-1);
439 	}
440 	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
441 
442 	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
443 
444 	switch (mpt->mpt_fcport_page0.Flags &
445 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
446 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
447 		mpt->mpt_fcport_speed = 0;
448 		topology = "<NO LOOP>";
449 		break;
450 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
451 		topology = "N-Port";
452 		break;
453 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
454 		topology = "NL-Port";
455 		break;
456 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
457 		topology = "F-Port";
458 		break;
459 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
460 		topology = "FL-Port";
461 		break;
462 	default:
463 		mpt->mpt_fcport_speed = 0;
464 		topology = "?";
465 		break;
466 	}
467 
468 	mpt_lprt(mpt, MPT_PRT_INFO,
469 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
470 	    "Speed %u-Gbit\n", topology,
471 	    mpt->mpt_fcport_page0.WWNN.High,
472 	    mpt->mpt_fcport_page0.WWNN.Low,
473 	    mpt->mpt_fcport_page0.WWPN.High,
474 	    mpt->mpt_fcport_page0.WWPN.Low,
475 	    mpt->mpt_fcport_speed);
476 #if __FreeBSD_version >= 500000
477 	MPT_UNLOCK(mpt);
478 	{
479 		struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
480 		struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
481 
482 		snprintf(mpt->scinfo.fc.wwnn,
483 		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
484 		    mpt->mpt_fcport_page0.WWNN.High,
485 		    mpt->mpt_fcport_page0.WWNN.Low);
486 
487 		snprintf(mpt->scinfo.fc.wwpn,
488 		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
489 		    mpt->mpt_fcport_page0.WWPN.High,
490 		    mpt->mpt_fcport_page0.WWPN.Low);
491 
492 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
493 		       "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
494 		       "World Wide Node Name");
495 
496 		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
497 		       "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
498 		       "World Wide Port Name");
499 
500 	}
501 	MPT_LOCK(mpt);
502 #endif
503 	return (0);
504 }
505 
506 /*
507  * Set FC configuration information.
508  */
509 static int
510 mpt_set_initial_config_fc(struct mpt_softc *mpt)
511 {
512 	CONFIG_PAGE_FC_PORT_1 fc;
513 	U32 fl;
514 	int r, doit = 0;
515 	int role;
516 
517 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
518 	    &fc.Header, FALSE, 5000);
519 	if (r) {
520 		mpt_prt(mpt, "failed to read FC page 1 header\n");
521 		return (mpt_fc_reset_link(mpt, 1));
522 	}
523 
524 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
525 	    &fc.Header, sizeof (fc), FALSE, 5000);
526 	if (r) {
527 		mpt_prt(mpt, "failed to read FC page 1\n");
528 		return (mpt_fc_reset_link(mpt, 1));
529 	}
530 	mpt2host_config_page_fc_port_1(&fc);
531 
532 	/*
533 	 * Check our flags to make sure we support the role we want.
534 	 */
535 	doit = 0;
536 	role = 0;
537 	fl = fc.Flags;
538 
539 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
540 		role |= MPT_ROLE_INITIATOR;
541 	}
542 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
543 		role |= MPT_ROLE_TARGET;
544 	}
545 
546 	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
547 
548 	if (mpt->do_cfg_role == 0) {
549 		role = mpt->cfg_role;
550 	} else {
551 		mpt->do_cfg_role = 0;
552 	}
553 
554 	if (role != mpt->cfg_role) {
555 		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
556 			if ((role & MPT_ROLE_INITIATOR) == 0) {
557 				mpt_prt(mpt, "adding initiator role\n");
558 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
559 				doit++;
560 			} else {
561 				mpt_prt(mpt, "keeping initiator role\n");
562 			}
563 		} else if (role & MPT_ROLE_INITIATOR) {
564 			mpt_prt(mpt, "removing initiator role\n");
565 			doit++;
566 		}
567 		if (mpt->cfg_role & MPT_ROLE_TARGET) {
568 			if ((role & MPT_ROLE_TARGET) == 0) {
569 				mpt_prt(mpt, "adding target role\n");
570 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
571 				doit++;
572 			} else {
573 				mpt_prt(mpt, "keeping target role\n");
574 			}
575 		} else if (role & MPT_ROLE_TARGET) {
576 			mpt_prt(mpt, "removing target role\n");
577 			doit++;
578 		}
579 		mpt->role = mpt->cfg_role;
580 	}
581 
582 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
583 		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
584 			mpt_prt(mpt, "adding OXID option\n");
585 			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
586 			doit++;
587 		}
588 	}
589 
590 	if (doit) {
591 		fc.Flags = fl;
592 		host2mpt_config_page_fc_port_1(&fc);
593 		r = mpt_write_cfg_page(mpt,
594 		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
595 		    sizeof(fc), FALSE, 5000);
596 		if (r != 0) {
597 			mpt_prt(mpt, "failed to update NVRAM with changes\n");
598 			return (0);
599 		}
600 		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
601 		    "effect until next reboot or IOC reset\n");
602 	}
603 	return (0);
604 }
605 
606 static int
607 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
608 {
609 	ConfigExtendedPageHeader_t hdr;
610 	struct mptsas_phyinfo *phyinfo;
611 	SasIOUnitPage0_t *buffer;
612 	int error, len, i;
613 
614 	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
615 				       0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
616 				       &hdr, 0, 10000);
617 	if (error)
618 		goto out;
619 	if (hdr.ExtPageLength == 0) {
620 		error = ENXIO;
621 		goto out;
622 	}
623 
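	/* ExtPageLength is in 32-bit words; convert it to a byte count. */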
624 	len = hdr.ExtPageLength * 4;
625 	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
626 	if (buffer == NULL) {
627 		error = ENOMEM;
628 		goto out;
629 	}
630 
631 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
632 				     0, &hdr, buffer, len, 0, 10000);
633 	if (error) {
634 		free(buffer, M_DEVBUF);
635 		goto out;
636 	}
637 
638 	portinfo->num_phys = buffer->NumPhys;
639 	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
640 	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
641 	if (portinfo->phy_info == NULL) {
642 		free(buffer, M_DEVBUF);
643 		error = ENOMEM;
644 		goto out;
645 	}
646 
647 	for (i = 0; i < portinfo->num_phys; i++) {
648 		phyinfo = &portinfo->phy_info[i];
649 		phyinfo->phy_num = i;
650 		phyinfo->port_id = buffer->PhyData[i].Port;
651 		phyinfo->negotiated_link_rate =
652 		    buffer->PhyData[i].NegotiatedLinkRate;
653 		phyinfo->handle =
654 		    le16toh(buffer->PhyData[i].ControllerDevHandle);
655 	}
656 
657 	free(buffer, M_DEVBUF);
658 out:
659 	return (error);
660 }
661 
662 static int
663 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
664 	uint32_t form, uint32_t form_specific)
665 {
666 	ConfigExtendedPageHeader_t hdr;
667 	SasPhyPage0_t *buffer;
668 	int error;
669 
670 	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
671 				       MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
672 				       0, 10000);
673 	if (error)
674 		goto out;
675 	if (hdr.ExtPageLength == 0) {
676 		error = ENXIO;
677 		goto out;
678 	}
679 
680 	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
681 	if (buffer == NULL) {
682 		error = ENOMEM;
683 		goto out;
684 	}
685 
686 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
687 				     form + form_specific, &hdr, buffer,
688 				     sizeof(SasPhyPage0_t), 0, 10000);
689 	if (error) {
690 		free(buffer, M_DEVBUF);
691 		goto out;
692 	}
693 
694 	phy_info->hw_link_rate = buffer->HwLinkRate;
695 	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
696 	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
697 	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
698 
699 	free(buffer, M_DEVBUF);
700 out:
701 	return (error);
702 }
703 
704 static int
705 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
706 	uint32_t form, uint32_t form_specific)
707 {
708 	ConfigExtendedPageHeader_t hdr;
709 	SasDevicePage0_t *buffer;
710 	uint64_t sas_address;
711 	int error = 0;
712 
713 	bzero(device_info, sizeof(*device_info));
714 	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
715 				       MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
716 				       &hdr, 0, 10000);
717 	if (error)
718 		goto out;
719 	if (hdr.ExtPageLength == 0) {
720 		error = ENXIO;
721 		goto out;
722 	}
723 
724 	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
725 	if (buffer == NULL) {
726 		error = ENOMEM;
727 		goto out;
728 	}
729 
730 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
731 				     form + form_specific, &hdr, buffer,
732 				     sizeof(SasDevicePage0_t), 0, 10000);
733 	if (error) {
734 		free(buffer, M_DEVBUF);
735 		goto out;
736 	}
737 
738 	device_info->dev_handle = le16toh(buffer->DevHandle);
739 	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
740 	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
741 	device_info->slot = le16toh(buffer->Slot);
742 	device_info->phy_num = buffer->PhyNum;
743 	device_info->physical_port = buffer->PhysicalPort;
744 	device_info->target_id = buffer->TargetID;
745 	device_info->bus = buffer->Bus;
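	/*
	 * SASAddress is declared as a pair of 32-bit words in the MPI
	 * headers, so copy it into a scalar before byte-swapping it.
	 */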
746 	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
747 	device_info->sas_address = le64toh(sas_address);
748 	device_info->device_info = le32toh(buffer->DeviceInfo);
749 
750 	free(buffer, M_DEVBUF);
751 out:
752 	return (error);
753 }
754 
755 /*
756  * Read SAS configuration information.
757  */
758 static int
759 mpt_read_config_info_sas(struct mpt_softc *mpt)
760 {
761 	struct mptsas_portinfo *portinfo;
762 	struct mptsas_phyinfo *phyinfo;
763 	int error, i;
764 
765 	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
766 	if (portinfo == NULL)
767 		return (ENOMEM);
768 
769 	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
770 	if (error) {
771 		free(portinfo, M_DEVBUF);
772 		return (0);
773 	}
774 
775 	for (i = 0; i < portinfo->num_phys; i++) {
776 		phyinfo = &portinfo->phy_info[i];
777 		error = mptsas_sas_phy_pg0(mpt, phyinfo,
778 		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
779 		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
780 		if (error)
781 			break;
782 		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
783 		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
784 		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
785 		    phyinfo->handle);
786 		if (error)
787 			break;
788 		phyinfo->identify.phy_num = phyinfo->phy_num = i;
789 		if (phyinfo->attached.dev_handle)
790 			error = mptsas_sas_device_pg0(mpt,
791 			    &phyinfo->attached,
792 			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
793 			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
794 			    phyinfo->attached.dev_handle);
795 		if (error)
796 			break;
797 	}
798 	mpt->sas_portinfo = portinfo;
799 	return (0);
800 }
801 
802 static void
803 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
804 	int enabled)
805 {
806 	SataPassthroughRequest_t	*pass;
807 	request_t *req;
808 	int error, status;
809 
810 	req = mpt_get_request(mpt, 0);
811 	if (req == NULL)
812 		return;
813 
814 	pass = req->req_vbuf;
815 	bzero(pass, sizeof(SataPassthroughRequest_t));
816 	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
817 	pass->TargetID = devinfo->target_id;
818 	pass->Bus = devinfo->bus;
819 	pass->PassthroughFlags = 0;
820 	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
821 	pass->DataLength = 0;
822 	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
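	/*
	 * Hand-build a Register Host-to-Device FIS (type 0x27): byte 1
	 * sets the C bit (command register update), byte 2 is the ATA
	 * SET FEATURES opcode (0xef) and byte 3 selects the feature,
	 * 0x02 to enable the write cache or 0x82 to disable it.  Bytes
	 * 7 and 15 hold the Device and Control registers with their
	 * conventional fixed bits set.
	 */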
823 	pass->CommandFIS[0] = 0x27;
824 	pass->CommandFIS[1] = 0x80;
825 	pass->CommandFIS[2] = 0xef;
826 	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
827 	pass->CommandFIS[7] = 0x40;
828 	pass->CommandFIS[15] = 0x08;
829 
830 	mpt_check_doorbell(mpt);
831 	mpt_send_cmd(mpt, req);
832 	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
833 			     10 * 1000);
834 	if (error) {
835 		mpt_free_request(mpt, req);
836 		printf("error %d sending passthrough\n", error);
837 		return;
838 	}
839 
840 	status = le16toh(req->IOCStatus);
841 	if (status != MPI_IOCSTATUS_SUCCESS) {
842 		mpt_free_request(mpt, req);
843 		printf("IOCSTATUS %d\n", status);
844 		return;
845 	}
846 
847 	mpt_free_request(mpt, req);
848 }
849 
850 /*
851  * Set SAS configuration information (SATA write caching, if requested).
852  */
853 static int
854 mpt_set_initial_config_sas(struct mpt_softc *mpt)
855 {
856 	struct mptsas_phyinfo *phyinfo;
857 	int i;
858 
859 	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
860 		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
861 			phyinfo = &mpt->sas_portinfo->phy_info[i];
862 			if (phyinfo->attached.dev_handle == 0)
863 				continue;
864 			if ((phyinfo->attached.device_info &
865 			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
866 				continue;
867 			if (bootverbose)
868 				device_printf(mpt->dev,
869 				    "%sabling SATA WC on phy %d\n",
870 				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
871 			mptsas_set_sata_wc(mpt, &phyinfo->attached,
872 					   mpt_enable_sata_wc);
873 		}
874 	}
875 
876 	return (0);
877 }
878 
879 static int
880 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
881  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
882 {
883 
884 	if (req != NULL) {
885 		if (reply_frame != NULL) {
886 			req->IOCStatus = le16toh(reply_frame->IOCStatus);
887 		}
888 		req->state &= ~REQ_STATE_QUEUED;
889 		req->state |= REQ_STATE_DONE;
890 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
891 		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
892 			wakeup(req);
893 		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
894 			/*
895 			 * Whew- we can free this request (late completion)
896 			 */
897 			mpt_free_request(mpt, req);
898 		}
899 	}
900 
901 	return (TRUE);
902 }
903 
904 /*
905  * Read SPI (parallel SCSI) configuration information.
906  */
907 static int
908 mpt_read_config_info_spi(struct mpt_softc *mpt)
909 {
910 	int rv, i;
911 
912 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
913 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
914 	if (rv) {
915 		return (-1);
916 	}
917 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
918 	    mpt->mpt_port_page0.Header.PageVersion,
919 	    mpt->mpt_port_page0.Header.PageLength,
920 	    mpt->mpt_port_page0.Header.PageNumber,
921 	    mpt->mpt_port_page0.Header.PageType);
922 
923 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
924 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
925 	if (rv) {
926 		return (-1);
927 	}
928 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
929 	    mpt->mpt_port_page1.Header.PageVersion,
930 	    mpt->mpt_port_page1.Header.PageLength,
931 	    mpt->mpt_port_page1.Header.PageNumber,
932 	    mpt->mpt_port_page1.Header.PageType);
933 
934 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
935 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
936 	if (rv) {
937 		return (-1);
938 	}
939 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
940 	    mpt->mpt_port_page2.Header.PageVersion,
941 	    mpt->mpt_port_page2.Header.PageLength,
942 	    mpt->mpt_port_page2.Header.PageNumber,
943 	    mpt->mpt_port_page2.Header.PageType);
944 
945 	for (i = 0; i < 16; i++) {
946 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
947 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
948 		if (rv) {
949 			return (-1);
950 		}
951 		mpt_lprt(mpt, MPT_PRT_DEBUG,
952 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
953 		    mpt->mpt_dev_page0[i].Header.PageVersion,
954 		    mpt->mpt_dev_page0[i].Header.PageLength,
955 		    mpt->mpt_dev_page0[i].Header.PageNumber,
956 		    mpt->mpt_dev_page0[i].Header.PageType);
957 
958 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
959 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
960 		if (rv) {
961 			return (-1);
962 		}
963 		mpt_lprt(mpt, MPT_PRT_DEBUG,
964 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
965 		    mpt->mpt_dev_page1[i].Header.PageVersion,
966 		    mpt->mpt_dev_page1[i].Header.PageLength,
967 		    mpt->mpt_dev_page1[i].Header.PageNumber,
968 		    mpt->mpt_dev_page1[i].Header.PageType);
969 	}
970 
971 	/*
972 	 * At this point, we don't *have* to fail. As long as we have
973 	 * valid config header information, we can (barely) lurch
974 	 * along.
975 	 */
976 
977 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
978 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
979 	if (rv) {
980 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
981 	} else {
982 		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
983 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
984 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
985 		    mpt->mpt_port_page0.Capabilities,
986 		    mpt->mpt_port_page0.PhysicalInterface);
987 	}
988 
989 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
990 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
991 	if (rv) {
992 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
993 	} else {
994 		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
995 		mpt_lprt(mpt, MPT_PRT_DEBUG,
996 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
997 		    mpt->mpt_port_page1.Configuration,
998 		    mpt->mpt_port_page1.OnBusTimerValue);
999 	}
1000 
1001 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
1002 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
1003 	if (rv) {
1004 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
1005 	} else {
1006 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1007 		    "Port Page 2: Flags %x Settings %x\n",
1008 		    mpt->mpt_port_page2.PortFlags,
1009 		    mpt->mpt_port_page2.PortSettings);
1010 		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
1011 		for (i = 0; i < 16; i++) {
1012 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1013 		  	    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1014 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1015 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1016 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1017 		}
1018 	}
1019 
1020 	for (i = 0; i < 16; i++) {
1021 		rv = mpt_read_cur_cfg_page(mpt, i,
1022 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1023 		    FALSE, 5000);
1024 		if (rv) {
1025 			mpt_prt(mpt,
1026 			    "cannot read SPI Target %d Device Page 0\n", i);
1027 			continue;
1028 		}
1029 		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
1030 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1031 		    "target %d page 0: Negotiated Params %x Information %x\n",
1032 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1033 		    mpt->mpt_dev_page0[i].Information);
1034 
1035 		rv = mpt_read_cur_cfg_page(mpt, i,
1036 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1037 		    FALSE, 5000);
1038 		if (rv) {
1039 			mpt_prt(mpt,
1040 			    "cannot read SPI Target %d Device Page 1\n", i);
1041 			continue;
1042 		}
1043 		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
1044 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1045 		    "target %d page 1: Requested Params %x Configuration %x\n",
1046 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
1047 		    mpt->mpt_dev_page1[i].Configuration);
1048 	}
1049 	return (0);
1050 }
1051 
1052 /*
1053  * Validate SPI configuration information.
1054  *
1055  * In particular, validate SPI Port Page 1.
1056  */
1057 static int
1058 mpt_set_initial_config_spi(struct mpt_softc *mpt)
1059 {
1060 	int error, i, pp1val;
1061 
1062 	mpt->mpt_disc_enable = 0xff;
1063 	mpt->mpt_tag_enable = 0;
1064 
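	/*
	 * The Configuration word of SPI Port Page 1 carries our initiator
	 * ID in its low bits and, in the shifted response-ID field, a
	 * bitmask of the SCSI IDs the port answers to; pp1val asks the
	 * port to respond on our own ID only.
	 */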
1065 	pp1val = ((1 << mpt->mpt_ini_id) <<
1066 	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
1067 	if (mpt->mpt_port_page1.Configuration != pp1val) {
1068 		CONFIG_PAGE_SCSI_PORT_1 tmp;
1069 
1070 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1071 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1072 		tmp = mpt->mpt_port_page1;
1073 		tmp.Configuration = pp1val;
1074 		host2mpt_config_page_scsi_port_1(&tmp);
1075 		error = mpt_write_cur_cfg_page(mpt, 0,
1076 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1077 		if (error) {
1078 			return (-1);
1079 		}
1080 		error = mpt_read_cur_cfg_page(mpt, 0,
1081 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1082 		if (error) {
1083 			return (-1);
1084 		}
1085 		mpt2host_config_page_scsi_port_1(&tmp);
1086 		if (tmp.Configuration != pp1val) {
1087 			mpt_prt(mpt,
1088 			    "failed to reset SPI Port Page 1 Config value\n");
1089 			return (-1);
1090 		}
1091 		mpt->mpt_port_page1 = tmp;
1092 	}
1093 
1094 	/*
1095 	 * The purpose of this exercise is to get
1096 	 * all targets back to async/narrow.
1097 	 *
1098 	 * We skip this step if the BIOS has already negotiated
1099 	 * speeds with the targets.
1100 	 */
1101 	i = mpt->mpt_port_page2.PortSettings &
1102 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1103 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
1104 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1105 		    "honoring BIOS transfer negotiations\n");
1106 	} else {
1107 		for (i = 0; i < 16; i++) {
1108 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
1109 			mpt->mpt_dev_page1[i].Configuration = 0;
1110 			(void) mpt_update_spi_config(mpt, i);
1111 		}
1112 	}
1113 	return (0);
1114 }
1115 
1116 static int
1117 mpt_cam_enable(struct mpt_softc *mpt)
1118 {
1119 	int error;
1120 
1121 	MPT_LOCK(mpt);
1122 
1123 	error = EIO;
1124 	if (mpt->is_fc) {
1125 		if (mpt_read_config_info_fc(mpt)) {
1126 			goto out;
1127 		}
1128 		if (mpt_set_initial_config_fc(mpt)) {
1129 			goto out;
1130 		}
1131 	} else if (mpt->is_sas) {
1132 		if (mpt_read_config_info_sas(mpt)) {
1133 			goto out;
1134 		}
1135 		if (mpt_set_initial_config_sas(mpt)) {
1136 			goto out;
1137 		}
1138 	} else if (mpt->is_spi) {
1139 		if (mpt_read_config_info_spi(mpt)) {
1140 			goto out;
1141 		}
1142 		if (mpt_set_initial_config_spi(mpt)) {
1143 			goto out;
1144 		}
1145 	}
1146 	error = 0;
1147 
1148 out:
1149 	MPT_UNLOCK(mpt);
1150 	return (error);
1151 }
1152 
1153 static void
1154 mpt_cam_ready(struct mpt_softc *mpt)
1155 {
1156 
1157 	/*
1158 	 * If we're in target mode, hang out resources now
1159 	 * so we don't cause the world to hang talking to us.
1160 	 */
1161 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1162 		/*
1163 		 * Try to add some target command resources
1164 		 */
1165 		MPT_LOCK(mpt);
1166 		if (mpt_add_target_commands(mpt) == FALSE) {
1167 			mpt_prt(mpt, "failed to add target commands\n");
1168 		}
1169 		MPT_UNLOCK(mpt);
1170 	}
1171 	mpt->ready = 1;
1172 }
1173 
1174 static void
1175 mpt_cam_detach(struct mpt_softc *mpt)
1176 {
1177 	mpt_handler_t handler;
1178 
1179 	MPT_LOCK(mpt);
1180 	mpt->ready = 0;
1181 	mpt_terminate_recovery_thread(mpt);
1182 
1183 	handler.reply_handler = mpt_scsi_reply_handler;
1184 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1185 			       scsi_io_handler_id);
1186 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
1187 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1188 			       scsi_tmf_handler_id);
1189 	handler.reply_handler = mpt_fc_els_reply_handler;
1190 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1191 			       fc_els_handler_id);
1192 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
1193 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1194 			       mpt->scsi_tgt_handler_id);
1195 	handler.reply_handler = mpt_sata_pass_reply_handler;
1196 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1197 			       sata_pass_handler_id);
1198 
1199 	if (mpt->tmf_req != NULL) {
1200 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1201 		mpt_free_request(mpt, mpt->tmf_req);
1202 		mpt->tmf_req = NULL;
1203 	}
1204 	if (mpt->sas_portinfo != NULL) {
		/* Free the per-phy array too so that it is not leaked. */
		free(mpt->sas_portinfo->phy_info, M_DEVBUF);
1205 		free(mpt->sas_portinfo, M_DEVBUF);
1206 		mpt->sas_portinfo = NULL;
1207 	}
1208 
1209 	if (mpt->sim != NULL) {
1210 		xpt_free_path(mpt->path);
1211 		xpt_bus_deregister(cam_sim_path(mpt->sim));
1212 		cam_sim_free(mpt->sim, TRUE);
1213 		mpt->sim = NULL;
1214 	}
1215 
1216 	if (mpt->phydisk_sim != NULL) {
1217 		xpt_free_path(mpt->phydisk_path);
1218 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1219 		cam_sim_free(mpt->phydisk_sim, TRUE);
1220 		mpt->phydisk_sim = NULL;
1221 	}
1222 	MPT_UNLOCK(mpt);
1223 }
1224 
1225 /*
 * This routine is used after a system crash to dump core onto the swap
 * device.
1226  */
1227 static void
1228 mpt_poll(struct cam_sim *sim)
1229 {
1230 	struct mpt_softc *mpt;
1231 
1232 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
1233 	mpt_intr(mpt);
1234 }
1235 
1236 /*
1237  * Watchdog timeout routine for SCSI requests.
1238  */
1239 static void
1240 mpt_timeout(void *arg)
1241 {
1242 	union ccb	 *ccb;
1243 	struct mpt_softc *mpt;
1244 	request_t	 *req;
1245 
1246 	ccb = (union ccb *)arg;
1247 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1248 
1249 #if __FreeBSD_version < 500000
1250 	MPT_LOCK(mpt);
1251 #endif
1252 	MPT_LOCK_ASSERT(mpt);
1253 	req = ccb->ccb_h.ccb_req_ptr;
1254 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1255 	    req->serno, ccb, req->ccb);
1256 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
1257 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1258 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1259 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1260 		req->state |= REQ_STATE_TIMEDOUT;
1261 		mpt_wakeup_recovery_thread(mpt);
1262 	}
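	/*
	 * The timed-out request has been parked on the timeout list;
	 * the recovery thread (see mpt_recover_commands()) is expected
	 * to take the task-management follow-up from here.
	 */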
1263 #if __FreeBSD_version < 500000
1264 	MPT_UNLOCK(mpt);
1265 #endif
1266 }
1267 
1268 /*
1269  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
1270  *
1271  * Takes a list of physical segments, builds the SGL for the SCSI I/O
1272  * command, and forwards the command to the IOC after one last check that
1273  * CAM has not aborted the transaction.
1274  */
1275 static void
1276 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1277 {
1278 	request_t *req, *trq;
1279 	char *mpt_off;
1280 	union ccb *ccb;
1281 	struct mpt_softc *mpt;
1282 	int seg, first_lim;
1283 	uint32_t flags, nxt_off;
1284 	void *sglp = NULL;
1285 	MSG_REQUEST_HEADER *hdrp;
1286 	SGE_SIMPLE64 *se;
1287 	SGE_CHAIN64 *ce;
1288 	int istgt = 0;
1289 
1290 	req = (request_t *)arg;
1291 	ccb = req->ccb;
1292 
1293 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1294 	req = ccb->ccb_h.ccb_req_ptr;
1295 
1296 	hdrp = req->req_vbuf;
1297 	mpt_off = req->req_vbuf;
1298 
1299 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1300 		error = EFBIG;
1301 	}
1302 
1303 	if (error == 0) {
1304 		switch (hdrp->Function) {
1305 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1306 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1307 			istgt = 0;
1308 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1309 			break;
1310 		case MPI_FUNCTION_TARGET_ASSIST:
1311 			istgt = 1;
1312 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1313 			break;
1314 		default:
1315 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1316 			    hdrp->Function);
1317 			error = EINVAL;
1318 			break;
1319 		}
1320 	}
1321 
1322 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1323 		error = EFBIG;
1324 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1325 		    nseg, mpt->max_seg_cnt);
1326 	}
1327 
1328 bad:
1329 	if (error != 0) {
1330 		if (error != EFBIG && error != ENOMEM) {
1331 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1332 		}
1333 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1334 			cam_status status;
1335 			mpt_freeze_ccb(ccb);
1336 			if (error == EFBIG) {
1337 				status = CAM_REQ_TOO_BIG;
1338 			} else if (error == ENOMEM) {
1339 				if (mpt->outofbeer == 0) {
1340 					mpt->outofbeer = 1;
1341 					xpt_freeze_simq(mpt->sim, 1);
1342 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1343 					    "FREEZEQ\n");
1344 				}
1345 				status = CAM_REQUEUE_REQ;
1346 			} else {
1347 				status = CAM_REQ_CMP_ERR;
1348 			}
1349 			mpt_set_ccb_status(ccb, status);
1350 		}
1351 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1352 			request_t *cmd_req =
1353 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1354 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1355 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1356 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1357 		}
1358 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1359 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1360 		xpt_done(ccb);
1361 		CAMLOCK_2_MPTLOCK(mpt);
1362 		mpt_free_request(mpt, req);
1363 		MPTLOCK_2_CAMLOCK(mpt);
1364 		return;
1365 	}
1366 
1367 	/*
1368 	 * No data to transfer?
1369 	 * Just make a single simple SGL with zero length.
1370 	 */
1371 
1372 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1373 		int tidx = ((char *)sglp) - mpt_off;
1374 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1375 	}
1376 
1377 	if (nseg == 0) {
1378 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1379 		MPI_pSGE_SET_FLAGS(se1,
1380 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1381 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1382 		se1->FlagsLength = htole32(se1->FlagsLength);
1383 		goto out;
1384 	}
1385 
1386 
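	/*
	 * Set the data direction.  Note the inversion for target mode:
	 * CAM_DIR_IN there means the initiator is reading, so the data
	 * flows from host memory to the IOC, just as CAM_DIR_OUT does
	 * for initiator I/O.
	 */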
1387 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1388 	if (istgt == 0) {
1389 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1390 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1391 		}
1392 	} else {
1393 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1394 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1395 		}
1396 	}
1397 
1398 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1399 		bus_dmasync_op_t op;
1400 		if (istgt == 0) {
1401 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1402 				op = BUS_DMASYNC_PREREAD;
1403 			} else {
1404 				op = BUS_DMASYNC_PREWRITE;
1405 			}
1406 		} else {
1407 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1408 				op = BUS_DMASYNC_PREWRITE;
1409 			} else {
1410 				op = BUS_DMASYNC_PREREAD;
1411 			}
1412 		}
1413 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1414 	}
1415 
1416 	/*
1417 	 * Okay, fill in what we can at the end of the command frame.
1418 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1419 	 * the command frame.
1420 	 *
1421 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1422 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
1423 	 * that.
1424 	 */
1425 
1426 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1427 		first_lim = nseg;
1428 	} else {
1429 		/*
1430 		 * Leave room for CHAIN element
1431 		 */
1432 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1433 	}
1434 
1435 	se = (SGE_SIMPLE64 *) sglp;
1436 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1437 		uint32_t tf;
1438 
1439 		memset(se, 0, sizeof (*se));
1440 		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1441 		if (sizeof(bus_addr_t) > 4) {
1442 			se->Address.High =
1443 			    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
1444 		}
1445 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1446 		tf = flags;
1447 		if (seg == first_lim - 1) {
1448 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1449 		}
1450 		if (seg == nseg - 1) {
1451 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1452 				MPI_SGE_FLAGS_END_OF_BUFFER;
1453 		}
1454 		MPI_pSGE_SET_FLAGS(se, tf);
1455 		se->FlagsLength = htole32(se->FlagsLength);
1456 	}
1457 
1458 	if (seg == nseg) {
1459 		goto out;
1460 	}
1461 
1462 	/*
1463 	 * Tell the IOC where to find the first chain element.
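	 * The offset is counted in 32-bit words from the start of the
	 * request frame, hence the shift by two below.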
1464 	 */
1465 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1466 	nxt_off = MPT_RQSL(mpt);
1467 	trq = req;
1468 
1469 	/*
1470 	 * Make up the rest of the data segments out of a chain element
1471  * (contained in the current request frame) which points to
1472 	 * SIMPLE64 elements in the next request frame, possibly ending
1473 	 * with *another* chain element (if there's more).
1474 	 */
1475 	while (seg < nseg) {
1476 		int this_seg_lim;
1477 		uint32_t tf, cur_off;
1478 		bus_addr_t chain_list_addr;
1479 
1480 		/*
1481 		 * Point to the chain descriptor. Note that the chain
1482 		 * descriptor is at the end of the *previous* list (whether
1483 		 * chain or simple).
1484 		 */
1485 		ce = (SGE_CHAIN64 *) se;
1486 
1487 		/*
1488 		 * Before we change our current pointer, make sure we won't
1489 		 * overflow the request area with this frame. Note that we
1490 		 * test against 'greater than' here as it's okay in this case
1491 		 * to have next offset be just outside the request area.
1492 		 */
1493 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1494 			nxt_off = MPT_REQUEST_AREA;
1495 			goto next_chain;
1496 		}
1497 
1498 		/*
1499 		 * Set our SGE element pointer to the beginning of the chain
1500 		 * list and update our next chain list offset.
1501 		 */
1502 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1503 		cur_off = nxt_off;
1504 		nxt_off += MPT_RQSL(mpt);
1505 
1506 		/*
1507 		 * Now initialize the chain descriptor.
1508 		 */
1509 		memset(ce, 0, sizeof (*ce));
1510 
1511 		/*
1512 		 * Get the physical address of the chain list.
1513 		 */
1514 		chain_list_addr = trq->req_pbuf;
1515 		chain_list_addr += cur_off;
1516 		if (sizeof (bus_addr_t) > 4) {
1517 			ce->Address.High =
1518 			    htole32(((uint64_t)chain_list_addr) >> 32);
1519 		}
1520 		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
1521 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1522 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1523 
1524 		/*
1525 		 * If we have more than a frame's worth of segments left,
1526 		 * set up the chain list to have the last element be another
1527 		 * chain descriptor.
1528 		 */
1529 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1530 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1531 			/*
1532 			 * The chain element's Length is the size in bytes
1533 			 * of the SIMPLE64 segments it covers plus the
1534 			 * trailing chain descriptor.
1535 			 *
1536 			 * NextChainOffset locates that trailing descriptor,
			 * expressed in 32-bit words.
1537 			 */
1538 			ce->Length = (this_seg_lim - seg) *
1539 			    sizeof (SGE_SIMPLE64);
1540 			ce->NextChainOffset = ce->Length >> 2;
1541 			ce->Length += sizeof (SGE_CHAIN64);
1542 		} else {
1543 			this_seg_lim = nseg;
1544 			ce->Length = (this_seg_lim - seg) *
1545 			    sizeof (SGE_SIMPLE64);
1546 		}
1547 		ce->Length = htole16(ce->Length);
1548 
1549 		/*
1550 		 * Fill in the chain list SGE elements with our segment data.
1551 		 *
1552 		 * If we're the last element in this chain list, set the last
1553 		 * element flag. If we're the completely last element period,
1554 		 * set the end of list and end of buffer flags.
1555 		 */
1556 		while (seg < this_seg_lim) {
1557 			memset(se, 0, sizeof (*se));
1558 			se->Address.Low = htole32(dm_segs->ds_addr &
1559 			    0xffffffff);
1560 			if (sizeof (bus_addr_t) > 4) {
1561 				se->Address.High =
1562 				    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
1563 			}
1564 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1565 			tf = flags;
1566 			if (seg ==  this_seg_lim - 1) {
1567 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1568 			}
1569 			if (seg == nseg - 1) {
1570 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1571 					MPI_SGE_FLAGS_END_OF_BUFFER;
1572 			}
1573 			MPI_pSGE_SET_FLAGS(se, tf);
1574 			se->FlagsLength = htole32(se->FlagsLength);
1575 			se++;
1576 			seg++;
1577 			dm_segs++;
1578 		}
1579 
1580     next_chain:
1581 		/*
1582 		 * If we have more segments to do and we've used up all of
1583 		 * the space in a request area, go allocate another one
1584 		 * and chain to that.
1585 		 */
1586 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1587 			request_t *nrq;
1588 
1589 			CAMLOCK_2_MPTLOCK(mpt);
1590 			nrq = mpt_get_request(mpt, FALSE);
1591 			MPTLOCK_2_CAMLOCK(mpt);
1592 
1593 			if (nrq == NULL) {
1594 				error = ENOMEM;
1595 				goto bad;
1596 			}
1597 
1598 			/*
1599 			 * Append the new request area on the tail of our list.
1600 			 */
1601 			if ((trq = req->chain) == NULL) {
1602 				req->chain = nrq;
1603 			} else {
1604 				while (trq->chain != NULL) {
1605 					trq = trq->chain;
1606 				}
1607 				trq->chain = nrq;
1608 			}
1609 			trq = nrq;
1610 			mpt_off = trq->req_vbuf;
1611 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1612 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1613 			}
1614 			nxt_off = 0;
1615 		}
1616 	}
1617 out:
1618 
1619 	/*
1620 	 * One last check whether this CCB needs to be aborted.
1621 	 */
1622 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1623 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1624 			request_t *cmd_req =
1625 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1626 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1627 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1628 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1629 		}
1630 		mpt_prt(mpt,
1631 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1632 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1633 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1634 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1635 		}
1636 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1637 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1638 		xpt_done(ccb);
1639 		CAMLOCK_2_MPTLOCK(mpt);
1640 		mpt_free_request(mpt, req);
1641 		MPTLOCK_2_CAMLOCK(mpt);
1642 		return;
1643 	}
1644 
1645 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1646 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1647 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
1648 		    mpt_timeout, ccb);
1649 	}
1650 	if (mpt->verbose > MPT_PRT_DEBUG) {
1651 		int nc = 0;
1652 		mpt_print_request(req->req_vbuf);
1653 		for (trq = req->chain; trq; trq = trq->chain) {
1654 			printf("  Additional Chain Area %d\n", nc++);
1655 			mpt_dump_sgl(trq->req_vbuf, 0);
1656 		}
1657 	}
1658 
1659 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1660 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1661 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1662 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1663 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1664 		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1665 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1666 		} else {
1667 			tgt->state = TGT_STATE_MOVING_DATA;
1668 		}
1669 #else
1670 		tgt->state = TGT_STATE_MOVING_DATA;
1671 #endif
1672 	}
1673 	CAMLOCK_2_MPTLOCK(mpt);
1674 	mpt_send_cmd(mpt, req);
1675 	MPTLOCK_2_CAMLOCK(mpt);
1676 }
1677 
1678 static void
1679 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1680 {
1681 	request_t *req, *trq;
1682 	char *mpt_off;
1683 	union ccb *ccb;
1684 	struct mpt_softc *mpt;
1685 	int seg, first_lim;
1686 	uint32_t flags, nxt_off;
1687 	void *sglp = NULL;
1688 	MSG_REQUEST_HEADER *hdrp;
1689 	SGE_SIMPLE32 *se;
1690 	SGE_CHAIN32 *ce;
1691 	int istgt = 0;
1692 
1693 	req = (request_t *)arg;
1694 	ccb = req->ccb;
1695 
1696 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1697 	req = ccb->ccb_h.ccb_req_ptr;
1698 
1699 	hdrp = req->req_vbuf;
1700 	mpt_off = req->req_vbuf;
1701 
1702 
1703 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1704 		error = EFBIG;
1705 	}
1706 
1707 	if (error == 0) {
1708 		switch (hdrp->Function) {
1709 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1710 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1711 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1712 			break;
1713 		case MPI_FUNCTION_TARGET_ASSIST:
1714 			istgt = 1;
1715 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1716 			break;
1717 		default:
1718 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1719 			    hdrp->Function);
1720 			error = EINVAL;
1721 			break;
1722 		}
1723 	}
1724 
1725 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1726 		error = EFBIG;
1727 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1728 		    nseg, mpt->max_seg_cnt);
1729 	}
1730 
1731 bad:
1732 	if (error != 0) {
1733 		if (error != EFBIG && error != ENOMEM) {
1734 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1735 		}
1736 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1737 			cam_status status;
1738 			mpt_freeze_ccb(ccb);
1739 			if (error == EFBIG) {
1740 				status = CAM_REQ_TOO_BIG;
1741 			} else if (error == ENOMEM) {
1742 				if (mpt->outofbeer == 0) {
1743 					mpt->outofbeer = 1;
1744 					xpt_freeze_simq(mpt->sim, 1);
1745 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1746 					    "FREEZEQ\n");
1747 				}
1748 				status = CAM_REQUEUE_REQ;
1749 			} else {
1750 				status = CAM_REQ_CMP_ERR;
1751 			}
1752 			mpt_set_ccb_status(ccb, status);
1753 		}
1754 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1755 			request_t *cmd_req =
1756 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1757 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1758 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1759 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1760 		}
1761 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1762 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1763 		xpt_done(ccb);
1764 		CAMLOCK_2_MPTLOCK(mpt);
1765 		mpt_free_request(mpt, req);
1766 		MPTLOCK_2_CAMLOCK(mpt);
1767 		return;
1768 	}
1769 
1770 	/*
1771 	 * No data to transfer?
1772 	 * Just make a single simple SGL with zero length.
1773 	 */
1774 
1775 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1776 		int tidx = ((char *)sglp) - mpt_off;
1777 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1778 	}
1779 
1780 	if (nseg == 0) {
1781 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1782 		MPI_pSGE_SET_FLAGS(se1,
1783 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1784 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1785 		se1->FlagsLength = htole32(se1->FlagsLength);
1786 		goto out;
1787 	}
1788 
1789 
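	/*
	 * Set the data direction in the SGE flags. Note that the sense
	 * is inverted for target assist: there CAM_DIR_IN means data is
	 * moving to the initiator, i.e. from the host to the IOC.
	 */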
1790 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1791 	if (istgt == 0) {
1792 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1793 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1794 		}
1795 	} else {
1796 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1797 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1798 		}
1799 	}
1800 
1801 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1802 		bus_dmasync_op_t op;
1803 		if (istgt) {
1804 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1805 				op = BUS_DMASYNC_PREREAD;
1806 			} else {
1807 				op = BUS_DMASYNC_PREWRITE;
1808 			}
1809 		} else {
1810 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1811 				op = BUS_DMASYNC_PREWRITE;
1812 			} else {
1813 				op = BUS_DMASYNC_PREREAD;
1814 			}
1815 		}
1816 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1817 	}
1818 
1819 	/*
1820 	 * Okay, fill in what we can at the end of the command frame.
1821 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1822 	 * the command frame.
1823 	 *
1824 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1825 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1826 	 * that.
1827 	 */
1828 
1829 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1830 		first_lim = nseg;
1831 	} else {
1832 		/*
1833 		 * Leave room for CHAIN element
1834 		 */
1835 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1836 	}
1837 
1838 	se = (SGE_SIMPLE32 *) sglp;
1839 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1840 		uint32_t tf;
1841 
1842 		memset(se, 0, sizeof (*se));
1843 		se->Address = htole32(dm_segs->ds_addr);
1844 
1845 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1846 		tf = flags;
1847 		if (seg == first_lim - 1) {
1848 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1849 		}
1850 		if (seg == nseg - 1) {
1851 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1852 				MPI_SGE_FLAGS_END_OF_BUFFER;
1853 		}
1854 		MPI_pSGE_SET_FLAGS(se, tf);
1855 		se->FlagsLength = htole32(se->FlagsLength);
1856 	}
1857 
1858 	if (seg == nseg) {
1859 		goto out;
1860 	}
1861 
1862 	/*
1863 	 * Tell the IOC where to find the first chain element.
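	 * The ChainOffset field is expressed in 32-bit words, hence the
	 * shift by two below.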
1864 	 */
1865 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1866 	nxt_off = MPT_RQSL(mpt);
1867 	trq = req;
1868 
1869 	/*
1870 	 * Make up the rest of the data segments out of a chain element
1871 	 * (contained in the current request frame) which points to
1872 	 * SIMPLE32 elements in the next request frame, possibly ending
1873 	 * with *another* chain element (if there's more).
1874 	 */
1875 	while (seg < nseg) {
1876 		int this_seg_lim;
1877 		uint32_t tf, cur_off;
1878 		bus_addr_t chain_list_addr;
1879 
1880 		/*
1881 		 * Point to the chain descriptor. Note that the chain
1882 		 * descriptor is at the end of the *previous* list (whether
1883 		 * chain or simple).
1884 		 */
1885 		ce = (SGE_CHAIN32 *) se;
1886 
1887 		/*
1888 		 * Before we change our current pointer, make sure we won't
1889 		 * overflow the request area with this frame. Note that we
1890 		 * test against 'greater than' here as it's okay in this case
1891 		 * to have next offset be just outside the request area.
1892 		 */
1893 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1894 			nxt_off = MPT_REQUEST_AREA;
1895 			goto next_chain;
1896 		}
1897 
1898 		/*
1899 		 * Set our SGE element pointer to the beginning of the chain
1900 		 * list and update our next chain list offset.
1901 		 */
1902 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1903 		cur_off = nxt_off;
1904 		nxt_off += MPT_RQSL(mpt);
1905 
1906 		/*
1907 		 * Now initialize the chain descriptor.
1908 		 */
1909 		memset(ce, 0, sizeof (*ce));
1910 
1911 		/*
1912 		 * Get the physical address of the chain list.
1913 		 */
1914 		chain_list_addr = trq->req_pbuf;
1915 		chain_list_addr += cur_off;
1916 
1917 
1918 
1919 		ce->Address = htole32(chain_list_addr);
1920 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1921 
1922 
1923 		/*
1924 		 * If we have more than a frame's worth of segments left,
1925 		 * set up the chain list to have the last element be another
1926 		 * chain descriptor.
1927 		 */
1928 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1929 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1930 			/*
1931 			 * The length of the chain is the size, in bytes, of
1932 			 * this frame's segments plus the next chain element.
1933 			 *
1934 			 * The next chain descriptor offset is that segment
1935 			 * size expressed in 32-bit words.
1936 			 */
1937 			ce->Length = (this_seg_lim - seg) *
1938 			    sizeof (SGE_SIMPLE32);
1939 			ce->NextChainOffset = ce->Length >> 2;
1940 			ce->Length += sizeof (SGE_CHAIN32);
1941 		} else {
1942 			this_seg_lim = nseg;
1943 			ce->Length = (this_seg_lim - seg) *
1944 			    sizeof (SGE_SIMPLE32);
1945 		}
1946 		ce->Length = htole16(ce->Length);
1947 
1948 		/*
1949 		 * Fill in the chain list SGE elements with our segment data.
1950 		 *
1951 		 * If we're the last element in this chain list, set the last
1952 		 * element flag. If we're the completely last element period,
1953 		 * set the end of list and end of buffer flags.
1954 		 */
1955 		while (seg < this_seg_lim) {
1956 			memset(se, 0, sizeof (*se));
1957 			se->Address = htole32(dm_segs->ds_addr);
1958 
1959 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1960 			tf = flags;
1961 			if (seg == this_seg_lim - 1) {
1962 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1963 			}
1964 			if (seg == nseg - 1) {
1965 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1966 					MPI_SGE_FLAGS_END_OF_BUFFER;
1967 			}
1968 			MPI_pSGE_SET_FLAGS(se, tf);
1969 			se->FlagsLength = htole32(se->FlagsLength);
1970 			se++;
1971 			seg++;
1972 			dm_segs++;
1973 		}
1974 
1975     next_chain:
1976 		/*
1977 		 * If we have more segments to do and we've used up all of
1978 		 * the space in a request area, go allocate another one
1979 		 * and chain to that.
1980 		 */
1981 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1982 			request_t *nrq;
1983 
1984 			CAMLOCK_2_MPTLOCK(mpt);
1985 			nrq = mpt_get_request(mpt, FALSE);
1986 			MPTLOCK_2_CAMLOCK(mpt);
1987 
1988 			if (nrq == NULL) {
1989 				error = ENOMEM;
1990 				goto bad;
1991 			}
1992 
1993 			/*
1994 			 * Append the new request area on the tail of our list.
1995 			 */
1996 			if ((trq = req->chain) == NULL) {
1997 				req->chain = nrq;
1998 			} else {
1999 				while (trq->chain != NULL) {
2000 					trq = trq->chain;
2001 				}
2002 				trq->chain = nrq;
2003 			}
2004 			trq = nrq;
2005 			mpt_off = trq->req_vbuf;
2006 			if (mpt->verbose >= MPT_PRT_DEBUG) {
2007 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
2008 			}
2009 			nxt_off = 0;
2010 		}
2011 	}
2012 out:
2013 
2014 	/*
2015 	 * Last time we need to check if this CCB needs to be aborted.
2016 	 */
2017 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2018 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2019 			request_t *cmd_req =
2020 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2021 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2022 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2023 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2024 		}
2025 		mpt_prt(mpt,
2026 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2027 		    ccb->ccb_h.status & CAM_STATUS_MASK);
2028 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2029 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2030 		}
2031 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2032 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2033 		xpt_done(ccb);
2034 		CAMLOCK_2_MPTLOCK(mpt);
2035 		mpt_free_request(mpt, req);
2036 		MPTLOCK_2_CAMLOCK(mpt);
2037 		return;
2038 	}
2039 
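	/*
	 * The command is now bound for the IOC; arm the watchdog with
	 * the CAM timeout, converted from milliseconds to ticks.
	 */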
2040 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2041 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2042 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
2043 		    mpt_timeout, ccb);
2044 	}
2045 	if (mpt->verbose > MPT_PRT_DEBUG) {
2046 		int nc = 0;
2047 		mpt_print_request(req->req_vbuf);
2048 		for (trq = req->chain; trq; trq = trq->chain) {
2049 			printf("  Additional Chain Area %d\n", nc++);
2050 			mpt_dump_sgl(trq->req_vbuf, 0);
2051 		}
2052 	}
2053 
2054 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2055 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2056 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
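		/*
		 * With auto good status the IOC posts SCSI status itself
		 * once a clean data move completes, saving a round trip.
		 */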
2057 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
2058 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2059 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2060 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2061 		} else {
2062 			tgt->state = TGT_STATE_MOVING_DATA;
2063 		}
2064 #else
2065 		tgt->state = TGT_STATE_MOVING_DATA;
2066 #endif
2067 	}
2068 	CAMLOCK_2_MPTLOCK(mpt);
2069 	mpt_send_cmd(mpt, req);
2070 	MPTLOCK_2_CAMLOCK(mpt);
2071 }
2072 
2073 static void
2074 mpt_start(struct cam_sim *sim, union ccb *ccb)
2075 {
2076 	request_t *req;
2077 	struct mpt_softc *mpt;
2078 	MSG_SCSI_IO_REQUEST *mpt_req;
2079 	struct ccb_scsiio *csio = &ccb->csio;
2080 	struct ccb_hdr *ccbh = &ccb->ccb_h;
2081 	bus_dmamap_callback_t *cb;
2082 	target_id_t tgt;
2083 	int raid_passthru;
2084 
2085 	/* Get the pointer for the physical adapter */
2086 	mpt = ccb->ccb_h.ccb_mpt_ptr;
2087 	raid_passthru = (sim == mpt->phydisk_sim);
2088 
2089 	CAMLOCK_2_MPTLOCK(mpt);
2090 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2091 		if (mpt->outofbeer == 0) {
2092 			mpt->outofbeer = 1;
2093 			xpt_freeze_simq(mpt->sim, 1);
2094 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2095 		}
2096 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2097 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2098 		MPTLOCK_2_CAMLOCK(mpt);
2099 		xpt_done(ccb);
2100 		return;
2101 	}
2102 #ifdef	INVARIANTS
2103 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2104 #endif
2105 	MPTLOCK_2_CAMLOCK(mpt);
2106 
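	/*
	 * Pick the DMA callback: build 64-bit SGEs on platforms whose
	 * bus addresses are wider than 32 bits, 32-bit SGEs otherwise.
	 */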
2107 	if (sizeof (bus_addr_t) > 4) {
2108 		cb = mpt_execute_req_a64;
2109 	} else {
2110 		cb = mpt_execute_req;
2111 	}
2112 
2113 	/*
2114 	 * Link the ccb and the request structure so we can find
2115 	 * the other knowing either the request or the ccb
2116 	 */
2117 	req->ccb = ccb;
2118 	ccb->ccb_h.ccb_req_ptr = req;
2119 
2120 	/* Now we build the command for the IOC */
2121 	mpt_req = req->req_vbuf;
2122 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2123 
2124 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2125 	if (raid_passthru) {
2126 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2127 		CAMLOCK_2_MPTLOCK(mpt);
2128 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2129 			MPTLOCK_2_CAMLOCK(mpt);
2130 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2131 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2132 			xpt_done(ccb);
2133 			return;
2134 		}
2135 		MPTLOCK_2_CAMLOCK(mpt);
2136 		mpt_req->Bus = 0;	/* we never set bus here */
2137 	} else {
2138 		tgt = ccb->ccb_h.target_id;
2139 		mpt_req->Bus = 0;	/* XXX */
2140 
2141 	}
2142 	mpt_req->SenseBufferLength =
2143 		(csio->sense_len < MPT_SENSE_SIZE) ?
2144 		 csio->sense_len : MPT_SENSE_SIZE;
2145 
2146 	/*
2147 	 * We use the message context to find the request structure when we
2148 	 * get the command completion interrupt from the IOC.
2149 	 */
2150 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2151 
2152 	/* Which physical device to do the I/O on */
2153 	mpt_req->TargetID = tgt;
2154 
2155 	/* We assume a single level LUN type */
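	/* Larger LUNs are encoded using SAM flat space addressing. */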
2156 	if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2157 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2158 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2159 	} else {
2160 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2161 	}
2162 
2163 	/* Set the direction of the transfer */
2164 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2165 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2166 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2167 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2168 	} else {
2169 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2170 	}
2171 
2172 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2173 		switch(ccb->csio.tag_action) {
2174 		case MSG_HEAD_OF_Q_TAG:
2175 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2176 			break;
2177 		case MSG_ACA_TASK:
2178 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2179 			break;
2180 		case MSG_ORDERED_Q_TAG:
2181 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2182 			break;
2183 		case MSG_SIMPLE_Q_TAG:
2184 		default:
2185 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2186 			break;
2187 		}
2188 	} else {
2189 		if (mpt->is_fc || mpt->is_sas) {
2190 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2191 		} else {
2192 			/* XXX No such thing for a target doing packetized. */
2193 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2194 		}
2195 	}
2196 
2197 	if (mpt->is_spi) {
2198 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2199 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2200 		}
2201 	}
2202 	mpt_req->Control = htole32(mpt_req->Control);
2203 
2204 	/* Copy the scsi command block into place */
2205 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2206 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2207 	} else {
2208 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2209 	}
2210 
2211 	mpt_req->CDBLength = csio->cdb_len;
2212 	mpt_req->DataLength = htole32(csio->dxfer_len);
2213 	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2214 
2215 	/*
2216 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2217 	 */
2218 	if (mpt->verbose == MPT_PRT_DEBUG) {
2219 		U32 df;
2220 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2221 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2222 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2223 		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2224 		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2225 			mpt_prtc(mpt, "(%s %u byte%s ",
2226 			    (df == MPI_SCSIIO_CONTROL_READ)?
2227 			    "read" : "write", csio->dxfer_len,
2228 			    (csio->dxfer_len == 1)? ")" : "s)");
2229 		}
2230 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2231 		    ccb->ccb_h.target_lun, req, req->serno);
2232 	}
2233 
2234 	/*
2235 	 * If we have any data to send with this command map it into bus space.
2236 	 */
2237 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2238 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
2239 			/*
2240 			 * We've been given a pointer to a single buffer.
2241 			 */
2242 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
2243 				/*
2244 				 * Virtual address that needs to be translated into
2245 				 * one or more physical address ranges.
2246 				 */
2247 				int error;
2248 				int s = splsoftvm();
2249 				error = bus_dmamap_load(mpt->buffer_dmat,
2250 				    req->dmap, csio->data_ptr, csio->dxfer_len,
2251 				    cb, req, 0);
2252 				splx(s);
2253 				if (error == EINPROGRESS) {
2254 					/*
2255 					 * So as to maintain ordering,
2256 					 * freeze the controller queue
2257 					 * until our mapping is
2258 					 * returned.
2259 					 */
2260 					xpt_freeze_simq(mpt->sim, 1);
2261 					ccbh->status |= CAM_RELEASE_SIMQ;
2262 				}
2263 			} else {
2264 				/*
2265 				 * We have been given a pointer to a single
2266 				 * physical buffer.
2267 				 */
2268 				struct bus_dma_segment seg;
2269 				seg.ds_addr =
2270 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
2271 				seg.ds_len = csio->dxfer_len;
2272 				(*cb)(req, &seg, 1, 0);
2273 			}
2274 		} else {
2275 			/*
2276 			 * We have been given a list of addresses.
2277 			 * This case could be easily supported but they are not
2278 			 * currently generated by the CAM subsystem so there
2279 			 * is no point in wasting the time right now.
2280 			 */
2281 			struct bus_dma_segment *segs;
2282 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
2283 				(*cb)(req, NULL, 0, EFAULT);
2284 			} else {
2285 				/* Just use the segments provided */
2286 				segs = (struct bus_dma_segment *)csio->data_ptr;
2287 				(*cb)(req, segs, csio->sglist_cnt, 0);
2288 			}
2289 		}
2290 	} else {
2291 		(*cb)(req, NULL, 0, 0);
2292 	}
2293 }
2294 
2295 static int
2296 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2297     int sleep_ok)
2298 {
2299 	int   error;
2300 	uint16_t status;
2301 	uint8_t response;
2302 
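	/*
	 * If a specific target or lun was named, issue a target reset
	 * TMF; otherwise reset the entire bus.
	 */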
2303 	error = mpt_scsi_send_tmf(mpt,
2304 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2305 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2306 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2307 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2308 	    0,	/* XXX How do I get the channel ID? */
2309 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2310 	    lun != CAM_LUN_WILDCARD ? lun : 0,
2311 	    0, sleep_ok);
2312 
2313 	if (error != 0) {
2314 		/*
2315 		 * mpt_scsi_send_tmf hard resets on failure, so no
2316 		 * need to do so here.
2317 		 */
2318 		mpt_prt(mpt,
2319 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2320 		return (EIO);
2321 	}
2322 
2323 	/* Wait for bus reset to be processed by the IOC. */
2324 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2325 	    REQ_STATE_DONE, sleep_ok, 5000);
2326 
2327 	status = le16toh(mpt->tmf_req->IOCStatus);
2328 	response = mpt->tmf_req->ResponseCode;
2329 	mpt->tmf_req->state = REQ_STATE_FREE;
2330 
2331 	if (error) {
2332 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2333 		    "Resetting controller.\n");
2334 		mpt_reset(mpt, TRUE);
2335 		return (ETIMEDOUT);
2336 	}
2337 
2338 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2339 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2340 		    "Resetting controller.\n", status);
2341 		mpt_reset(mpt, TRUE);
2342 		return (EIO);
2343 	}
2344 
2345 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2346 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2347 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2348 		    "Resetting controller.\n", response);
2349 		mpt_reset(mpt, TRUE);
2350 		return (EIO);
2351 	}
2352 	return (0);
2353 }
2354 
2355 static int
2356 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2357 {
2358 	int r = 0;
2359 	request_t *req;
2360 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2361 
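	/*
	 * Ask the IOC to reset the FC link via a primitive send request
	 * and, if asked to, wait up to a minute for it to complete.
	 */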
2362 	req = mpt_get_request(mpt, FALSE);
2363 	if (req == NULL) {
2364 		return (ENOMEM);
2365 	}
2366 	fc = req->req_vbuf;
2367 	memset(fc, 0, sizeof(*fc));
2368 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2369 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2370 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2371 	mpt_send_cmd(mpt, req);
2372 	if (dowait) {
2373 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2374 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2375 		if (r == 0) {
2376 			mpt_free_request(mpt, req);
2377 		}
2378 	}
2379 	return (r);
2380 }
2381 
2382 static int
2383 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2384 	      MSG_EVENT_NOTIFY_REPLY *msg)
2385 {
2386 	uint32_t data0, data1;
2387 
2388 	data0 = le32toh(msg->Data[0]);
2389 	data1 = le32toh(msg->Data[1]);
2390 	switch(msg->Event & 0xFF) {
2391 	case MPI_EVENT_UNIT_ATTENTION:
2392 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2393 		    (data0 >> 8) & 0xff, data0 & 0xff);
2394 		break;
2395 
2396 	case MPI_EVENT_IOC_BUS_RESET:
2397 		/* We generated a bus reset */
2398 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2399 		    (data0 >> 8) & 0xff);
2400 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2401 		break;
2402 
2403 	case MPI_EVENT_EXT_BUS_RESET:
2404 		/* Someone else generated a bus reset */
2405 		mpt_prt(mpt, "External Bus Reset Detected\n");
2406 		/*
2407 		 * These replies don't return EventData like the MPI
2408 		 * spec says they do
2409 		 */
2410 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2411 		break;
2412 
2413 	case MPI_EVENT_RESCAN:
2414 #if __FreeBSD_version >= 600000
2415 	{
2416 		union ccb *ccb;
2417 		uint32_t pathid;
2418 		/*
2419 		 * In general this means a device has been added to the loop.
2420 		 */
2421 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2422 		if (mpt->ready == 0) {
2423 			break;
2424 		}
2425 		if (mpt->phydisk_sim) {
2426 			pathid = cam_sim_path(mpt->phydisk_sim);
2427 		} else {
2428 			pathid = cam_sim_path(mpt->sim);
2429 		}
2430 		MPTLOCK_2_CAMLOCK(mpt);
2431 		/*
2432 		 * Allocate a CCB, create a wildcard path for this bus,
2433 		 * and schedule a rescan.
2434 		 */
2435 		ccb = xpt_alloc_ccb_nowait();
2436 		if (ccb == NULL) {
2437 			mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2438 			CAMLOCK_2_MPTLOCK(mpt);
2439 			break;
2440 		}
2441 
2442 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2443 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2444 			CAMLOCK_2_MPTLOCK(mpt);
2445 			mpt_prt(mpt, "unable to create path for rescan\n");
2446 			xpt_free_ccb(ccb);
2447 			break;
2448 		}
2449 		xpt_rescan(ccb);
2450 		CAMLOCK_2_MPTLOCK(mpt);
2451 		break;
2452 	}
2453 #else
2454 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2455 		break;
2456 #endif
2457 	case MPI_EVENT_LINK_STATUS_CHANGE:
2458 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2459 		    (data1 >> 8) & 0xff,
2460 		    ((data0 & 0xff) == 0)?  "Failed" : "Active");
2461 		break;
2462 
2463 	case MPI_EVENT_LOOP_STATE_CHANGE:
2464 		switch ((data0 >> 16) & 0xff) {
2465 		case 0x01:
2466 			mpt_prt(mpt,
2467 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2468 			    "(Loop Initialization)\n",
2469 			    (data1 >> 8) & 0xff,
2470 			    (data0 >> 8) & 0xff,
2471 			    (data0     ) & 0xff);
2472 			switch ((data0 >> 8) & 0xff) {
2473 			case 0xF7:
2474 				if ((data0 & 0xff) == 0xF7) {
2475 					mpt_prt(mpt, "Device needs AL_PA\n");
2476 				} else {
2477 					mpt_prt(mpt, "Device %02x doesn't like "
2478 					    "FC performance\n",
2479 					    data0 & 0xFF);
2480 				}
2481 				break;
2482 			case 0xF8:
2483 				if ((data0 & 0xff) == 0xF7) {
2484 					mpt_prt(mpt, "Device had loop failure "
2485 					    "at its receiver prior to acquiring"
2486 					    " AL_PA\n");
2487 				} else {
2488 					mpt_prt(mpt, "Device %02x detected loop"
2489 					    " failure at its receiver\n",
2490 					    data0 & 0xFF);
2491 				}
2492 				break;
2493 			default:
2494 				mpt_prt(mpt, "Device %02x requests that device "
2495 				    "%02x reset itself\n",
2496 				    data0 & 0xFF,
2497 				    (data0 >> 8) & 0xFF);
2498 				break;
2499 			}
2500 			break;
2501 		case 0x02:
2502 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2503 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2504 			    (data1 >> 8) & 0xff, /* Port */
2505 			    (data0 >>  8) & 0xff, /* Character 3 */
2506 			    (data0      ) & 0xff  /* Character 4 */);
2507 			break;
2508 		case 0x03:
2509 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2510 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2511 			    (data1 >> 8) & 0xff, /* Port */
2512 			    (data0 >> 8) & 0xff, /* Character 3 */
2513 			    (data0     ) & 0xff  /* Character 4 */);
2514 			break;
2515 		default:
2516 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2517 			    "FC event (%02x %02x %02x)\n",
2518 			    (data1 >> 8) & 0xff, /* Port */
2519 			    (data0 >> 16) & 0xff, /* Event */
2520 			    (data0 >>  8) & 0xff, /* Character 3 */
2521 			    (data0      ) & 0xff  /* Character 4 */);
2522 		}
2523 		break;
2524 
2525 	case MPI_EVENT_LOGOUT:
2526 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2527 		    (data1 >> 8) & 0xff, data0);
2528 		break;
2529 	case MPI_EVENT_QUEUE_FULL:
2530 	{
2531 		struct cam_sim *sim;
2532 		struct cam_path *tmppath;
2533 		struct ccb_relsim crs;
2534 		PTR_EVENT_DATA_QUEUE_FULL pqf;
2535 		lun_id_t lun_id;
2536 
2537 		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2538 		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2539 		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2540 		    "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2541 		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2542 		    pqf->TargetID) != 0) {
2543 			sim = mpt->phydisk_sim;
2544 		} else {
2545 			sim = mpt->sim;
2546 		}
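		/*
		 * For every lun on this target, ask CAM to reduce the
		 * number of openings to one below the depth at which the
		 * IOC reported queue full.
		 */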
2547 		MPTLOCK_2_CAMLOCK(mpt);
2548 		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2549 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2550 			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2551 				mpt_prt(mpt, "unable to create a path to send "
2552 				    "XPT_REL_SIMQ\n");
2553 				CAMLOCK_2_MPTLOCK(mpt);
2554 				break;
2555 			}
2556 			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2557 			crs.ccb_h.func_code = XPT_REL_SIMQ;
2558 			crs.ccb_h.flags = CAM_DEV_QFREEZE;
2559 			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2560 			crs.openings = pqf->CurrentDepth - 1;
2561 			xpt_action((union ccb *)&crs);
2562 			if (crs.ccb_h.status != CAM_REQ_CMP) {
2563 				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2564 			}
2565 			xpt_free_path(tmppath);
2566 		}
2567 		CAMLOCK_2_MPTLOCK(mpt);
2568 		break;
2569 	}
2570 	case MPI_EVENT_IR_RESYNC_UPDATE:
2571 		mpt_prt(mpt, "IR resync update %d completed\n",
2572 		    (data0 >> 16) & 0xff);
2573 		break;
2574 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2575 	{
2576 		union ccb *ccb;
2577 		struct cam_sim *sim;
2578 		struct cam_path *tmppath;
2579 		PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2580 
2581 		psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2582 		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2583 		    psdsc->TargetID) != 0)
2584 			sim = mpt->phydisk_sim;
2585 		else
2586 			sim = mpt->sim;
2587 		switch(psdsc->ReasonCode) {
2588 		case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2589 			MPTLOCK_2_CAMLOCK(mpt);
2590 			ccb = xpt_alloc_ccb_nowait();
2591 			if (ccb == NULL) {
2592 				mpt_prt(mpt,
2593 				    "unable to alloc CCB for rescan\n");
2594 				CAMLOCK_2_MPTLOCK(mpt);
2595 				break;
2596 			}
2597 			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2598 			    cam_sim_path(sim), psdsc->TargetID,
2599 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2600 				CAMLOCK_2_MPTLOCK(mpt);
2601 				mpt_prt(mpt,
2602 				    "unable to create path for rescan\n");
2603 				xpt_free_ccb(ccb);
2604 				break;
2605 			}
2606 			xpt_rescan(ccb);
2607 			CAMLOCK_2_MPTLOCK(mpt);
2608 			break;
2609 		case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2610 			MPTLOCK_2_CAMLOCK(mpt);
2611 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2612 			    psdsc->TargetID, CAM_LUN_WILDCARD) !=
2613 			    CAM_REQ_CMP) {
2614 				mpt_prt(mpt,
2615 				    "unable to create path for async event\n");
2616 				CAMLOCK_2_MPTLOCK(mpt);
2617 				break;
2618 			}
2619 			xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2620 			xpt_free_path(tmppath);
2621 			CAMLOCK_2_MPTLOCK(mpt);
2622 			break;
2623 		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2624 		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2625 		case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2626 			break;
2627 		default:
2628 			mpt_lprt(mpt, MPT_PRT_WARN,
2629 			    "SAS device status change: Bus: 0x%02x TargetID: "
2630 			    "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2631 			    psdsc->TargetID, psdsc->ReasonCode);
2632 			break;
2633 		}
2634 		break;
2635 	}
2636 	case MPI_EVENT_SAS_DISCOVERY_ERROR:
2637 	{
2638 		PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2639 
2640 		pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2641 		pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2642 		mpt_lprt(mpt, MPT_PRT_WARN,
2643 		    "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2644 		    pde->Port, pde->DiscoveryStatus);
2645 		break;
2646 	}
2647 	case MPI_EVENT_EVENT_CHANGE:
2648 	case MPI_EVENT_INTEGRATED_RAID:
2649 	case MPI_EVENT_IR2:
2650 	case MPI_EVENT_LOG_ENTRY_ADDED:
2651 	case MPI_EVENT_SAS_DISCOVERY:
2652 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2653 	case MPI_EVENT_SAS_SES:
2654 		break;
2655 	default:
2656 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2657 		    msg->Event & 0xFF);
2658 		return (0);
2659 	}
2660 	return (1);
2661 }
2662 
2663 /*
2664  * Reply path for all SCSI I/O requests, called from our
2665  * interrupt handler by extracting our handler index from
2666  * the MsgContext field of the reply from the IOC.
2667  *
2668  * This routine is optimized for the common case of a
2669  * completion without error.  All exception handling is
2670  * offloaded to non-inlined helper routines to minimize
2671  * cache footprint.
2672  */
2673 static int
2674 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2675     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2676 {
2677 	MSG_SCSI_IO_REQUEST *scsi_req;
2678 	union ccb *ccb;
2679 
2680 	if (req->state == REQ_STATE_FREE) {
2681 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2682 		return (TRUE);
2683 	}
2684 
2685 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2686 	ccb = req->ccb;
2687 	if (ccb == NULL) {
2688 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2689 		    req, req->serno);
2690 		return (TRUE);
2691 	}
2692 
2693 	mpt_req_untimeout(req, mpt_timeout, ccb);
2694 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2695 
2696 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2697 		bus_dmasync_op_t op;
2698 
2699 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2700 			op = BUS_DMASYNC_POSTREAD;
2701 		else
2702 			op = BUS_DMASYNC_POSTWRITE;
2703 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2704 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2705 	}
2706 
2707 	if (reply_frame == NULL) {
2708 		/*
2709 		 * Context only reply, completion without error status.
2710 		 */
2711 		ccb->csio.resid = 0;
2712 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2713 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2714 	} else {
2715 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2716 	}
2717 
2718 	if (mpt->outofbeer) {
2719 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2720 		mpt->outofbeer = 0;
2721 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2722 	}
2723 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2724 		struct scsi_inquiry_data *iq =
2725 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2726 		if (scsi_req->Function ==
2727 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2728 			/*
2729 			 * Fake out the device type so that only the
2730 			 * pass-thru device will attach.
2731 			 */
2732 			iq->device &= ~0x1F;
2733 			iq->device |= T_NODEVICE;
2734 		}
2735 	}
2736 	if (mpt->verbose == MPT_PRT_DEBUG) {
2737 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2738 		    req, req->serno);
2739 	}
2740 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2741 	MPTLOCK_2_CAMLOCK(mpt);
2742 	xpt_done(ccb);
2743 	CAMLOCK_2_MPTLOCK(mpt);
2744 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2745 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2746 	} else {
2747 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2748 		    req, req->serno);
2749 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2750 	}
2751 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2752 	    ("CCB req needed wakeup"));
2753 #ifdef	INVARIANTS
2754 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2755 #endif
2756 	mpt_free_request(mpt, req);
2757 	return (TRUE);
2758 }
2759 
2760 static int
2761 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2762     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2763 {
2764 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2765 
2766 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2767 #ifdef	INVARIANTS
2768 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2769 #endif
2770 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2771 	/* Record IOC Status and Response Code of TMF for any waiters. */
2772 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2773 	req->ResponseCode = tmf_reply->ResponseCode;
2774 
2775 	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2776 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2777 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2778 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2779 		req->state |= REQ_STATE_DONE;
2780 		wakeup(req);
2781 	} else {
2782 		mpt->tmf_req->state = REQ_STATE_FREE;
2783 	}
2784 	return (TRUE);
2785 }
2786 
2787 /*
2788  * XXX: Move to definitions file
2789  */
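/* FC frame R_CTL values. */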
2790 #define	ELS	0x22
2791 #define	FC4LS	0x32
2792 #define	ABTS	0x81
2793 #define	BA_ACC	0x84
2794 
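/* ELS command codes (first byte of the payload). */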
2795 #define	LS_RJT	0x01
2796 #define	LS_ACC	0x02
2797 #define	PLOGI	0x03
2798 #define	LOGO	0x05
2799 #define SRR	0x14
2800 #define PRLI	0x20
2801 #define PRLO	0x21
2802 #define ADISC	0x52
2803 #define RSCN	0x61
2804 
2805 static void
2806 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2807     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2808 {
2809 	uint32_t fl;
2810 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2811 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2812 
2813 	/*
2814 	 * We are going to reuse the ELS request to send this response back.
2815 	 */
2816 	rsp = &tmp;
2817 	memset(rsp, 0, sizeof(*rsp));
2818 
2819 #ifdef	USE_IMMEDIATE_LINK_DATA
2820 	/*
2821 	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2822 	 */
2823 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2824 #endif
2825 	rsp->RspLength = length;
2826 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2827 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2828 
2829 	/*
2830 	 * Copy over information from the original reply frame to
2831 	 * its correct place in the response.
2832 	 */
2833 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2834 
2835 	/*
2836 	 * And now copy back the temporary area to the original frame.
2837 	 */
2838 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2839 	rsp = req->req_vbuf;
2840 
2841 #ifdef	USE_IMMEDIATE_LINK_DATA
2842 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2843 #else
2844 {
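	/*
	 * Build a single simple SGE pointing at the response payload,
	 * which sits past the request header in the request frame area.
	 */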
2845 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2846 	bus_addr_t paddr = req->req_pbuf;
2847 	paddr += MPT_RQSL(mpt);
2848 
2849 	fl =
2850 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2851 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2852 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2853 		MPI_SGE_FLAGS_END_OF_LIST	|
2854 		MPI_SGE_FLAGS_END_OF_BUFFER;
2855 	fl <<= MPI_SGE_FLAGS_SHIFT;
2856 	fl |= (length);
2857 	se->FlagsLength = htole32(fl);
2858 	se->Address = htole32((uint32_t) paddr);
2859 }
2860 #endif
2861 
2862 	/*
2863 	 * Send it on...
2864 	 */
2865 	mpt_send_cmd(mpt, req);
2866 }
2867 
2868 static int
2869 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2870     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2871 {
2872 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2873 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2874 	U8 rctl;
2875 	U8 type;
2876 	U8 cmd;
2877 	U16 status = le16toh(reply_frame->IOCStatus);
2878 	U32 *elsbuf;
2879 	int ioindex;
2880 	int do_refresh = TRUE;
2881 
2882 #ifdef	INVARIANTS
2883 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2884 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2885 	    req, req->serno, rp->Function));
2886 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2887 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2888 	} else {
2889 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2890 	}
2891 #endif
2892 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2893 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2894 	    req, req->serno, reply_frame, reply_frame->Function);
2895 
2896 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2897 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2898 		    status, reply_frame->Function);
2899 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2900 			/*
2901 			 * XXX: to get around shutdown issue
2902 			 */
2903 			mpt->disabled = 1;
2904 			return (TRUE);
2905 		}
2906 		return (TRUE);
2907 	}
2908 
2909 	/*
2910 	 * If the function is a link service response, we recycle the
2911 	 * request to serve as a fresh link service buffer post.
2912 	 *
2913 	 * The request pointer is bogus in this case and we have to fetch
2914 	 * it based upon the TransactionContext.
2915 	 */
2916 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2917 		/* Freddie Uncle Charlie Katie */
2918 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2919 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2920 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2921 				break;
2922 			}
2923 
2924 		KASSERT(ioindex < mpt->els_cmds_allocated,
2925 		    ("can't find my mommie!"));
2926 
2927 		/* remove from active list as we're going to re-post it */
2928 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2929 		req->state &= ~REQ_STATE_QUEUED;
2930 		req->state |= REQ_STATE_DONE;
2931 		mpt_fc_post_els(mpt, req, ioindex);
2932 		return (TRUE);
2933 	}
2934 
2935 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2936 		/* remove from active list as we're done */
2937 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2938 		req->state &= ~REQ_STATE_QUEUED;
2939 		req->state |= REQ_STATE_DONE;
2940 		if (req->state & REQ_STATE_TIMEDOUT) {
2941 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2942 			    "Sync Primitive Send Completed After Timeout\n");
2943 			mpt_free_request(mpt, req);
2944 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2945 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2946 			    "Async Primitive Send Complete\n");
2947 			mpt_free_request(mpt, req);
2948 		} else {
2949 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2950 			    "Sync Primitive Send Complete- Waking Waiter\n");
2951 			wakeup(req);
2952 		}
2953 		return (TRUE);
2954 	}
2955 
2956 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2957 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2958 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2959 		    rp->MsgLength, rp->MsgFlags);
2960 		return (TRUE);
2961 	}
2962 
2963 	if (rp->MsgLength <= 5) {
2964 		/*
2965 		 * This is just an ack of an original ELS buffer post.
2966 		 */
2967 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2968 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2969 		return (TRUE);
2970 	}
2971 
2972 
2973 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2974 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2975 
2976 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2977 	cmd = be32toh(elsbuf[0]) >> 24;
2978 
2979 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2980 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2981 		return (TRUE);
2982 	}
2983 
2984 	ioindex = le32toh(rp->TransactionContext);
2985 	req = mpt->els_cmd_ptrs[ioindex];
2986 
2987 	if (rctl == ELS && type == 1) {
2988 		switch (cmd) {
2989 		case PRLI:
2990 			/*
2991 			 * Send back a PRLI ACC
2992 			 */
2993 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2994 			    le32toh(rp->Wwn.PortNameHigh),
2995 			    le32toh(rp->Wwn.PortNameLow));
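			/*
			 * Build the PRLI ACC payload in place, setting the
			 * target and/or initiator function bits of the FCP
			 * service parameter page to match our role.
			 */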
2996 			elsbuf[0] = htobe32(0x02100014);
2997 			elsbuf[1] |= htobe32(0x00000100);
2998 			elsbuf[4] = htobe32(0x00000002);
2999 			if (mpt->role & MPT_ROLE_TARGET)
3000 				elsbuf[4] |= htobe32(0x00000010);
3001 			if (mpt->role & MPT_ROLE_INITIATOR)
3002 				elsbuf[4] |= htobe32(0x00000020);
3003 			/* remove from active list as we're done */
3004 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3005 			req->state &= ~REQ_STATE_QUEUED;
3006 			req->state |= REQ_STATE_DONE;
3007 			mpt_fc_els_send_response(mpt, req, rp, 20);
3008 			do_refresh = FALSE;
3009 			break;
3010 		case PRLO:
3011 			memset(elsbuf, 0, 5 * (sizeof (U32)));
3012 			elsbuf[0] = htobe32(0x02100014);
3013 			elsbuf[1] = htobe32(0x08000100);
3014 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
3015 			    le32toh(rp->Wwn.PortNameHigh),
3016 			    le32toh(rp->Wwn.PortNameLow));
3017 			/* remove from active list as we're done */
3018 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3019 			req->state &= ~REQ_STATE_QUEUED;
3020 			req->state |= REQ_STATE_DONE;
3021 			mpt_fc_els_send_response(mpt, req, rp, 20);
3022 			do_refresh = FALSE;
3023 			break;
3024 		default:
3025 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
3026 			break;
3027 		}
3028 	} else if (rctl == ABTS && type == 0) {
3029 		uint16_t rx_id = le16toh(rp->Rxid);
3030 		uint16_t ox_id = le16toh(rp->Oxid);
3031 		request_t *tgt_req = NULL;
3032 
3033 		mpt_prt(mpt,
3034 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
3035 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
3036 		    le32toh(rp->Wwn.PortNameLow));
3037 		if (rx_id >= mpt->mpt_max_tgtcmds) {
3038 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
3039 		} else if (mpt->tgt_cmd_ptrs == NULL) {
3040 			mpt_prt(mpt, "No TGT CMD PTRS\n");
3041 		} else {
3042 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
3043 		}
3044 		if (tgt_req) {
3045 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
3046 			union ccb *ccb;
3047 			uint32_t ct_id;
3048 
3049 			/*
3050 			 * Check to make sure we have the correct command.
3051 			 * The reply descriptor in the target state should
3052 			 * contain an IoIndex that matches the
3053 			 * RX_ID.
3054 			 *
3055 			 * It'd be nice to have OX_ID to crosscheck with
3056 			 * as well.
3057 			 */
3058 			ct_id = GET_IO_INDEX(tgt->reply_desc);
3059 
3060 			if (ct_id != rx_id) {
3061 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
3062 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
3063 				    rx_id, ct_id);
3064 				goto skip;
3065 			}
3066 
3067 			ccb = tgt->ccb;
3068 			if (ccb) {
3069 				mpt_prt(mpt,
3070 				    "CCB (%p): lun %u flags %x status %x\n",
3071 				    ccb, ccb->ccb_h.target_lun,
3072 				    ccb->ccb_h.flags, ccb->ccb_h.status);
3073 			}
3074 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
3075 			    "%x nxfers %x\n", tgt->state,
3076 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
3077 			    tgt->nxfers);
3078   skip:
3079 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
3080 				mpt_prt(mpt, "unable to start TargetAbort\n");
3081 			}
3082 		} else {
3083 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
3084 		}
3085 		memset(elsbuf, 0, 5 * (sizeof (U32)));
3086 		elsbuf[0] = htobe32(0);
3087 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
3088 		elsbuf[2] = htobe32(0x0000ffff);
3089 		/*
3090 		 * Dork with the reply frame so that the response to it
3091 		 * will be correct.
3092 		 */
3093 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3094 		/* remove from active list as we're done */
3095 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3096 		req->state &= ~REQ_STATE_QUEUED;
3097 		req->state |= REQ_STATE_DONE;
3098 		mpt_fc_els_send_response(mpt, req, rp, 12);
3099 		do_refresh = FALSE;
3100 	} else {
3101 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3102 	}
3103 	if (do_refresh == TRUE) {
3104 		/* remove from active list as we're done */
3105 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3106 		req->state &= ~REQ_STATE_QUEUED;
3107 		req->state |= REQ_STATE_DONE;
3108 		mpt_fc_post_els(mpt, req, ioindex);
3109 	}
3110 	return (TRUE);
3111 }
3112 
3113 /*
3114  * Clean up all SCSI Initiator personality state in response
3115  * to a controller reset.
3116  */
3117 static void
3118 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3119 {
3120 
3121 	/*
3122 	 * The pending list is already run down by
3123 	 * the generic handler.  Perform the same
3124 	 * operation on the timed out request list.
3125 	 */
3126 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3127 				   MPI_IOCSTATUS_INVALID_STATE);
3128 
3129 	/*
3130 	 * XXX: We need to repost ELS and Target Command Buffers?
3131 	 */
3132 
3133 	/*
3134 	 * Inform the XPT that a bus reset has occurred.
3135 	 */
3136 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
3137 }
3138 
3139 /*
3140  * Parse additional completion information in the reply
3141  * frame for SCSI I/O requests.
3142  */
3143 static int
3144 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3145 			     MSG_DEFAULT_REPLY *reply_frame)
3146 {
3147 	union ccb *ccb;
3148 	MSG_SCSI_IO_REPLY *scsi_io_reply;
3149 	u_int ioc_status;
3150 	u_int sstate;
3151 
3152 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3153 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3154 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3155 		("MPT SCSI I/O Handler called with incorrect reply type"));
3156 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3157 		("MPT SCSI I/O Handler called with continuation reply"));
3158 
3159 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3160 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
3161 	ioc_status &= MPI_IOCSTATUS_MASK;
3162 	sstate = scsi_io_reply->SCSIState;
3163 
3164 	ccb = req->ccb;
3165 	ccb->csio.resid =
3166 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3167 
3168 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3169 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3170 		uint32_t sense_returned;
3171 
3172 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3173 
3174 		sense_returned = le32toh(scsi_io_reply->SenseCount);
3175 		if (sense_returned < ccb->csio.sense_len)
3176 			ccb->csio.sense_resid = ccb->csio.sense_len -
3177 						sense_returned;
3178 		else
3179 			ccb->csio.sense_resid = 0;
3180 
3181 		bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3182 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3183 		    min(ccb->csio.sense_len, sense_returned));
3184 	}
3185 
3186 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3187 		/*
3188 		 * Tag messages rejected, but non-tagged retry
3189 		 * was successful.
3190 		 *
3191 		 * XXX: mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3192 		 */
3193 	}
3194 
3195 	switch(ioc_status) {
3196 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3197 		/*
3198 		 * XXX
3199 		 * Linux driver indicates that a zero
3200 		 * transfer length with this error code
3201 		 * indicates a CRC error.
3202 		 *
3203 		 * No need to swap the bytes for checking
3204 		 * against zero.
3205 		 */
3206 		if (scsi_io_reply->TransferCount == 0) {
3207 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3208 			break;
3209 		}
3210 		/* FALLTHROUGH */
3211 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3212 	case MPI_IOCSTATUS_SUCCESS:
3213 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3214 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3215 			/*
3216 			 * Status was never returned for this transaction.
3217 			 */
3218 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3219 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3220 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3221 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3222 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3223 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3224 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3225 
3226 			/* XXX Handle SPI-Packet and FCP-2 response info. */
3227 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3228 		} else
3229 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3230 		break;
3231 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3232 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3233 		break;
3234 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3235 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3236 		break;
3237 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3238 		/*
3239 		 * Since selection timeouts and "device really not
3240 		 * there" are grouped into this error code, report
3241 		 * selection timeout.  Selection timeouts are
3242 		 * typically retried before giving up on the device
3243 		 * whereas "device not there" errors are considered
3244 		 * unretryable.
3245 		 */
3246 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3247 		break;
3248 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3249 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3250 		break;
3251 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3252 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3253 		break;
3254 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3255 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3256 		break;
3257 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3258 		ccb->ccb_h.status = CAM_UA_TERMIO;
3259 		break;
3260 	case MPI_IOCSTATUS_INVALID_STATE:
3261 		/*
3262 		 * The IOC has been reset.  Emulate a bus reset.
3263 		 */
3264 		/* FALLTHROUGH */
3265 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3266 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3267 		break;
3268 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3269 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3270 		/*
3271 		 * Don't clobber any timeout status that has
3272 		 * already been set for this transaction.  We
3273 		 * want the SCSI layer to be able to differentiate
3274 		 * between the command we aborted due to timeout
3275 		 * and any innocent bystanders.
3276 		 */
3277 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3278 			break;
3279 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3280 		break;
3281 
3282 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3283 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3284 		break;
3285 	case MPI_IOCSTATUS_BUSY:
3286 		mpt_set_ccb_status(ccb, CAM_BUSY);
3287 		break;
3288 	case MPI_IOCSTATUS_INVALID_FUNCTION:
3289 	case MPI_IOCSTATUS_INVALID_SGL:
3290 	case MPI_IOCSTATUS_INTERNAL_ERROR:
3291 	case MPI_IOCSTATUS_INVALID_FIELD:
3292 	default:
3293 		/* XXX
3294 		 * Some of the above may need to kick
3295 		 * off a recovery action!!!!
3296 		 */
3297 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3298 		break;
3299 	}
3300 
3301 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3302 		mpt_freeze_ccb(ccb);
3303 	}
3304 
3305 	return (TRUE);
3306 }
3307 
3308 static void
3309 mpt_action(struct cam_sim *sim, union ccb *ccb)
3310 {
3311 	struct mpt_softc *mpt;
3312 	struct ccb_trans_settings *cts;
3313 	target_id_t tgt;
3314 	lun_id_t lun;
3315 	int raid_passthru;
3316 
3317 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3318 
3319 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
3320 	raid_passthru = (sim == mpt->phydisk_sim);
3321 	MPT_LOCK_ASSERT(mpt);
3322 
3323 	tgt = ccb->ccb_h.target_id;
3324 	lun = ccb->ccb_h.target_lun;
3325 	if (raid_passthru &&
3326 	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
3327 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
3328 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
3329 		CAMLOCK_2_MPTLOCK(mpt);
3330 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3331 			MPTLOCK_2_CAMLOCK(mpt);
3332 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3333 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3334 			xpt_done(ccb);
3335 			return;
3336 		}
3337 		MPTLOCK_2_CAMLOCK(mpt);
3338 	}
3339 	ccb->ccb_h.ccb_mpt_ptr = mpt;
3340 
3341 	switch (ccb->ccb_h.func_code) {
3342 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
3343 		/*
3344 		 * Do a couple of preliminary checks...
3345 		 */
3346 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3347 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3348 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3349 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3350 				break;
3351 			}
3352 		}
3353 		/* Max supported CDB length is 16 bytes */
3354 		/* XXX Unless we implement the new 32byte message type */
3355 		if (ccb->csio.cdb_len >
3356 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3357 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3358 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3359 			break;
3360 		}
3361 #ifdef	MPT_TEST_MULTIPATH
3362 		if (mpt->failure_id == ccb->ccb_h.target_id) {
3363 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3364 			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3365 			break;
3366 		}
3367 #endif
3368 		ccb->csio.scsi_status = SCSI_STATUS_OK;
3369 		mpt_start(sim, ccb);
3370 		return;
3371 
3372 	case XPT_RESET_BUS:
3373 		if (raid_passthru) {
3374 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3375 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3376 			break;
3377 		}
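		/* FALLTHROUGH */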
3378 	case XPT_RESET_DEV:
3379 		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3380 			if (bootverbose) {
3381 				xpt_print(ccb->ccb_h.path, "reset bus\n");
3382 			}
3383 		} else {
3384 			xpt_print(ccb->ccb_h.path, "reset device\n");
3385 		}
3386 		CAMLOCK_2_MPTLOCK(mpt);
3387 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3388 		MPTLOCK_2_CAMLOCK(mpt);
3389 
3390 		/*
3391 		 * mpt_bus_reset is always successful in that it
3392 		 * will fall back to a hard reset should a bus
3393 		 * reset attempt fail.
3394 		 */
3395 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3396 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3397 		break;
3398 
3399 	case XPT_ABORT:
3400 	{
3401 		union ccb *accb = ccb->cab.abort_ccb;
3402 		CAMLOCK_2_MPTLOCK(mpt);
3403 		switch (accb->ccb_h.func_code) {
3404 		case XPT_ACCEPT_TARGET_IO:
3405 		case XPT_IMMED_NOTIFY:
3406 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3407 			break;
3408 		case XPT_CONT_TARGET_IO:
3409 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3410 			ccb->ccb_h.status = CAM_UA_ABORT;
3411 			break;
3412 		case XPT_SCSI_IO:
3413 			ccb->ccb_h.status = CAM_UA_ABORT;
3414 			break;
3415 		default:
3416 			ccb->ccb_h.status = CAM_REQ_INVALID;
3417 			break;
3418 		}
3419 		MPTLOCK_2_CAMLOCK(mpt);
3420 		break;
3421 	}
3422 
3423 #ifdef	CAM_NEW_TRAN_CODE
3424 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3425 #else
3426 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3427 #endif
3428 #define	DP_DISC_ENABLE	0x1
3429 #define	DP_DISC_DISABL	0x2
3430 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
3431 
3432 #define	DP_TQING_ENABLE	0x4
3433 #define	DP_TQING_DISABL	0x8
3434 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
3435 
3436 #define	DP_WIDE		0x10
3437 #define	DP_NARROW	0x20
3438 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
3439 
3440 #define	DP_SYNC		0x40
3441 
3442 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
3443 	{
3444 #ifdef	CAM_NEW_TRAN_CODE
3445 		struct ccb_trans_settings_scsi *scsi;
3446 		struct ccb_trans_settings_spi *spi;
3447 #endif
3448 		uint8_t dval;
3449 		u_int period;
3450 		u_int offset;
3451 		int i, j;
3452 
3453 		cts = &ccb->cts;
3454 
3455 		if (mpt->is_fc || mpt->is_sas) {
3456 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3457 			break;
3458 		}
3459 
3460 #ifdef	CAM_NEW_TRAN_CODE
3461 		scsi = &cts->proto_specific.scsi;
3462 		spi = &cts->xport_specific.spi;
3463 
3464 		/*
3465 		 * We can be called just to validate transport and proto versions
3466 		 */
3467 		if (scsi->valid == 0 && spi->valid == 0) {
3468 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3469 			break;
3470 		}
3471 #endif
3472 
3473 		/*
3474 		 * Skip attempting settings on RAID volume disks.
3475 		 * Other devices on the bus get the normal treatment.
3476 		 */
3477 		if (mpt->phydisk_sim && raid_passthru == 0 &&
3478 		    mpt_is_raid_volume(mpt, tgt) != 0) {
3479 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3480 			    "no transfer settings for RAID vols\n");
3481 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3482 			break;
3483 		}
3484 
3485 		i = mpt->mpt_port_page2.PortSettings &
3486 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3487 		j = mpt->mpt_port_page2.PortFlags &
3488 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3489 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3490 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3491 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
3492 			    "honoring BIOS transfer negotiations\n");
3493 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3494 			break;
3495 		}
3496 
3497 		dval = 0;
3498 		period = 0;
3499 		offset = 0;
3500 
3501 #ifndef	CAM_NEW_TRAN_CODE
3502 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3503 			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3504 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3505 		}
3506 
3507 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3508 			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3509 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3510 		}
3511 
3512 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3513 			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3514 		}
3515 
3516 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3517 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3518 			dval |= DP_SYNC;
3519 			period = cts->sync_period;
3520 			offset = cts->sync_offset;
3521 		}
3522 #else
3523 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3524 			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3525 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3526 		}
3527 
3528 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3529 			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3530 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3531 		}
3532 
3533 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3534 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3535 			    DP_WIDE : DP_NARROW;
3536 		}
3537 
3538 		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3539 			dval |= DP_SYNC;
3540 			offset = spi->sync_offset;
3541 		} else {
3542 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3543 			    &mpt->mpt_dev_page1[tgt];
3544 			offset = ptr->RequestedParameters;
3545 			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3546 			offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3547 		}
3548 		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3549 			dval |= DP_SYNC;
3550 			period = spi->sync_period;
3551 		} else {
3552 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3553 			    &mpt->mpt_dev_page1[tgt];
3554 			period = ptr->RequestedParameters;
3555 			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3556 			period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3557 		}
3558 #endif
3559 		CAMLOCK_2_MPTLOCK(mpt);
3560 		if (dval & DP_DISC_ENABLE) {
3561 			mpt->mpt_disc_enable |= (1 << tgt);
3562 		} else if (dval & DP_DISC_DISABL) {
3563 			mpt->mpt_disc_enable &= ~(1 << tgt);
3564 		}
3565 		if (dval & DP_TQING_ENABLE) {
3566 			mpt->mpt_tag_enable |= (1 << tgt);
3567 		} else if (dval & DP_TQING_DISABL) {
3568 			mpt->mpt_tag_enable &= ~(1 << tgt);
3569 		}
3570 		if (dval & DP_WIDTH) {
3571 			mpt_setwidth(mpt, tgt, 1);
3572 		}
3573 		if (dval & DP_SYNC) {
3574 			mpt_setsync(mpt, tgt, period, offset);
3575 		}
3576 		if (dval == 0) {
3577 			MPTLOCK_2_CAMLOCK(mpt);
3578 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3579 			break;
3580 		}
3581 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3582 		    "set [%d]: 0x%x period 0x%x offset %d\n",
3583 		    tgt, dval, period, offset);
3584 		if (mpt_update_spi_config(mpt, tgt)) {
3585 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3586 		} else {
3587 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3588 		}
3589 		MPTLOCK_2_CAMLOCK(mpt);
3590 		break;
3591 	}
3592 	case XPT_GET_TRAN_SETTINGS:
3593 	{
3594 #ifdef	CAM_NEW_TRAN_CODE
3595 		struct ccb_trans_settings_scsi *scsi;
3596 		cts = &ccb->cts;
3597 		cts->protocol = PROTO_SCSI;
3598 		if (mpt->is_fc) {
3599 			struct ccb_trans_settings_fc *fc =
3600 			    &cts->xport_specific.fc;
3601 			cts->protocol_version = SCSI_REV_SPC;
3602 			cts->transport = XPORT_FC;
3603 			cts->transport_version = 0;
3604 			fc->valid = CTS_FC_VALID_SPEED;
3605 			fc->bitrate = 100000;
3606 		} else if (mpt->is_sas) {
3607 			struct ccb_trans_settings_sas *sas =
3608 			    &cts->xport_specific.sas;
3609 			cts->protocol_version = SCSI_REV_SPC2;
3610 			cts->transport = XPORT_SAS;
3611 			cts->transport_version = 0;
3612 			sas->valid = CTS_SAS_VALID_SPEED;
3613 			sas->bitrate = 300000;
3614 		} else {
3615 			cts->protocol_version = SCSI_REV_2;
3616 			cts->transport = XPORT_SPI;
3617 			cts->transport_version = 2;
3618 			if (mpt_get_spi_settings(mpt, cts) != 0) {
3619 				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3620 				break;
3621 			}
3622 		}
3623 		scsi = &cts->proto_specific.scsi;
3624 		scsi->valid = CTS_SCSI_VALID_TQ;
3625 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3626 #else
3627 		cts = &ccb->cts;
3628 		if (mpt->is_fc) {
3629 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3630 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3631 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3632 		} else if (mpt->is_sas) {
3633 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3634 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3635 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3636 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
3637 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3638 			break;
3639 		}
3640 #endif
3641 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3642 		break;
3643 	}
3644 	case XPT_CALC_GEOMETRY:
3645 	{
3646 		struct ccb_calc_geometry *ccg;
3647 
3648 		ccg = &ccb->ccg;
3649 		if (ccg->block_size == 0) {
3650 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3651 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3652 			break;
3653 		}
3654 		mpt_calc_geometry(ccg, /*extended*/1);
3655 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3656 		break;
3657 	}
3658 	case XPT_PATH_INQ:		/* Path routing inquiry */
3659 	{
3660 		struct ccb_pathinq *cpi = &ccb->cpi;
3661 
3662 		cpi->version_num = 1;
3663 		cpi->target_sprt = 0;
3664 		cpi->hba_eng_cnt = 0;
3665 		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3666 		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3667 		/*
3668 		 * FC cards report MAX_DEVICES of 512, but
3669 		 * the MSG_SCSI_IO_REQUEST target id field
3670 		 * is only 8 bits. Until we fix the driver
3671 		 * to support 'channels' for bus overflow,
3672 		 * just limit it.
3673 		 */
3674 		if (cpi->max_target > 255) {
3675 			cpi->max_target = 255;
3676 		}
3677 
3678 		/*
3679 		 * VMware ESX reports > 16 devices and then dies when we probe.
3680 		 */
3681 		if (mpt->is_spi && cpi->max_target > 15) {
3682 			cpi->max_target = 15;
3683 		}
3684 		if (mpt->is_spi)
3685 			cpi->max_lun = 7;
3686 		else
3687 			cpi->max_lun = MPT_MAX_LUNS;
3688 		cpi->initiator_id = mpt->mpt_ini_id;
3689 		cpi->bus_id = cam_sim_bus(sim);
3690 
3691 		/*
3692 		 * The base speed is the speed of the underlying connection.
3693 		 */
3694 #ifdef	CAM_NEW_TRAN_CODE
3695 		cpi->protocol = PROTO_SCSI;
3696 		if (mpt->is_fc) {
3697 			cpi->hba_misc = PIM_NOBUSRESET;
3698 			cpi->base_transfer_speed = 100000;
3699 			cpi->hba_inquiry = PI_TAG_ABLE;
3700 			cpi->transport = XPORT_FC;
3701 			cpi->transport_version = 0;
3702 			cpi->protocol_version = SCSI_REV_SPC;
3703 		} else if (mpt->is_sas) {
3704 			cpi->hba_misc = PIM_NOBUSRESET;
3705 			cpi->base_transfer_speed = 300000;
3706 			cpi->hba_inquiry = PI_TAG_ABLE;
3707 			cpi->transport = XPORT_SAS;
3708 			cpi->transport_version = 0;
3709 			cpi->protocol_version = SCSI_REV_SPC2;
3710 		} else {
3711 			cpi->hba_misc = PIM_SEQSCAN;
3712 			cpi->base_transfer_speed = 3300;
3713 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3714 			cpi->transport = XPORT_SPI;
3715 			cpi->transport_version = 2;
3716 			cpi->protocol_version = SCSI_REV_2;
3717 		}
3718 #else
3719 		if (mpt->is_fc) {
3720 			cpi->hba_misc = PIM_NOBUSRESET;
3721 			cpi->base_transfer_speed = 100000;
3722 			cpi->hba_inquiry = PI_TAG_ABLE;
3723 		} else if (mpt->is_sas) {
3724 			cpi->hba_misc = PIM_NOBUSRESET;
3725 			cpi->base_transfer_speed = 300000;
3726 			cpi->hba_inquiry = PI_TAG_ABLE;
3727 		} else {
3728 			cpi->hba_misc = PIM_SEQSCAN;
3729 			cpi->base_transfer_speed = 3300;
3730 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3731 		}
3732 #endif
3733 
3734 		/*
3735 		 * We give our fake RAID passthru bus a width that is
3736 		 * MaxPhysDisks wide and restrict it to one lun.
3737 		 */
3738 		if (raid_passthru) {
3739 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3740 			cpi->initiator_id = cpi->max_target + 1;
3741 			cpi->max_lun = 0;
3742 		}
3743 
3744 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3745 			cpi->hba_misc |= PIM_NOINITIATOR;
3746 		}
3747 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3748 			cpi->target_sprt =
3749 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3750 		} else {
3751 			cpi->target_sprt = 0;
3752 		}
3753 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3754 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3755 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3756 		cpi->unit_number = cam_sim_unit(sim);
3757 		cpi->ccb_h.status = CAM_REQ_CMP;
3758 		break;
3759 	}
3760 	case XPT_EN_LUN:		/* Enable LUN as a target */
3761 	{
3762 		int result;
3763 
3764 		CAMLOCK_2_MPTLOCK(mpt);
3765 		if (ccb->cel.enable)
3766 			result = mpt_enable_lun(mpt,
3767 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3768 		else
3769 			result = mpt_disable_lun(mpt,
3770 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3771 		MPTLOCK_2_CAMLOCK(mpt);
3772 		if (result == 0) {
3773 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3774 		} else {
3775 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3776 		}
3777 		break;
3778 	}
3779 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3780 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3781 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3782 	{
3783 		tgt_resource_t *trtp;
3784 		lun_id_t lun = ccb->ccb_h.target_lun;
3785 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3786 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3787 		ccb->ccb_h.flags = 0;
3788 
3789 		if (lun == CAM_LUN_WILDCARD) {
3790 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3791 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3792 				break;
3793 			}
3794 			trtp = &mpt->trt_wildcard;
3795 		} else if (lun >= MPT_MAX_LUNS) {
3796 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3797 			break;
3798 		} else {
3799 			trtp = &mpt->trt[lun];
3800 		}
3801 		CAMLOCK_2_MPTLOCK(mpt);
3802 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3803 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3804 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3805 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3806 			    sim_links.stqe);
3807 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3808 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3809 			    "Put FREE INOT lun %d\n", lun);
3810 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3811 			    sim_links.stqe);
3812 		} else {
3813 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3814 		}
3815 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3816 		MPTLOCK_2_CAMLOCK(mpt);
3817 		return;
3818 	}
3819 	case XPT_CONT_TARGET_IO:
3820 		CAMLOCK_2_MPTLOCK(mpt);
3821 		mpt_target_start_io(mpt, ccb);
3822 		MPTLOCK_2_CAMLOCK(mpt);
3823 		return;
3824 
3825 	default:
3826 		ccb->ccb_h.status = CAM_REQ_INVALID;
3827 		break;
3828 	}
3829 	xpt_done(ccb);
3830 }
3831 
3832 static int
3833 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3834 {
3835 #ifdef	CAM_NEW_TRAN_CODE
3836 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3837 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3838 #endif
3839 	target_id_t tgt;
3840 	uint32_t dval, pval, oval;
3841 	int rv;
3842 
3843 	if (IS_CURRENT_SETTINGS(cts) == 0) {
3844 		tgt = cts->ccb_h.target_id;
3845 	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3846 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3847 			return (-1);
3848 		}
3849 	} else {
3850 		tgt = cts->ccb_h.target_id;
3851 	}
3852 
3853 	/*
3854 	 * We aren't looking at Port Page 2 BIOS settings here;
3855 	 * sometimes these have been known to be bogus (XXX).
3856 	 *
3857 	 * For user settings, we pick the max from port page 0.
3858 	 *
3859 	 * For current settings we read the current settings out from
3860 	 * device page 0 for that target.
3861 	 */
3862 	if (IS_CURRENT_SETTINGS(cts)) {
3863 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3864 		dval = 0;
3865 
3866 		CAMLOCK_2_MPTLOCK(mpt);
3867 		tmp = mpt->mpt_dev_page0[tgt];
3868 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3869 		    sizeof(tmp), FALSE, 5000);
3870 		if (rv) {
3871 			MPTLOCK_2_CAMLOCK(mpt);
3872 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3873 			return (rv);
3874 		}
3875 		mpt2host_config_page_scsi_device_0(&tmp);
3876 
3877 		MPTLOCK_2_CAMLOCK(mpt);
3878 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3879 		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3880 		    tmp.NegotiatedParameters, tmp.Information);
3881 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3882 		    DP_WIDE : DP_NARROW;
3883 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3884 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3885 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3886 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3887 		oval = tmp.NegotiatedParameters;
3888 		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3889 		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3890 		pval = tmp.NegotiatedParameters;
3891 		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3892 		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3893 		mpt->mpt_dev_page0[tgt] = tmp;
3894 	} else {
3895 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3896 		oval = mpt->mpt_port_page0.Capabilities;
3897 		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3898 		pval = mpt->mpt_port_page0.Capabilities;
3899 		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3900 	}
3901 
3902 #ifndef	CAM_NEW_TRAN_CODE
3903 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3904 	cts->valid = 0;
3905 	cts->sync_period = pval;
3906 	cts->sync_offset = oval;
3907 	cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3908 	cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3909 	cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3910 	if (dval & DP_WIDE) {
3911 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3912 	} else {
3913 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3914 	}
3915 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3916 		cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3917 		if (dval & DP_DISC_ENABLE) {
3918 			cts->flags |= CCB_TRANS_DISC_ENB;
3919 		}
3920 		if (dval & DP_TQING_ENABLE) {
3921 			cts->flags |= CCB_TRANS_TAG_ENB;
3922 		}
3923 	}
3924 #else
3925 	spi->valid = 0;
3926 	scsi->valid = 0;
3927 	spi->flags = 0;
3928 	scsi->flags = 0;
3929 	spi->sync_offset = oval;
3930 	spi->sync_period = pval;
3931 	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3932 	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3933 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3934 	if (dval & DP_WIDE) {
3935 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3936 	} else {
3937 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3938 	}
3939 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3940 		scsi->valid = CTS_SCSI_VALID_TQ;
3941 		if (dval & DP_TQING_ENABLE) {
3942 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3943 		}
3944 		spi->valid |= CTS_SPI_VALID_DISC;
3945 		if (dval & DP_DISC_ENABLE) {
3946 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3947 		}
3948 	}
3949 #endif
3950 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3951 	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3952 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3953 	return (0);
3954 }
3955 
3956 static void
3957 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3958 {
3959 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3960 
3961 	ptr = &mpt->mpt_dev_page1[tgt];
3962 	if (onoff) {
3963 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3964 	} else {
3965 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3966 	}
3967 }
3968 
3969 static void
3970 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3971 {
3972 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3973 
3974 	ptr = &mpt->mpt_dev_page1[tgt];
3975 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3976 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3977 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3978 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3979 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3980 	if (period == 0) {
3981 		return;
3982 	}
3983 	ptr->RequestedParameters |=
3984 	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3985 	ptr->RequestedParameters |=
3986 	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
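	/*
	 * Smaller sync factors mean faster rates: factors below 0xa also
	 * request DT clocking, and factors below 0x9 request QAS and
	 * information units as well.
	 */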
3987 	if (period < 0xa) {
3988 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3989 	}
3990 	if (period < 0x9) {
3991 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3992 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3993 	}
3994 }
3995 
3996 static int
3997 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3998 {
3999 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
4000 	int rv;
4001 
4002 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
4003 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
4004 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
4005 	tmp = mpt->mpt_dev_page1[tgt];
4006 	host2mpt_config_page_scsi_device_1(&tmp);
4007 	rv = mpt_write_cur_cfg_page(mpt, tgt,
4008 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
4009 	if (rv) {
4010 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
4011 		return (-1);
4012 	}
4013 	return (0);
4014 }
4015 
4016 static void
4017 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
4018 {
4019 #if __FreeBSD_version >= 500000
4020 	cam_calc_geometry(ccg, extended);
4021 #else
4022 	uint32_t size_mb;
4023 	uint32_t secs_per_cylinder;
4024 
4025 	if (ccg->block_size == 0) {
4026 		ccg->ccb_h.status = CAM_REQ_INVALID;
4027 		return;
4028 	}
4029 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
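	/*
	 * Use extended (255 head, 63 sector) translation for volumes
	 * over 1GB, matching the usual BIOS large-disk translation.
	 */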
4030 	if (size_mb > 1024 && extended) {
4031 		ccg->heads = 255;
4032 		ccg->secs_per_track = 63;
4033 	} else {
4034 		ccg->heads = 64;
4035 		ccg->secs_per_track = 32;
4036 	}
4037 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
4038 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
4039 	ccg->ccb_h.status = CAM_REQ_CMP;
4040 #endif
4041 }
4042 
4043 /****************************** Timeout Recovery ******************************/
4044 static int
4045 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
4046 {
4047 	int error;
4048 
4049 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
4050 	    &mpt->recovery_thread, /*flags*/0,
4051 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
4052 	return (error);
4053 }
4054 
4055 static void
4056 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
4057 {
4058 
4059 	if (mpt->recovery_thread == NULL) {
4060 		return;
4061 	}
4062 	mpt->shutdwn_recovery = 1;
4063 	wakeup(mpt);
4064 	/*
4065 	 * Sleep on a slightly different location
4066 	 * for this interlock just for added safety.
4067 	 */
4068 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
4069 }
4070 
4071 static void
4072 mpt_recovery_thread(void *arg)
4073 {
4074 	struct mpt_softc *mpt;
4075 
4076 	mpt = (struct mpt_softc *)arg;
4077 	MPT_LOCK(mpt);
4078 	for (;;) {
4079 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4080 			if (mpt->shutdwn_recovery == 0) {
4081 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
4082 			}
4083 		}
4084 		if (mpt->shutdwn_recovery != 0) {
4085 			break;
4086 		}
4087 		mpt_recover_commands(mpt);
4088 	}
4089 	mpt->recovery_thread = NULL;
4090 	wakeup(&mpt->recovery_thread);
4091 	MPT_UNLOCK(mpt);
4092 	mpt_kthread_exit(0);
4093 }
4094 
4095 static int
4096 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
4097     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
4098 {
4099 	MSG_SCSI_TASK_MGMT *tmf_req;
4100 	int		    error;
4101 
4102 	/*
4103 	 * Wait for any current TMF request to complete.
4104 	 * We're only allowed to issue one TMF at a time.
4105 	 */
4106 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
4107 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
4108 	if (error != 0) {
4109 		mpt_reset(mpt, TRUE);
4110 		return (ETIMEDOUT);
4111 	}
4112 
4113 	mpt_assign_serno(mpt, mpt->tmf_req);
4114 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
4115 
4116 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
4117 	memset(tmf_req, 0, sizeof(*tmf_req));
4118 	tmf_req->TargetID = target;
4119 	tmf_req->Bus = channel;
4120 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4121 	tmf_req->TaskType = type;
4122 	tmf_req->MsgFlags = flags;
4123 	tmf_req->MsgContext =
4124 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
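	/*
	 * LUNs that don't fit in a single byte are encoded with SAM-2
	 * flat space addressing (address method 01b in byte 0).
	 */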
4125 	if (lun > MPT_MAX_LUNS) {
4126 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4127 		tmf_req->LUN[1] = lun & 0xff;
4128 	} else {
4129 		tmf_req->LUN[1] = lun;
4130 	}
4131 	tmf_req->TaskMsgContext = abort_ctx;
4132 
4133 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4134 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
4135 	    mpt->tmf_req->serno, tmf_req->MsgContext);
4136 	if (mpt->verbose > MPT_PRT_DEBUG) {
4137 		mpt_print_request(tmf_req);
4138 	}
4139 
4140 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4141 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4142 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4143 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4144 	if (error != MPT_OK) {
4145 		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4146 		mpt->tmf_req->state = REQ_STATE_FREE;
4147 		mpt_reset(mpt, TRUE);
4148 	}
4149 	return (error);
4150 }
4151 
4152 /*
4153  * When a command times out, it is placed on the request_timeout_list
4154  * and we wake our recovery thread.  The MPT-Fusion architecture supports
4155  * only a single TMF operation at a time, so we serially abort/bdr, etc,
4156  * the timedout transactions.  The next TMF is issued either by the
4157  * completion handler of the current TMF waking our recovery thread,
4158  * or the TMF timeout handler causing a hard reset sequence.
4159  */
4160 static void
4161 mpt_recover_commands(struct mpt_softc *mpt)
4162 {
4163 	request_t	   *req;
4164 	union ccb	   *ccb;
4165 	int		    error;
4166 
4167 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4168 		/*
4169 		 * No work to do- leave.
4170 		 */
4171 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4172 		return;
4173 	}
4174 
4175 	/*
4176 	 * Flush any commands whose completion coincides with their timeout.
4177 	 */
4178 	mpt_intr(mpt);
4179 
4180 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4181 		/*
4182 		 * The timedout commands have already
4183 		 * completed.  This typically means
4184 		 * that either the timeout value was on
4185 		 * the hairy edge of what the device
4186 		 * requires or - more likely - interrupts
4187 		 * are not happening.
4188 		 */
4189 		mpt_prt(mpt, "Timedout requests already complete. "
4190 		    "Interrupts may not be functioning.\n");
4191 		mpt_enable_ints(mpt);
4192 		return;
4193 	}
4194 
4195 	/*
4196 	 * We have no visibility into the current state of the
4197 	 * controller, so attempt to abort the commands in the
4198 	 * order they timed out. For initiator commands, we
4199 	 * depend on the reply handler pulling requests off
4200 	 * the timeout list.
4201 	 */
4202 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4203 		uint16_t status;
4204 		uint8_t response;
4205 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4206 
4207 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4208 		    req, req->serno, hdrp->Function);
4209 		ccb = req->ccb;
4210 		if (ccb == NULL) {
4211 			mpt_prt(mpt, "null ccb in timed out request. "
4212 			    "Resetting Controller.\n");
4213 			mpt_reset(mpt, TRUE);
4214 			continue;
4215 		}
4216 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4217 
4218 		/*
4219 		 * Check to see if this is not an initiator command and
4220 		 * deal with it differently if it is.
4221 		 */
4222 		switch (hdrp->Function) {
4223 		case MPI_FUNCTION_SCSI_IO_REQUEST:
4224 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4225 			break;
4226 		default:
4227 			/*
4228 			 * XXX: FIX ME: need to abort target assists...
4229 			 */
4230 			mpt_prt(mpt, "just putting it back on the pend q\n");
4231 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4232 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4233 			    links);
4234 			continue;
4235 		}
4236 
4237 		error = mpt_scsi_send_tmf(mpt,
4238 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4239 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4240 		    htole32(req->index | scsi_io_handler_id), TRUE);
4241 
4242 		if (error != 0) {
4243 			/*
4244 			 * mpt_scsi_send_tmf hard resets on failure, so no
4245 			 * need to do so here.  Our queue should be emptied
4246 			 * by the hard reset.
4247 			 */
4248 			continue;
4249 		}
4250 
4251 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4252 		    REQ_STATE_DONE, TRUE, 500);
4253 
4254 		status = le16toh(mpt->tmf_req->IOCStatus);
4255 		response = mpt->tmf_req->ResponseCode;
4256 		mpt->tmf_req->state = REQ_STATE_FREE;
4257 
4258 		if (error != 0) {
4259 			/*
4260 			 * If we've errored out, reset the controller.
4261 			 */
4262 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4263 			    "Resetting controller\n");
4264 			mpt_reset(mpt, TRUE);
4265 			continue;
4266 		}
4267 
4268 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4269 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4270 			    "Resetting controller.\n", status);
4271 			mpt_reset(mpt, TRUE);
4272 			continue;
4273 		}
4274 
4275 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4276 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4277 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4278 			    "Resetting controller.\n", response);
4279 			mpt_reset(mpt, TRUE);
4280 			continue;
4281 		}
4282 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4283 	}
4284 }
4285 
4286 /************************ Target Mode Support ****************************/
4287 static void
4288 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4289 {
4290 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4291 	PTR_SGE_TRANSACTION32 tep;
4292 	PTR_SGE_SIMPLE32 se;
4293 	bus_addr_t paddr;
4294 	uint32_t fl;
4295 
4296 	paddr = req->req_pbuf;
4297 	paddr += MPT_RQSL(mpt);
4298 
4299 	fc = req->req_vbuf;
4300 	memset(fc, 0, MPT_REQUEST_AREA);
4301 	fc->BufferCount = 1;
4302 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4303 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
4304 
4305 	/*
4306 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
4307 	 * consist of a TE SGL element (with details length of zero)
4308 	 * followed by a SIMPLE SGL element which holds the address
4309 	 * of the buffer.
4310 	 */
4311 
4312 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4313 
4314 	tep->ContextSize = 4;
4315 	tep->Flags = 0;
4316 	tep->TransactionContext[0] = htole32(ioindex);
4317 
4318 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4319 	fl =
4320 		MPI_SGE_FLAGS_HOST_TO_IOC	|
4321 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4322 		MPI_SGE_FLAGS_LAST_ELEMENT	|
4323 		MPI_SGE_FLAGS_END_OF_LIST	|
4324 		MPI_SGE_FLAGS_END_OF_BUFFER;
4325 	fl <<= MPI_SGE_FLAGS_SHIFT;
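	/* The low FlagsLength bits carry the length of the ELS buffer. */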
4326 	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4327 	se->FlagsLength = htole32(fl);
4328 	se->Address = htole32((uint32_t) paddr);
4329 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4330 	    "add ELS index %d ioindex %d for %p:%u\n",
4331 	    req->index, ioindex, req, req->serno);
4332 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4333 	    ("mpt_fc_post_els: request not locked"));
4334 	mpt_send_cmd(mpt, req);
4335 }
4336 
4337 static void
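/*
 * Post a command buffer back to the IOC so that it can receive another
 * incoming target mode command.
 */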
4338 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4339 {
4340 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4341 	PTR_CMD_BUFFER_DESCRIPTOR cb;
4342 	bus_addr_t paddr;
4343 
4344 	paddr = req->req_pbuf;
4345 	paddr += MPT_RQSL(mpt);
4346 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4347 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4348 
4349 	fc = req->req_vbuf;
4350 	fc->BufferCount = 1;
4351 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4352 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4353 
4354 	cb = &fc->Buffer[0];
4355 	cb->IoIndex = htole16(ioindex);
4356 	cb->u.PhysicalAddress32 = htole32((U32) paddr);
4357 
4358 	mpt_check_doorbell(mpt);
4359 	mpt_send_cmd(mpt, req);
4360 }
4361 
4362 static int
4363 mpt_add_els_buffers(struct mpt_softc *mpt)
4364 {
4365 	int i;
4366 
4367 	if (mpt->is_fc == 0) {
4368 		return (TRUE);
4369 	}
4370 
4371 	if (mpt->els_cmds_allocated) {
4372 		return (TRUE);
4373 	}
4374 
4375 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4376 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4377 
4378 	if (mpt->els_cmd_ptrs == NULL) {
4379 		return (FALSE);
4380 	}
4381 
4382 	/*
4383 	 * Feed the chip some ELS buffer resources
4384 	 */
4385 	for (i = 0; i < MPT_MAX_ELS; i++) {
4386 		request_t *req = mpt_get_request(mpt, FALSE);
4387 		if (req == NULL) {
4388 			break;
4389 		}
4390 		req->state |= REQ_STATE_LOCKED;
4391 		mpt->els_cmd_ptrs[i] = req;
4392 		mpt_fc_post_els(mpt, req, i);
4393 	}
4394 
4395 	if (i == 0) {
4396 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
4397 		free(mpt->els_cmd_ptrs, M_DEVBUF);
4398 		mpt->els_cmd_ptrs = NULL;
4399 		return (FALSE);
4400 	}
4401 	if (i != MPT_MAX_ELS) {
4402 		mpt_lprt(mpt, MPT_PRT_INFO,
4403 		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
4404 	}
4405 	mpt->els_cmds_allocated = i;
4406 	return(TRUE);
4407 }
4408 
4409 static int
4410 mpt_add_target_commands(struct mpt_softc *mpt)
4411 {
4412 	int i, max;
4413 
4414 	if (mpt->tgt_cmd_ptrs) {
4415 		return (TRUE);
4416 	}
4417 
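	/* Devote at most half of the request pool to target command buffers. */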
4418 	max = MPT_MAX_REQUESTS(mpt) >> 1;
4419 	if (max > mpt->mpt_max_tgtcmds) {
4420 		max = mpt->mpt_max_tgtcmds;
4421 	}
4422 	mpt->tgt_cmd_ptrs =
4423 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4424 	if (mpt->tgt_cmd_ptrs == NULL) {
4425 		mpt_prt(mpt,
4426 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
4427 		return (FALSE);
4428 	}
4429 
4430 	for (i = 0; i < max; i++) {
4431 		request_t *req;
4432 
4433 		req = mpt_get_request(mpt, FALSE);
4434 		if (req == NULL) {
4435 			break;
4436 		}
4437 		req->state |= REQ_STATE_LOCKED;
4438 		mpt->tgt_cmd_ptrs[i] = req;
4439 		mpt_post_target_command(mpt, req, i);
4440 	}
4441 
4442 
4444 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4445 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4446 		mpt->tgt_cmd_ptrs = NULL;
4447 		return (FALSE);
4448 	}
4449 
4450 	mpt->tgt_cmds_allocated = i;
4451 
4452 	if (i < max) {
4453 		mpt_lprt(mpt, MPT_PRT_INFO,
4454 		    "added %d of %d target bufs\n", i, max);
4455 	}
4456 	return (i);
4457 }
4458 
4459 static int
4460 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4461 {
4462 
4463 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4464 		mpt->twildcard = 1;
4465 	} else if (lun >= MPT_MAX_LUNS) {
4466 		return (EINVAL);
4467 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4468 		return (EINVAL);
4469 	}
4470 	if (mpt->tenabled == 0) {
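		/*
		 * On the first enable, bounce the FC link, presumably so
		 * that initiators notice our appearance as a target.
		 */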
4471 		if (mpt->is_fc) {
4472 			(void) mpt_fc_reset_link(mpt, 0);
4473 		}
4474 		mpt->tenabled = 1;
4475 	}
4476 	if (lun == CAM_LUN_WILDCARD) {
4477 		mpt->trt_wildcard.enabled = 1;
4478 	} else {
4479 		mpt->trt[lun].enabled = 1;
4480 	}
4481 	return (0);
4482 }
4483 
4484 static int
4485 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4486 {
4487 	int i;
4488 
4489 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4490 		mpt->twildcard = 0;
4491 	} else if (lun >= MPT_MAX_LUNS) {
4492 		return (EINVAL);
4493 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4494 		return (EINVAL);
4495 	}
4496 	if (lun == CAM_LUN_WILDCARD) {
4497 		mpt->trt_wildcard.enabled = 0;
4498 	} else {
4499 		mpt->trt[lun].enabled = 0;
4500 	}
4501 	for (i = 0; i < MPT_MAX_LUNS; i++) {
4502 		if (mpt->trt[i].enabled) {
4503 			break;
4504 		}
4505 	}
4506 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4507 		if (mpt->is_fc) {
4508 			(void) mpt_fc_reset_link(mpt, 0);
4509 		}
4510 		mpt->tenabled = 0;
4511 	}
4512 	return (0);
4513 }
4514 
4515 /*
4516  * Called with MPT lock held
4517  */
4518 static void
4519 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4520 {
4521 	struct ccb_scsiio *csio = &ccb->csio;
4522 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4523 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4524 
4525 	switch (tgt->state) {
4526 	case TGT_STATE_IN_CAM:
4527 		break;
4528 	case TGT_STATE_MOVING_DATA:
4529 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4530 		xpt_freeze_simq(mpt->sim, 1);
4531 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4532 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4533 		MPTLOCK_2_CAMLOCK(mpt);
4534 		xpt_done(ccb);
4535 		CAMLOCK_2_MPTLOCK(mpt);
4536 		return;
4537 	default:
4538 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4539 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4540 		mpt_tgt_dump_req_state(mpt, cmd_req);
4541 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4542 		MPTLOCK_2_CAMLOCK(mpt);
4543 		xpt_done(ccb);
4544 		CAMLOCK_2_MPTLOCK(mpt);
4545 		return;
4546 	}
4547 
4548 	if (csio->dxfer_len) {
4549 		bus_dmamap_callback_t *cb;
4550 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4551 		request_t *req;
4552 
4553 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4554 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
4555 
4556 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4557 			if (mpt->outofbeer == 0) {
4558 				mpt->outofbeer = 1;
4559 				xpt_freeze_simq(mpt->sim, 1);
4560 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4561 			}
4562 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4563 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4564 			MPTLOCK_2_CAMLOCK(mpt);
4565 			xpt_done(ccb);
4566 			CAMLOCK_2_MPTLOCK(mpt);
4567 			return;
4568 		}
4569 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
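		/* Pick the S/G loader callback that matches the DMA address width. */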
4570 		if (sizeof (bus_addr_t) > 4) {
4571 			cb = mpt_execute_req_a64;
4572 		} else {
4573 			cb = mpt_execute_req;
4574 		}
4575 
4576 		req->ccb = ccb;
4577 		ccb->ccb_h.ccb_req_ptr = req;
4578 
4579 		/*
4580 		 * Record the currently active ccb and the
4581 		 * request for it in our target state area.
4582 		 */
4583 		tgt->ccb = ccb;
4584 		tgt->req = req;
4585 
4586 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4587 		ta = req->req_vbuf;
4588 
4589 		if (mpt->is_sas) {
4590 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4591 			     cmd_req->req_vbuf;
4592 			ta->QueueTag = ssp->InitiatorTag;
4593 		} else if (mpt->is_spi) {
4594 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4595 			     cmd_req->req_vbuf;
4596 			ta->QueueTag = sp->Tag;
4597 		}
4598 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4599 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4600 		ta->ReplyWord = htole32(tgt->reply_desc);
4601 		if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
4602 			ta->LUN[0] =
4603 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4604 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4605 		} else {
4606 			ta->LUN[1] = csio->ccb_h.target_lun;
4607 		}
4608 
4609 		ta->RelativeOffset = tgt->bytes_xfered;
4610 		ta->DataLength = ccb->csio.dxfer_len;
4611 		if (ta->DataLength > tgt->resid) {
4612 			ta->DataLength = tgt->resid;
4613 		}
4614 
4615 		/*
4616 		 * XXX Should be done after data transfer completes?
4617 		 */
4618 		tgt->resid -= csio->dxfer_len;
4619 		tgt->bytes_xfered += csio->dxfer_len;
4620 
4621 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4622 			ta->TargetAssistFlags |=
4623 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4624 		}
4625 
4626 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4627 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4628 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4629 			ta->TargetAssistFlags |=
4630 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4631 		}
4632 #endif
4633 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4634 
4635 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4636 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4637 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4638 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4639 
4640 		MPTLOCK_2_CAMLOCK(mpt);
4641 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4642 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4643 				int error;
4644 				int s = splsoftvm();
4645 				error = bus_dmamap_load(mpt->buffer_dmat,
4646 				    req->dmap, csio->data_ptr, csio->dxfer_len,
4647 				    cb, req, 0);
4648 				splx(s);
4649 				if (error == EINPROGRESS) {
4650 					xpt_freeze_simq(mpt->sim, 1);
4651 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4652 				}
4653 			} else {
4654 				/*
4655 				 * We have been given a pointer to single
4656 				 * physical buffer.
4657 				 */
4658 				struct bus_dma_segment seg;
4659 				seg.ds_addr = (bus_addr_t)
4660 				    (vm_offset_t)csio->data_ptr;
4661 				seg.ds_len = csio->dxfer_len;
4662 				(*cb)(req, &seg, 1, 0);
4663 			}
4664 		} else {
4665 			/*
4666 			 * We have been given a list of addresses.
4667 			 * This case could be easily supported but they are not
4668 			 * currently generated by the CAM subsystem so there
4669 			 * is no point in wasting the time right now.
4670 			 */
4671 			struct bus_dma_segment *sgs;
4672 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4673 				(*cb)(req, NULL, 0, EFAULT);
4674 			} else {
4675 				/* Just use the segments provided */
4676 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4677 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4678 			}
4679 		}
4680 		CAMLOCK_2_MPTLOCK(mpt);
4681 	} else {
4682 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4683 
4684 		/*
4685 		 * XXX: I don't know why this seems to happen, but
4686 		 * XXX: completing the CCB seems to make things happy.
4687 		 * XXX: This seems to happen if the initiator requests
4688 		 * XXX: enough data that we have to do multiple CTIOs.
4689 		 */
4690 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4691 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4692 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4693 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4694 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4695 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4696 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4697 			MPTLOCK_2_CAMLOCK(mpt);
4698 			xpt_done(ccb);
4699 			CAMLOCK_2_MPTLOCK(mpt);
4700 			return;
4701 		}
4702 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4703 			sp = sense;
4704 			memcpy(sp, &csio->sense_data,
4705 			   min(csio->sense_len, MPT_SENSE_SIZE));
4706 		}
4707 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4708 	}
4709 }
4710 
4711 static void
4712 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4713     uint32_t lun, int send, uint8_t *data, size_t length)
4714 {
4715 	mpt_tgt_state_t *tgt;
4716 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4717 	SGE_SIMPLE32 *se;
4718 	uint32_t flags;
4719 	uint8_t *dptr;
4720 	bus_addr_t pptr;
4721 	request_t *req;
4722 
4723 	/*
4724 	 * We enter with resid set to the data load for the command.
4725 	 */
4726 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4727 	if (length == 0 || tgt->resid == 0) {
4728 		tgt->resid = 0;
4729 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4730 		return;
4731 	}
4732 
4733 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4734 		mpt_prt(mpt, "out of resources- dropping local response\n");
4735 		return;
4736 	}
4737 	tgt->is_local = 1;
4738 
4739 
4741 	ta = req->req_vbuf;
4742 
4743 	if (mpt->is_sas) {
4744 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4745 		ta->QueueTag = ssp->InitiatorTag;
4746 	} else if (mpt->is_spi) {
4747 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4748 		ta->QueueTag = sp->Tag;
4749 	}
4750 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4751 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4752 	ta->ReplyWord = htole32(tgt->reply_desc);
4753 	if (lun > MPT_MAX_LUNS) {
4754 		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4755 		ta->LUN[1] = lun & 0xff;
4756 	} else {
4757 		ta->LUN[1] = lun;
4758 	}
4759 	ta->RelativeOffset = 0;
4760 	ta->DataLength = length;
4761 
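	/*
	 * Stage the response data in the second half of the request
	 * buffer; the simple SGE built below points the IOC at it.
	 */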
4762 	dptr = req->req_vbuf;
4763 	dptr += MPT_RQSL(mpt);
4764 	pptr = req->req_pbuf;
4765 	pptr += MPT_RQSL(mpt);
4766 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4767 
4768 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4769 	memset(se, 0, sizeof (*se));
4770 
4771 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4772 	if (send) {
4773 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4774 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4775 	}
4776 	se->Address = pptr;
4777 	MPI_pSGE_SET_LENGTH(se, length);
4778 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4779 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4780 	MPI_pSGE_SET_FLAGS(se, flags);
4781 
4782 	tgt->ccb = NULL;
4783 	tgt->req = req;
4784 	tgt->resid -= length;
4785 	tgt->bytes_xfered = length;
4786 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4787 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4788 #else
4789 	tgt->state = TGT_STATE_MOVING_DATA;
4790 #endif
4791 	mpt_send_cmd(mpt, req);
4792 }
4793 
4794 /*
4795  * Abort queued up CCBs
4796  */
4797 static cam_status
4798 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4799 {
4800 	struct mpt_hdr_stailq *lp;
4801 	struct ccb_hdr *srch;
4802 	int found = 0;
4803 	union ccb *accb = ccb->cab.abort_ccb;
4804 	tgt_resource_t *trtp;
4805 
4806 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4807 
4808 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4809 		trtp = &mpt->trt_wildcard;
4810 	} else {
4811 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4812 	}
4813 
4814 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4815 		lp = &trtp->atios;
4816 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4817 		lp = &trtp->inots;
4818 	} else {
4819 		return (CAM_REQ_INVALID);
4820 	}
4821 
4822 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4823 		if (srch == &accb->ccb_h) {
4824 			found = 1;
4825 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4826 			break;
4827 		}
4828 	}
4829 	if (found) {
4830 		accb->ccb_h.status = CAM_REQ_ABORTED;
4831 		xpt_done(accb);
4832 		return (CAM_REQ_CMP);
4833 	}
4834 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
4835 	return (CAM_PATH_INVALID);
4836 }
4837 
4838 /*
4839  * Ask the MPT to abort the current target command
4840  */
4841 static int
4842 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4843 {
4844 	int error;
4845 	request_t *req;
4846 	PTR_MSG_TARGET_MODE_ABORT abtp;
4847 
4848 	req = mpt_get_request(mpt, FALSE);
4849 	if (req == NULL) {
4850 		return (-1);
4851 	}
4852 	abtp = req->req_vbuf;
4853 	memset(abtp, 0, sizeof (*abtp));
4854 
4855 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4856 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4857 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4858 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4859 	error = 0;
4860 	if (mpt->is_fc || mpt->is_sas) {
4861 		mpt_send_cmd(mpt, req);
4862 	} else {
4863 		error = mpt_send_handshake_cmd(mpt, sizeof (*abtp), abtp);
4864 	}
4865 	return (error);
4866 }
4867 
4868 /*
4869  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4870  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4871  * FC929 to set bogus FC_RSP fields (nonzero residuals
4872  * but w/o RESID fields set). This causes QLogic initiators
4873  * to think maybe that a frame was lost.
4874  *
4875  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4876  * we use allocated requests to do TARGET_ASSIST and we
4877  * need to know when to release them.
4878  */
4879 
4880 static void
4881 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4882     uint8_t status, uint8_t const *sense_data)
4883 {
4884 	uint8_t *cmd_vbuf;
4885 	mpt_tgt_state_t *tgt;
4886 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4887 	request_t *req;
4888 	bus_addr_t paddr;
4889 	int resplen = 0;
4890 	uint32_t fl;
4891 
4892 	cmd_vbuf = cmd_req->req_vbuf;
4893 	cmd_vbuf += MPT_RQSL(mpt);
4894 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4895 
4896 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4897 		if (mpt->outofbeer == 0) {
4898 			mpt->outofbeer = 1;
4899 			xpt_freeze_simq(mpt->sim, 1);
4900 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4901 		}
4902 		if (ccb) {
4903 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4904 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4905 			MPTLOCK_2_CAMLOCK(mpt);
4906 			xpt_done(ccb);
4907 			CAMLOCK_2_MPTLOCK(mpt);
4908 		} else {
4909 			mpt_prt(mpt,
4910 			    "could not allocate status request- dropping\n");
4911 		}
4912 		return;
4913 	}
4914 	req->ccb = ccb;
4915 	if (ccb) {
4916 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4917 		ccb->ccb_h.ccb_req_ptr = req;
4918 	}
4919 
4920 	/*
4921 	 * Record the currently active ccb, if any, and the
4922 	 * request for it in our target state area.
4923 	 */
4924 	tgt->ccb = ccb;
4925 	tgt->req = req;
4926 	tgt->state = TGT_STATE_SENDING_STATUS;
4927 
4928 	tp = req->req_vbuf;
4929 	paddr = req->req_pbuf;
4930 	paddr += MPT_RQSL(mpt);
4931 
4932 	memset(tp, 0, sizeof (*tp));
4933 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4934 	if (mpt->is_fc) {
4935 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4936 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4937 		uint8_t *sts_vbuf;
4938 		uint32_t *rsp;
4939 
4940 		sts_vbuf = req->req_vbuf;
4941 		sts_vbuf += MPT_RQSL(mpt);
4942 		rsp = (uint32_t *) sts_vbuf;
4943 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4944 
4945 		/*
4946 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4947 		 * It has to be big-endian in memory and is organized
4948 		 * in 32 bit words, which are much easier to deal with
4949 		 * when byte swapped (swizzled) as needed.
4950 		 *
4951 		 * All we're filling here is the FC_RSP payload.
4952 		 * We may just have the chip synthesize it if
4953 		 * we have no residual and an OK status.
4954 		 *
4955 		 */
4956 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4957 
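		/*
		 * Words of the FCP_RSP payload filled in below (big-endian
		 * on the wire): rsp[2] holds the flags byte and SCSI status,
		 * rsp[3] the residual, rsp[4] the sense length, and rsp[8]
		 * onward the sense data itself.
		 */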
4958 		rsp[2] = status;
4959 		if (tgt->resid) {
4960 			rsp[2] |= 0x800;	/* FCP_RESID_UNDER */
4961 			rsp[3] = htobe32(tgt->resid);
4962 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4963 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4964 #endif
4965 		}
4966 		if (status == SCSI_STATUS_CHECK_COND) {
4967 			int i;
4968 
4969 			rsp[2] |= 0x200;	/* FCP_SNS_LEN_VALID */
4970 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4971 			if (sense_data) {
4972 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4973 			} else {
4974 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4975 				    "TION but no sense data?\n");
4976 				memset(&rsp[8], 0, MPT_SENSE_SIZE);
4977 			}
4978 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4979 				rsp[i] = htobe32(rsp[i]);
4980 			}
4981 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4982 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4983 #endif
4984 		}
4985 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4986 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4987 #endif
4988 		rsp[2] = htobe32(rsp[2]);
4989 	} else if (mpt->is_sas) {
4990 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4991 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4992 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4993 	} else {
4994 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4995 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4996 		tp->StatusCode = status;
4997 		tp->QueueTag = htole16(sp->Tag);
4998 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4999 	}
5000 
5001 	tp->ReplyWord = htole32(tgt->reply_desc);
5002 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
5003 
5004 #ifdef	WE_CAN_USE_AUTO_REPOST
5005 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
5006 #endif
5007 	if (status == SCSI_STATUS_OK && resplen == 0) {
5008 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
5009 	} else {
5010 		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
5011 		fl =
5012 			MPI_SGE_FLAGS_HOST_TO_IOC	|
5013 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
5014 			MPI_SGE_FLAGS_LAST_ELEMENT	|
5015 			MPI_SGE_FLAGS_END_OF_LIST	|
5016 			MPI_SGE_FLAGS_END_OF_BUFFER;
5017 		fl <<= MPI_SGE_FLAGS_SHIFT;
5018 		fl |= resplen;
5019 		tp->StatusDataSGE.FlagsLength = htole32(fl);
5020 	}
5021 
5022 	mpt_lprt(mpt, MPT_PRT_DEBUG,
5023 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
5024 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
5025 	    req->serno, tgt->resid);
5026 	if (ccb) {
5027 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
5028 		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
5029 	}
5030 	mpt_send_cmd(mpt, req);
5031 }
5032 
5033 static void
5034 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
5035     tgt_resource_t *trtp, int init_id)
5036 {
5037 	struct ccb_immed_notify *inot;
5038 	mpt_tgt_state_t *tgt;
5039 
5040 	tgt = MPT_TGT_STATE(mpt, req);
5041 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
5042 	if (inot == NULL) {
5043 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
5044 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
5045 		return;
5046 	}
5047 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
5048 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
5049 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
5050 
5051 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
5052 	inot->sense_len = 0;
5053 	memset(inot->message_args, 0, sizeof (inot->message_args));
5054 	inot->initiator_id = init_id;	/* XXX */
5055 
5056 	/*
5057 	 * This is a somewhat grotesque attempt to map from task management
5058 	 * to old style SCSI messages. God help us all.
5059 	 */
5060 	switch (fc) {
5061 	case MPT_ABORT_TASK_SET:
5062 		inot->message_args[0] = MSG_ABORT_TAG;
5063 		break;
5064 	case MPT_CLEAR_TASK_SET:
5065 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
5066 		break;
5067 	case MPT_TARGET_RESET:
5068 		inot->message_args[0] = MSG_TARGET_RESET;
5069 		break;
5070 	case MPT_CLEAR_ACA:
5071 		inot->message_args[0] = MSG_CLEAR_ACA;
5072 		break;
5073 	case MPT_TERMINATE_TASK:
5074 		inot->message_args[0] = MSG_ABORT_TAG;
5075 		break;
5076 	default:
5077 		inot->message_args[0] = MSG_NOOP;
5078 		break;
5079 	}
5080 	tgt->ccb = (union ccb *) inot;
5081 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
5082 	MPTLOCK_2_CAMLOCK(mpt);
5083 	xpt_done((union ccb *)inot);
5084 	CAMLOCK_2_MPTLOCK(mpt);
5085 }
5086 
5087 static void
5088 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
5089 {
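	/*
	 * Canned INQUIRY data for unconfigured luns: byte 0 is 0x7f
	 * (peripheral qualifier 011b, device type 1Fh), i.e. "no device
	 * of this type at this lun".
	 */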
5090 	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
5091 	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
5092 	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
5093 	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
5094 	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
5095 	     '0',  '0',  '0',  '1'
5096 	};
5097 	struct ccb_accept_tio *atiop;
5098 	lun_id_t lun;
5099 	int tag_action = 0;
5100 	mpt_tgt_state_t *tgt;
5101 	tgt_resource_t *trtp = NULL;
5102 	U8 *lunptr;
5103 	U8 *vbuf;
5104 	U16 itag;
5105 	U16 ioindex;
5106 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
5107 	uint8_t *cdbp;
5108 
5109 	/*
5110 	 * Stash info for the current command where we can get at it later.
5111 	 */
5112 	vbuf = req->req_vbuf;
5113 	vbuf += MPT_RQSL(mpt);
5114 
5115 	/*
5116 	 * Get our state pointer set up.
5117 	 */
5118 	tgt = MPT_TGT_STATE(mpt, req);
5119 	if (tgt->state != TGT_STATE_LOADED) {
5120 		mpt_tgt_dump_req_state(mpt, req);
5121 		panic("bad target state in mpt_scsi_tgt_atio");
5122 	}
5123 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
5124 	tgt->state = TGT_STATE_IN_CAM;
5125 	tgt->reply_desc = reply_desc;
5126 	ioindex = GET_IO_INDEX(reply_desc);
5127 	if (mpt->verbose >= MPT_PRT_DEBUG) {
5128 		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
5129 		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
5130 		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
5131 		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
5132 	}
5133 	if (mpt->is_fc) {
5134 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
5135 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
5136 		if (fc->FcpCntl[2]) {
5137 			/*
5138 			 * Task Management Request
5139 			 */
5140 			switch (fc->FcpCntl[2]) {
5141 			case 0x2:
5142 				fct = MPT_ABORT_TASK_SET;
5143 				break;
5144 			case 0x4:
5145 				fct = MPT_CLEAR_TASK_SET;
5146 				break;
5147 			case 0x20:
5148 				fct = MPT_TARGET_RESET;
5149 				break;
5150 			case 0x40:
5151 				fct = MPT_CLEAR_ACA;
5152 				break;
5153 			case 0x80:
5154 				fct = MPT_TERMINATE_TASK;
5155 				break;
5156 			default:
5157 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
5158 				    fc->FcpCntl[2]);
5159 				mpt_scsi_tgt_status(mpt, 0, req,
5160 				    SCSI_STATUS_OK, 0);
5161 				return;
5162 			}
5163 		} else {
5164 			switch (fc->FcpCntl[1]) {
5165 			case 0:
5166 				tag_action = MSG_SIMPLE_Q_TAG;
5167 				break;
5168 			case 1:
5169 				tag_action = MSG_HEAD_OF_Q_TAG;
5170 				break;
5171 			case 2:
5172 				tag_action = MSG_ORDERED_Q_TAG;
5173 				break;
5174 			default:
5175 				/*
5176 				 * Bah. Ignore Untagged Queuing and ACA
5177 				 */
5178 				tag_action = MSG_SIMPLE_Q_TAG;
5179 				break;
5180 			}
5181 		}
5182 		tgt->resid = be32toh(fc->FcpDl);
5183 		cdbp = fc->FcpCdb;
5184 		lunptr = fc->FcpLun;
5185 		itag = be16toh(fc->OptionalOxid);
5186 	} else if (mpt->is_sas) {
5187 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
5188 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
5189 		cdbp = ssp->CDB;
5190 		lunptr = ssp->LogicalUnitNumber;
5191 		itag = ssp->InitiatorTag;
5192 	} else {
5193 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
5194 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
5195 		cdbp = sp->CDB;
5196 		lunptr = sp->LogicalUnitNumber;
5197 		itag = sp->Tag;
5198 	}
5199 
5200 	/*
5201 	 * Generate a simple lun
5202 	 */
5203 	switch (lunptr[0] & 0xc0) {
5204 	case 0x40:
5205 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
5206 		break;
5207 	case 0:
5208 		lun = lunptr[1];
5209 		break;
5210 	default:
5211 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type of lun\n");
5212 		lun = 0xffff;
5213 		break;
5214 	}
5215 
5216 	/*
5217 	 * Deal with non-enabled or bad luns here.
5218 	 */
5219 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
5220 	    mpt->trt[lun].enabled == 0) {
5221 		if (mpt->twildcard) {
5222 			trtp = &mpt->trt_wildcard;
5223 		} else if (fct == MPT_NIL_TMT_VALUE) {
5224 			/*
5225 			 * In this case, we haven't got an upstream listener
5226 			 * for either a specific lun or wildcard luns. We
5227 			 * have to make some sensible response. For regular
5228 			 * inquiry, just return some NOT HERE inquiry data.
5229 			 * For VPD inquiry, report illegal field in cdb.
5230 			 * For REQUEST SENSE, just return NO SENSE data.
5231 			 * REPORT LUNS gets illegal command.
5232 			 * All other commands get 'no such device'.
5233 			 */
5234 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
5235 			size_t len;
5236 
5237 			memset(buf, 0, MPT_SENSE_SIZE);
5238 			cond = SCSI_STATUS_CHECK_COND;
5239 			buf[0] = 0xf0;	/* current error, fixed format */
5240 			buf[2] = 0x5;	/* sense key: ILLEGAL REQUEST */
5241 			buf[7] = 0x8;	/* additional sense length */
5242 			sp = buf;
5243 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5244 
5245 			switch (cdbp[0]) {
5246 			case INQUIRY:
5247 			{
5248 				if (cdbp[1] != 0) {
5249 					buf[12] = 0x26;
5250 					buf[13] = 0x01;
5251 					break;
5252 				}
5253 				len = min(tgt->resid, cdbp[4]);
5254 				len = min(len, sizeof (null_iqd));
5255 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5256 				    "local inquiry %ld bytes\n", (long) len);
5257 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5258 				    null_iqd, len);
5259 				return;
5260 			}
5261 			case REQUEST_SENSE:
5262 			{
5263 				buf[2] = 0x0;
5264 				len = min(tgt->resid, cdbp[4]);
5265 				len = min(len, sizeof (buf));
5266 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5267 				    "local reqsense %ld bytes\n", (long) len);
5268 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5269 				    buf, len);
5270 				return;
5271 			}
5272 			case REPORT_LUNS:
5273 				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
5274 				buf[12] = 0x26;
5275 				break;	/* send CHECK CONDITION; don't drop the command */
5276 			default:
5277 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5278 				    "CMD 0x%x to unmanaged lun %u\n",
5279 				    cdbp[0], lun);
5280 				buf[12] = 0x25;	/* ASC: logical unit not supported */
5281 				break;
5282 			}
5283 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
5284 			return;
5285 		}
5286 		/* otherwise, leave trtp NULL */
5287 	} else {
5288 		trtp = &mpt->trt[lun];
5289 	}
5290 
5291 	/*
5292 	 * Deal with any task management
5293 	 */
5294 	if (fct != MPT_NIL_TMT_VALUE) {
5295 		if (trtp == NULL) {
5296 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
5297 			    fct);
5298 			mpt_scsi_tgt_status(mpt, NULL, req,
5299 			    SCSI_STATUS_OK, NULL);
5300 		} else {
5301 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5302 			    GET_INITIATOR_INDEX(reply_desc));
5303 		}
5304 		return;
5305 	}
5306 
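	/*
	 * Grab a free ATIO from the lun's queue to carry this command up
	 * to the peripheral driver. If none are queued, return QUEUE FULL
	 * (or BUSY if target mode isn't enabled) so the initiator retries.
	 */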
5308 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5309 	if (atiop == NULL) {
5310 		mpt_lprt(mpt, MPT_PRT_WARN,
5311 		    "no ATIOs for lun %u - sending back %s\n", lun,
5312 		    mpt->tenabled ? "QUEUE FULL" : "BUSY");
5313 		mpt_scsi_tgt_status(mpt, NULL, req,
5314 		    mpt->tenabled ? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5315 		    NULL);
5316 		return;
5317 	}
5318 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5319 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
5320 	    "Get FREE ATIO %p lun %u\n", atiop, atiop->ccb_h.target_lun);
5321 	atiop->ccb_h.ccb_mpt_ptr = mpt;
5322 	atiop->ccb_h.status = CAM_CDB_RECVD;
5323 	atiop->ccb_h.target_lun = lun;
5324 	atiop->sense_len = 0;
5325 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
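	/*
	 * mpt_cdblen() infers the CDB length from the opcode's group code,
	 * capped at the 16 bytes the firmware presents.
	 */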
5326 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
5327 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
5328 
5329 	/*
5330 	 * The tag we construct here allows us to find the
5331 	 * original request that the command came in with.
5332 	 *
5333 	 * This way we don't have to depend on anything but the
5334 	 * tag to find things when CCBs show back up from CAM.
5335 	 */
5336 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5337 	tgt->tag_id = atiop->tag_id;
5338 	if (tag_action) {
5339 		atiop->tag_action = tag_action;
5340 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
5341 	}
5342 	if (mpt->verbose >= MPT_PRT_DEBUG) {
5343 		int i;
5344 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
5345 		    atiop->ccb_h.target_lun);
5346 		for (i = 0; i < atiop->cdb_len; i++) {
5347 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
5348 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
5349 		}
5350 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
5351 		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
5352 	}
5353 
5354 	MPTLOCK_2_CAMLOCK(mpt);
5355 	xpt_done((union ccb *)atiop);
5356 	CAMLOCK_2_MPTLOCK(mpt);
5357 }
5358 
5359 static void
5360 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
5361 {
5362 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5363 
5364 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
5365 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
5366 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
5367 	    tgt->tag_id, tgt->state);
5368 }
5369 
5370 static void
5371 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
5372 {
5373 
5374 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
5375 	    req->index, req->index, req->state);
5376 	mpt_tgt_dump_tgt_state(mpt, req);
5377 }
5378 
5379 static int
5380 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
5381     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
5382 {
5383 	int dbg;
5384 	union ccb *ccb;
5385 	U16 status;
5386 
5387 	if (reply_frame == NULL) {
5388 		/*
5389 		 * Figure out what the state of the command is.
5390 		 */
5391 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5392 
5393 #ifdef	INVARIANTS
5394 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
5395 		if (tgt->req) {
5396 			mpt_req_not_spcl(mpt, tgt->req,
5397 			    "turbo scsi_tgt_reply associated req", __LINE__);
5398 		}
5399 #endif
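		/*
		 * A turbo (context-only) reply carries no reply frame;
		 * advance the command based solely on the state recorded
		 * in the request's target tracker.
		 */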
5400 		switch (tgt->state) {
5401 		case TGT_STATE_LOADED:
5402 			/*
5403 			 * This is a new command starting.
5404 			 */
5405 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
5406 			break;
5407 		case TGT_STATE_MOVING_DATA:
5408 		{
5409 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
5410 
5411 			ccb = tgt->ccb;
5412 			if (tgt->req == NULL) {
5413 				panic("mpt: turbo target reply with null "
5414 				    "associated request moving data");
5415 				/* NOTREACHED */
5416 			}
5417 			if (ccb == NULL) {
5418 				if (tgt->is_local == 0) {
5419 					panic("mpt: turbo target reply with "
5420 					    "null associated ccb moving data");
5421 					/* NOTREACHED */
5422 				}
5423 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5424 				    "TARGET_ASSIST local done\n");
5425 				TAILQ_REMOVE(&mpt->request_pending_list,
5426 				    tgt->req, links);
5427 				mpt_free_request(mpt, tgt->req);
5428 				tgt->req = NULL;
5429 				mpt_scsi_tgt_status(mpt, NULL, req,
5430 				    0, NULL);
5431 				return (TRUE);
5432 			}
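			/*
			 * Otherwise this TARGET_ASSIST completed a CAM ccb;
			 * the is_local case above covers responses the driver
			 * synthesized itself (see mpt_scsi_tgt_local()).
			 */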
5433 			tgt->ccb = NULL;
5434 			tgt->nxfers++;
5435 			mpt_req_untimeout(req, mpt_timeout, ccb);
5436 			mpt_lprt(mpt, MPT_PRT_DEBUG,
5437 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5438 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5439 			/*
5440 			 * Free the Target Assist Request
5441 			 */
5442 			KASSERT(tgt->req->ccb == ccb,
5443 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
5444 			    tgt->req->serno, tgt->req->ccb));
5445 			TAILQ_REMOVE(&mpt->request_pending_list,
5446 			    tgt->req, links);
5447 			mpt_free_request(mpt, tgt->req);
5448 			tgt->req = NULL;
5449 
5450 			/*
5451 			 * Do we need to send status now? That is, are
5452 			 * we done with all our data transfers?
5453 			 */
5454 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
5455 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5456 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5457 				KASSERT(ccb->ccb_h.status,
5458 				    ("zero ccb sts at %d\n", __LINE__));
5459 				tgt->state = TGT_STATE_IN_CAM;
5460 				if (mpt->outofbeer) {
5461 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5462 					mpt->outofbeer = 0;
5463 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5464 				}
5465 				MPTLOCK_2_CAMLOCK(mpt);
5466 				xpt_done(ccb);
5467 				CAMLOCK_2_MPTLOCK(mpt);
5468 				break;
5469 			}
5470 			/*
5471 			 * Otherwise, send status (and sense)
5472 			 */
5473 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5474 				sp = sense;
5475 				memcpy(sp, &ccb->csio.sense_data,
5476 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
5477 			}
5478 			mpt_scsi_tgt_status(mpt, ccb, req,
5479 			    ccb->csio.scsi_status, sp);
5480 			break;
5481 		}
5482 		case TGT_STATE_SENDING_STATUS:
5483 		case TGT_STATE_MOVING_DATA_AND_STATUS:
5484 		{
5485 			int ioindex;
5486 			ccb = tgt->ccb;
5487 
5488 			if (tgt->req == NULL) {
5489 				panic("mpt: turbo target reply with null "
5490 				    "associated request sending status");
5491 				/* NOTREACHED */
5492 			}
5493 
5494 			if (ccb) {
5495 				tgt->ccb = NULL;
5496 				if (tgt->state ==
5497 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
5498 					tgt->nxfers++;
5499 				}
5500 				mpt_req_untimeout(req, mpt_timeout, ccb);
5501 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5502 					ccb->ccb_h.status |= CAM_SENT_SENSE;
5503 				}
5504 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5505 				    "TARGET_STATUS tag %x sts %x flgs %x req "
5506 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5507 				    ccb->ccb_h.flags, tgt->req);
5508 				/*
5509 				 * Free the Target Send Status Request
5510 				 */
5511 				KASSERT(tgt->req->ccb == ccb,
5512 				    ("tgt->req %p:%u tgt->req->ccb %p",
5513 				    tgt->req, tgt->req->serno, tgt->req->ccb));
5514 				/*
5515 				 * Notify CAM that we're done
5516 				 */
5517 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5518 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5519 				KASSERT(ccb->ccb_h.status,
5520 				    ("ZERO ccb sts at %d\n", __LINE__));
5521 				tgt->ccb = NULL;
5522 			} else {
5523 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5524 				    "TARGET_STATUS non-CAM for req %p:%u\n",
5525 				    tgt->req, tgt->req->serno);
5526 			}
5527 			TAILQ_REMOVE(&mpt->request_pending_list,
5528 			    tgt->req, links);
5529 			mpt_free_request(mpt, tgt->req);
5530 			tgt->req = NULL;
5531 
5532 			/*
5533 			 * And re-post the Command Buffer.
5534 			 * This will reset the state.
5535 			 */
5536 			ioindex = GET_IO_INDEX(reply_desc);
5537 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5538 			tgt->is_local = 0;
5539 			mpt_post_target_command(mpt, req, ioindex);
5540 
5541 			/*
5542 			 * And post a done for anyone who cares
5543 			 */
5544 			if (ccb) {
5545 				if (mpt->outofbeer) {
5546 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5547 					mpt->outofbeer = 0;
5548 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5549 				}
5550 				MPTLOCK_2_CAMLOCK(mpt);
5551 				xpt_done(ccb);
5552 				CAMLOCK_2_MPTLOCK(mpt);
5553 			}
5554 			break;
5555 		}
5556 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
5557 			tgt->state = TGT_STATE_LOADED;
5558 			break;
5559 		default:
5560 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5561 			    "Reply Function\n", tgt->state);
5562 		}
5563 		return (TRUE);
5564 	}
5565 
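	/*
	 * Non-turbo completion: a full reply frame is present. Decode
	 * the IOC status and dispatch on the original message function.
	 */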
5566 	status = le16toh(reply_frame->IOCStatus);
5567 	if (status != MPI_IOCSTATUS_SUCCESS) {
5568 		dbg = MPT_PRT_ERROR;
5569 	} else {
5570 		dbg = MPT_PRT_DEBUG1;
5571 	}
5572 
5573 	mpt_lprt(mpt, dbg,
5574 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5575 	     req, req->serno, reply_frame, reply_frame->Function, status);
5576 
5577 	switch (reply_frame->Function) {
5578 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5579 	{
5580 		mpt_tgt_state_t *tgt;
5581 #ifdef	INVARIANTS
5582 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5583 #endif
5584 		if (status != MPI_IOCSTATUS_SUCCESS) {
5585 			/*
5586 			 * XXX What to do?
5587 			 */
5588 			break;
5589 		}
5590 		tgt = MPT_TGT_STATE(mpt, req);
5591 		KASSERT(tgt->state == TGT_STATE_LOADING,
5592 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
5593 		mpt_assign_serno(mpt, req);
5594 		tgt->state = TGT_STATE_LOADED;
5595 		break;
5596 	}
5597 	case MPI_FUNCTION_TARGET_ASSIST:
5598 #ifdef	INVARIANTS
5599 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5600 #endif
5601 		mpt_prt(mpt, "target assist completion\n");
5602 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5603 		mpt_free_request(mpt, req);
5604 		break;
5605 	case MPI_FUNCTION_TARGET_STATUS_SEND:
5606 #ifdef	INVARIANTS
5607 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5608 #endif
5609 		mpt_prt(mpt, "status send completion\n");
5610 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5611 		mpt_free_request(mpt, req);
5612 		break;
5613 	case MPI_FUNCTION_TARGET_MODE_ABORT:
5614 	{
5615 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5616 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5617 		PTR_MSG_TARGET_MODE_ABORT abtp =
5618 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5619 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5620 #ifdef	INVARIANTS
5621 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5622 #endif
5623 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5624 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5625 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5626 		mpt_free_request(mpt, req);
5627 		break;
5628 	}
5629 	default:
5630 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5631 		    "0x%x\n", reply_frame->Function);
5632 		break;
5633 	}
5634 	return (TRUE);
5635 }
5636