xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision 8847579c57d6aff2b3371c707dce7a2cee8389aa)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC  adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  *
61  * Support from LSI-Logic has also gone a great deal toward making this a
62  * workable subsystem and is gratefully acknowledged.
63  */
64 /*-
65  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66  * Copyright (c) 2005, WHEEL Sp. z o.o.
67  * Copyright (c) 2004, 2005 Justin T. Gibbs
68  * All rights reserved.
69  *
70  * Redistribution and use in source and binary forms, with or without
71  * modification, are permitted provided that the following conditions are
72  * met:
73  * 1. Redistributions of source code must retain the above copyright
74  *    notice, this list of conditions and the following disclaimer.
75  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76  *    substantially similar to the "NO WARRANTY" disclaimer below
77  *    ("Disclaimer") and any redistribution must be conditioned upon including
78  *    a substantially similar Disclaimer requirement for further binary
79  *    redistribution.
80  * 3. Neither the names of the above listed copyright holders nor the names
81  *    of any contributors may be used to endorse or promote products derived
82  *    from this software without specific prior written permission.
83  *
84  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95  */
96 #include <sys/cdefs.h>
97 __FBSDID("$FreeBSD$");
98 
99 #include <dev/mpt/mpt.h>
100 #include <dev/mpt/mpt_cam.h>
101 #include <dev/mpt/mpt_raid.h>
102 
103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/mpt/mpilib/mpi_init.h"
105 #include "dev/mpt/mpilib/mpi_targ.h"
106 #include "dev/mpt/mpilib/mpi_fc.h"
107 
108 #include <sys/callout.h>
109 #include <sys/kthread.h>
110 
111 static void mpt_poll(struct cam_sim *);
112 static timeout_t mpt_timeout;
113 static void mpt_action(struct cam_sim *, union ccb *);
114 static int
115 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
116 static void mpt_setwidth(struct mpt_softc *, int, int);
117 static void mpt_setsync(struct mpt_softc *, int, int, int);
118 static int mpt_update_spi_config(struct mpt_softc *, int);
119 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
120 
121 static mpt_reply_handler_t mpt_scsi_reply_handler;
122 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
123 static mpt_reply_handler_t mpt_fc_els_reply_handler;
124 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
125 					MSG_DEFAULT_REPLY *);
126 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
127 static int mpt_fc_reset_link(struct mpt_softc *, int);
128 
129 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
130 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
131 static void mpt_recovery_thread(void *arg);
132 static void mpt_recover_commands(struct mpt_softc *mpt);
133 
134 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
135     u_int, u_int, u_int, int);
136 
137 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
138 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
139 static int mpt_add_els_buffers(struct mpt_softc *mpt);
140 static int mpt_add_target_commands(struct mpt_softc *mpt);
141 static void mpt_free_els_buffers(struct mpt_softc *mpt);
142 static void mpt_free_target_commands(struct mpt_softc *mpt);
143 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
144 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
145 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
146 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
147 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
148 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
149     uint8_t, uint8_t const *);
150 static void
151 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
152     tgt_resource_t *, int);
153 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
154 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
155 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
156 
157 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
158 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
159 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
160 
161 static mpt_probe_handler_t	mpt_cam_probe;
162 static mpt_attach_handler_t	mpt_cam_attach;
163 static mpt_enable_handler_t	mpt_cam_enable;
164 static mpt_event_handler_t	mpt_cam_event;
165 static mpt_reset_handler_t	mpt_cam_ioc_reset;
166 static mpt_detach_handler_t	mpt_cam_detach;
167 
168 static struct mpt_personality mpt_cam_personality =
169 {
170 	.name		= "mpt_cam",
171 	.probe		= mpt_cam_probe,
172 	.attach		= mpt_cam_attach,
173 	.enable		= mpt_cam_enable,
174 	.event		= mpt_cam_event,
175 	.reset		= mpt_cam_ioc_reset,
176 	.detach		= mpt_cam_detach,
177 };
178 
179 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
180 
181 int
182 mpt_cam_probe(struct mpt_softc *mpt)
183 {
184 	/*
185 	 * Only attach to nodes that support the initiator or target
186 	 * role or have RAID physical devices that need CAM pass-thru support.
187 	 */
188 	if ((mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_INITIATOR) != 0
189 	 || (mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_TARGET) != 0
190 	 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
191 		return (0);
192 	}
193 	return (ENODEV);
194 }
195 
196 int
197 mpt_cam_attach(struct mpt_softc *mpt)
198 {
199 	struct cam_devq *devq;
200 	mpt_handler_t	 handler;
201 	int		 maxq;
202 	int		 error;
203 
204 	TAILQ_INIT(&mpt->request_timeout_list);
205 	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
206 	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
207 
208 	handler.reply_handler = mpt_scsi_reply_handler;
209 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
210 				     &scsi_io_handler_id);
211 	if (error != 0) {
212 		goto cleanup0;
213 	}
214 
215 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
216 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
217 				     &scsi_tmf_handler_id);
218 	if (error != 0) {
219 		goto cleanup0;
220 	}
221 
222 	/*
223 	 * If we're fibre channel and could support target mode, we register
224 	 * an ELS reply handler and give it resources.
225 	 */
226 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
227 		handler.reply_handler = mpt_fc_els_reply_handler;
228 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
229 		    &fc_els_handler_id);
230 		if (error != 0) {
231 			goto cleanup0;
232 		}
233 		if (mpt_add_els_buffers(mpt) == FALSE) {
234 			error = ENOMEM;
235 			goto cleanup0;
236 		}
237 		maxq -= mpt->els_cmds_allocated;
238 	}
239 
240 	/*
241 	 * If we support target mode, we register a reply handler for it,
242 	 * but don't add resources until we actually enable target mode.
243 	 */
244 	if ((mpt->role & MPT_ROLE_TARGET) != 0) {
245 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
246 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
247 		    &mpt->scsi_tgt_handler_id);
248 		if (error != 0) {
249 			goto cleanup0;
250 		}
251 	}
252 
253 	/*
254 	 * We keep one request reserved for timeout TMF requests.
255 	 */
256 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
257 	if (mpt->tmf_req == NULL) {
258 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
259 		error = ENOMEM;
260 		goto cleanup0;
261 	}
262 
263 	/*
264 	 * Mark the request as free even though not on the free list.
265 	 * There is only one TMF request allowed to be outstanding at
266 	 * a time and the TMF routines perform their own allocation
267 	 * tracking using the standard state flags.
268 	 */
269 	mpt->tmf_req->state = REQ_STATE_FREE;
270 	maxq--;
271 
272 	if (mpt_spawn_recovery_thread(mpt) != 0) {
273 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
274 		error = ENOMEM;
275 		goto cleanup0;
276 	}
277 
278 	/*
279 	 * The rest of this is CAM foo, for which we need to drop our lock
280 	 */
281 	MPTLOCK_2_CAMLOCK(mpt);
282 
283 	/*
284 	 * Create the device queue for our SIM(s).
285 	 */
286 	devq = cam_simq_alloc(maxq);
287 	if (devq == NULL) {
288 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
289 		error = ENOMEM;
290 		goto cleanup;
291 	}
292 
293 	/*
294 	 * Construct our SIM entry.
295 	 */
296 	mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
297 	    mpt->unit, 1, maxq, devq);
298 	if (mpt->sim == NULL) {
299 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
300 		cam_simq_free(devq);
301 		error = ENOMEM;
302 		goto cleanup;
303 	}
304 
305 	/*
306 	 * Register exactly this bus.
307 	 */
308 	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
309 		mpt_prt(mpt, "Bus registration Failed!\n");
310 		error = ENOMEM;
311 		goto cleanup;
312 	}
313 
314 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
315 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
316 		mpt_prt(mpt, "Unable to allocate Path!\n");
317 		error = ENOMEM;
318 		goto cleanup;
319 	}
320 
321 	/*
322 	 * Only register a second bus for RAID physical
323 	 * devices if the controller supports RAID.
324 	 */
325 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
326 		CAMLOCK_2_MPTLOCK(mpt);
327 		return (0);
328 	}
329 
330 	/*
331 	 * Create a "bus" to export all hidden disks to CAM.
332 	 */
333 	mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
334 	    mpt->unit, 1, maxq, devq);
335 	if (mpt->phydisk_sim == NULL) {
336 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
337 		error = ENOMEM;
338 		goto cleanup;
339 	}
340 
341 	/*
342 	 * Register this bus.
343 	 */
344 	if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
345 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
346 		error = ENOMEM;
347 		goto cleanup;
348 	}
349 
350 	if (xpt_create_path(&mpt->phydisk_path, NULL,
351 	    cam_sim_path(mpt->phydisk_sim),
352 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
353 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
354 		error = ENOMEM;
355 		goto cleanup;
356 	}
357 	CAMLOCK_2_MPTLOCK(mpt);
358 	return (0);
359 
360 cleanup:
361 	CAMLOCK_2_MPTLOCK(mpt);
362 cleanup0:
363 	mpt_cam_detach(mpt);
364 	return (error);
365 }
366 
367 /*
368  * Read FC configuration information
369  */
370 static int
371 mpt_read_config_info_fc(struct mpt_softc *mpt)
372 {
373 	char *topology = NULL;
374 	int rv;
375 
376 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
377 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
378 	if (rv) {
379 		return (-1);
380 	}
381 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
382 		 mpt->mpt_fcport_page0.Header.PageVersion,
383 		 mpt->mpt_fcport_page0.Header.PageLength,
384 		 mpt->mpt_fcport_page0.Header.PageNumber,
385 		 mpt->mpt_fcport_page0.Header.PageType);
386 
387 
388 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
389 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
390 	if (rv) {
391 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
392 		return (-1);
393 	}
394 
395 	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
396 
397 	switch (mpt->mpt_fcport_page0.Flags &
398 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
399 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
400 		mpt->mpt_fcport_speed = 0;
401 		topology = "<NO LOOP>";
402 		break;
403 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
404 		topology = "N-Port";
405 		break;
406 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
407 		topology = "NL-Port";
408 		break;
409 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
410 		topology = "F-Port";
411 		break;
412 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
413 		topology = "FL-Port";
414 		break;
415 	default:
416 		mpt->mpt_fcport_speed = 0;
417 		topology = "?";
418 		break;
419 	}
420 
421 	mpt_lprt(mpt, MPT_PRT_INFO,
422 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
423 	    "Speed %u-Gbit\n", topology,
424 	    mpt->mpt_fcport_page0.WWNN.High,
425 	    mpt->mpt_fcport_page0.WWNN.Low,
426 	    mpt->mpt_fcport_page0.WWPN.High,
427 	    mpt->mpt_fcport_page0.WWPN.Low,
428 	    mpt->mpt_fcport_speed);
429 
430 	return (0);
431 }
432 
433 /*
434  * Set FC configuration information.
435  */
436 static int
437 mpt_set_initial_config_fc(struct mpt_softc *mpt)
438 {
439 #if	0
440 	CONFIG_PAGE_FC_PORT_1 fc;
441 	U32 fl;
442 	int r, doit = 0;
443 
444 	if ((mpt->role & MPT_ROLE_TARGET) == 0) {
445 		return (0);
446 	}
447 
448 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
449 	    &fc.Header, FALSE, 5000);
450 	if (r) {
451 		return (mpt_fc_reset_link(mpt, 1));
452 	}
453 
454 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 0,
455 	    &fc.Header, sizeof (fc), FALSE, 5000);
456 	if (r) {
457 		return (mpt_fc_reset_link(mpt, 1));
458 	}
459 
460 	fl = le32toh(fc.Flags);
461 	if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
462 		fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
463 		doit = 1;
464 	}
465 	if (doit) {
466 		const char *cc;
467 
468 		mpt_lprt(mpt, MPT_PRT_INFO,
469 		    "FC Port Page 1: New Flags %x \n", fl);
470 		fc.Flags = htole32(fl);
471 		r = mpt_write_cfg_page(mpt,
472 		    MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT, 0, &fc.Header,
473 		    sizeof(fc), FALSE, 5000);
474 		if (r != 0) {
475 			cc = "FC PORT PAGE1 UPDATE: FAILED\n";
476 		} else {
477 			cc = "FC PORT PAGE1 UPDATED: SYSTEM NEEDS RESET\n";
478 		}
479 		mpt_prt(mpt, cc);
480 	}
481 #endif
482 	return (0);
483 }
484 
485 /*
486  * Read SAS configuration information. Nothing to do yet.
487  */
488 static int
489 mpt_read_config_info_sas(struct mpt_softc *mpt)
490 {
491 	return (0);
492 }
493 
494 /*
495  * Set SAS configuration information. Nothing to do yet.
496  */
497 static int
498 mpt_set_initial_config_sas(struct mpt_softc *mpt)
499 {
500 	return (0);
501 }
502 
503 /*
504  * Read SCSI configuration information
505  */
506 static int
507 mpt_read_config_info_spi(struct mpt_softc *mpt)
508 {
509 	int rv, i;
510 
511 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
512 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
513 	if (rv) {
514 		return (-1);
515 	}
516 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
517 	    mpt->mpt_port_page0.Header.PageVersion,
518 	    mpt->mpt_port_page0.Header.PageLength,
519 	    mpt->mpt_port_page0.Header.PageNumber,
520 	    mpt->mpt_port_page0.Header.PageType);
521 
522 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
523 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
524 	if (rv) {
525 		return (-1);
526 	}
527 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
528 	    mpt->mpt_port_page1.Header.PageVersion,
529 	    mpt->mpt_port_page1.Header.PageLength,
530 	    mpt->mpt_port_page1.Header.PageNumber,
531 	    mpt->mpt_port_page1.Header.PageType);
532 
533 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
534 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
535 	if (rv) {
536 		return (-1);
537 	}
538 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
539 	    mpt->mpt_port_page2.Header.PageVersion,
540 	    mpt->mpt_port_page2.Header.PageLength,
541 	    mpt->mpt_port_page2.Header.PageNumber,
542 	    mpt->mpt_port_page2.Header.PageType);
543 
544 	for (i = 0; i < 16; i++) {
545 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
546 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
547 		if (rv) {
548 			return (-1);
549 		}
550 		mpt_lprt(mpt, MPT_PRT_DEBUG,
551 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
552 		    mpt->mpt_dev_page0[i].Header.PageVersion,
553 		    mpt->mpt_dev_page0[i].Header.PageLength,
554 		    mpt->mpt_dev_page0[i].Header.PageNumber,
555 		    mpt->mpt_dev_page0[i].Header.PageType);
556 
557 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
558 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
559 		if (rv) {
560 			return (-1);
561 		}
562 		mpt_lprt(mpt, MPT_PRT_DEBUG,
563 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
564 		    mpt->mpt_dev_page1[i].Header.PageVersion,
565 		    mpt->mpt_dev_page1[i].Header.PageLength,
566 		    mpt->mpt_dev_page1[i].Header.PageNumber,
567 		    mpt->mpt_dev_page1[i].Header.PageType);
568 	}
569 
570 	/*
571 	 * At this point, we don't *have* to fail. As long as we have
572 	 * valid config header information, we can (barely) lurch
573 	 * along.
574 	 */
575 
576 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
577 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
578 	if (rv) {
579 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
580 	} else {
581 		mpt_lprt(mpt, MPT_PRT_DEBUG,
582 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
583 		    mpt->mpt_port_page0.Capabilities,
584 		    mpt->mpt_port_page0.PhysicalInterface);
585 	}
586 
587 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
588 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
589 	if (rv) {
590 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
591 	} else {
592 		mpt_lprt(mpt, MPT_PRT_DEBUG,
593 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
594 		    mpt->mpt_port_page1.Configuration,
595 		    mpt->mpt_port_page1.OnBusTimerValue);
596 	}
597 
598 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
599 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
600 	if (rv) {
601 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
602 	} else {
603 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
604 		    "Port Page 2: Flags %x Settings %x\n",
605 		    mpt->mpt_port_page2.PortFlags,
606 		    mpt->mpt_port_page2.PortSettings);
607 		for (i = 0; i < 16; i++) {
608 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
609 		  	    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
610 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
611 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
612 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
613 		}
614 	}
615 
616 	for (i = 0; i < 16; i++) {
617 		rv = mpt_read_cur_cfg_page(mpt, i,
618 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
619 		    FALSE, 5000);
620 		if (rv) {
621 			mpt_prt(mpt,
622 			    "cannot read SPI Target %d Device Page 0\n", i);
623 			continue;
624 		}
625 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
626 		    "target %d page 0: Negotiated Params %x Information %x\n",
627 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
628 		    mpt->mpt_dev_page0[i].Information);
629 
630 		rv = mpt_read_cur_cfg_page(mpt, i,
631 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
632 		    FALSE, 5000);
633 		if (rv) {
634 			mpt_prt(mpt,
635 			    "cannot read SPI Target %d Device Page 1\n", i);
636 			continue;
637 		}
638 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
639 		    "target %d page 1: Requested Params %x Configuration %x\n",
640 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
641 		    mpt->mpt_dev_page1[i].Configuration);
642 	}
643 	return (0);
644 }
645 
646 /*
647  * Validate SPI configuration information.
648  *
649  * In particular, validate SPI Port Page 1.
650  */
651 static int
652 mpt_set_initial_config_spi(struct mpt_softc *mpt)
653 {
654 	int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
655 	int error;
656 
657 	mpt->mpt_disc_enable = 0xff;
658 	mpt->mpt_tag_enable = 0;
659 
660 	if (mpt->mpt_port_page1.Configuration != pp1val) {
661 		CONFIG_PAGE_SCSI_PORT_1 tmp;
662 
663 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
664 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
665 		tmp = mpt->mpt_port_page1;
666 		tmp.Configuration = pp1val;
667 		error = mpt_write_cur_cfg_page(mpt, 0,
668 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
669 		if (error) {
670 			return (-1);
671 		}
672 		error = mpt_read_cur_cfg_page(mpt, 0,
673 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
674 		if (error) {
675 			return (-1);
676 		}
677 		if (tmp.Configuration != pp1val) {
678 			mpt_prt(mpt,
679 			    "failed to reset SPI Port Page 1 Config value\n");
680 			return (-1);
681 		}
682 		mpt->mpt_port_page1 = tmp;
683 	}
684 
685 	/*
686 	 * The purpose of this exercise is to get
687 	 * all targets back to async/narrow.
688 	 *
689 	 * We skip this step if the BIOS has already negotiated
690 	 * speeds with the targets and does not require us to
691 	 * do Domain Validation.
692 	 */
693 	i = mpt->mpt_port_page2.PortSettings &
694 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
695 	j = mpt->mpt_port_page2.PortFlags &
696 	    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
697 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* &&
698 	    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) {
699 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
700 		    "honoring BIOS transfer negotiations\n");
701 	} else {
702 		for (i = 0; i < 16; i++) {
703 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
704 			mpt->mpt_dev_page1[i].Configuration = 0;
705 			(void) mpt_update_spi_config(mpt, i);
706 		}
707 	}
708 	return (0);
709 }
710 
711 int
712 mpt_cam_enable(struct mpt_softc *mpt)
713 {
714 	if (mpt->is_fc) {
715 		if (mpt_read_config_info_fc(mpt)) {
716 			return (EIO);
717 		}
718 		if (mpt_set_initial_config_fc(mpt)) {
719 			return (EIO);
720 		}
721 	} else if (mpt->is_sas) {
722 		if (mpt_read_config_info_sas(mpt)) {
723 			return (EIO);
724 		}
725 		if (mpt_set_initial_config_sas(mpt)) {
726 			return (EIO);
727 		}
728 	} else if (mpt->is_spi) {
729 		if (mpt_read_config_info_spi(mpt)) {
730 			return (EIO);
731 		}
732 		if (mpt_set_initial_config_spi(mpt)) {
733 			return (EIO);
734 		}
735 	}
736 	return (0);
737 }
738 
739 void
740 mpt_cam_detach(struct mpt_softc *mpt)
741 {
742 	mpt_handler_t handler;
743 
744 	mpt_terminate_recovery_thread(mpt);
745 
746 	handler.reply_handler = mpt_scsi_reply_handler;
747 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
748 			       scsi_io_handler_id);
749 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
750 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
751 			       scsi_tmf_handler_id);
752 	handler.reply_handler = mpt_fc_els_reply_handler;
753 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
754 			       fc_els_handler_id);
755 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
756 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
757 			       mpt->scsi_tgt_handler_id);
758 
759 	if (mpt->tmf_req != NULL) {
760 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
761 		mpt_free_request(mpt, mpt->tmf_req);
762 		mpt->tmf_req = NULL;
763 	}
764 
765 	if (mpt->sim != NULL) {
766 		MPTLOCK_2_CAMLOCK(mpt);
767 		xpt_free_path(mpt->path);
768 		xpt_bus_deregister(cam_sim_path(mpt->sim));
769 		cam_sim_free(mpt->sim, TRUE);
770 		mpt->sim = NULL;
771 		CAMLOCK_2_MPTLOCK(mpt);
772 	}
773 
774 	if (mpt->phydisk_sim != NULL) {
775 		MPTLOCK_2_CAMLOCK(mpt);
776 		xpt_free_path(mpt->phydisk_path);
777 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
778 		cam_sim_free(mpt->phydisk_sim, TRUE);
779 		mpt->phydisk_sim = NULL;
780 		CAMLOCK_2_MPTLOCK(mpt);
781 	}
782 }
783 
784 /* This routine is used after a system crash to dump core onto the swap device.
785  */
786 static void
787 mpt_poll(struct cam_sim *sim)
788 {
789 	struct mpt_softc *mpt;
790 
791 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
792 	MPT_LOCK(mpt);
793 	mpt_intr(mpt);
794 	MPT_UNLOCK(mpt);
795 }
796 
797 /*
798  * Watchdog timeout routine for SCSI requests.
799  */
800 static void
801 mpt_timeout(void *arg)
802 {
803 	union ccb	 *ccb;
804 	struct mpt_softc *mpt;
805 	request_t	 *req;
806 
807 	ccb = (union ccb *)arg;
808 	mpt = ccb->ccb_h.ccb_mpt_ptr;
809 
810 	MPT_LOCK(mpt);
811 	req = ccb->ccb_h.ccb_req_ptr;
812 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
813 	    req->serno, ccb, req->ccb);
814 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
815 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
816 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
817 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
818 		req->state |= REQ_STATE_TIMEDOUT;
819 		mpt_wakeup_recovery_thread(mpt);
820 	}
821 	MPT_UNLOCK(mpt);
822 }
823 
824 /*
825  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
826  *
827  * Takes a list of physical segments and builds the SGL for SCSI IO command
828  * and forwards the commard to the IOC after one last check that CAM has not
829  * aborted the transaction.
830  */
831 static void
832 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
833 {
834 	request_t *req, *trq;
835 	char *mpt_off;
836 	union ccb *ccb;
837 	struct mpt_softc *mpt;
838 	int seg, first_lim;
839 	uint32_t flags, nxt_off;
840 	void *sglp = NULL;
841 	MSG_REQUEST_HEADER *hdrp;
842 	SGE_SIMPLE64 *se;
843 	SGE_CHAIN64 *ce;
844 	int istgt = 0;
845 
846 	req = (request_t *)arg;
847 	ccb = req->ccb;
848 
849 	mpt = ccb->ccb_h.ccb_mpt_ptr;
850 	req = ccb->ccb_h.ccb_req_ptr;
851 
852 	hdrp = req->req_vbuf;
853 	mpt_off = req->req_vbuf;
854 
855 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
856 		error = EFBIG;
857 	}
858 
859 	if (error == 0) {
860 		switch (hdrp->Function) {
861 		case MPI_FUNCTION_SCSI_IO_REQUEST:
862 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
863 			istgt = 0;
864 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
865 			break;
866 		case MPI_FUNCTION_TARGET_ASSIST:
867 			istgt = 1;
868 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
869 			break;
870 		default:
871 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
872 			    hdrp->Function);
873 			error = EINVAL;
874 			break;
875 		}
876 	}
877 
878 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
879 		error = EFBIG;
880 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
881 		    nseg, mpt->max_seg_cnt);
882 	}
883 
884 bad:
885 	if (error != 0) {
886 		if (error != EFBIG && error != ENOMEM) {
887 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
888 		}
889 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
890 			cam_status status;
891 			mpt_freeze_ccb(ccb);
892 			if (error == EFBIG) {
893 				status = CAM_REQ_TOO_BIG;
894 			} else if (error == ENOMEM) {
895 				if (mpt->outofbeer == 0) {
896 					mpt->outofbeer = 1;
897 					xpt_freeze_simq(mpt->sim, 1);
898 					mpt_lprt(mpt, MPT_PRT_DEBUG,
899 					    "FREEZEQ\n");
900 				}
901 				status = CAM_REQUEUE_REQ;
902 			} else {
903 				status = CAM_REQ_CMP_ERR;
904 			}
905 			mpt_set_ccb_status(ccb, status);
906 		}
907 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
908 			request_t *cmd_req =
909 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
910 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
911 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
912 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
913 		}
914 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
915 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
916 		xpt_done(ccb);
917 		CAMLOCK_2_MPTLOCK(mpt);
918 		mpt_free_request(mpt, req);
919 		MPTLOCK_2_CAMLOCK(mpt);
920 		return;
921 	}
922 
923 	/*
924 	 * No data to transfer?
925 	 * Just make a single simple SGL with zero length.
926 	 */
927 
928 	if (mpt->verbose >= MPT_PRT_DEBUG) {
929 		int tidx = ((char *)sglp) - mpt_off;
930 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
931 	}
932 
933 	if (nseg == 0) {
934 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
935 		MPI_pSGE_SET_FLAGS(se1,
936 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
937 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
938 		goto out;
939 	}
940 
941 
942 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
943 	if (istgt == 0) {
944 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
945 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
946 		}
947 	} else {
948 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
949 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
950 		}
951 	}
952 
953 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
954 		bus_dmasync_op_t op;
955 		if (istgt == 0) {
956 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
957 				op = BUS_DMASYNC_PREREAD;
958 			} else {
959 				op = BUS_DMASYNC_PREWRITE;
960 			}
961 		} else {
962 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
963 				op = BUS_DMASYNC_PREWRITE;
964 			} else {
965 				op = BUS_DMASYNC_PREREAD;
966 			}
967 		}
968 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
969 	}
970 
971 	/*
972 	 * Okay, fill in what we can at the end of the command frame.
973 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
974 	 * the command frame.
975 	 *
976 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
977 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
978 	 * that.
979 	 */
980 
981 	if (nseg < MPT_NSGL_FIRST(mpt)) {
982 		first_lim = nseg;
983 	} else {
984 		/*
985 		 * Leave room for CHAIN element
986 		 */
987 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
988 	}
989 
990 	se = (SGE_SIMPLE64 *) sglp;
991 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
992 		uint32_t tf;
993 
994 		memset(se, 0, sizeof (*se));
995 		se->Address.Low = dm_segs->ds_addr;
996 		if (sizeof(bus_addr_t) > 4) {
997 			se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
998 		}
999 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1000 		tf = flags;
1001 		if (seg == first_lim - 1) {
1002 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1003 		}
1004 		if (seg == nseg - 1) {
1005 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1006 				MPI_SGE_FLAGS_END_OF_BUFFER;
1007 		}
1008 		MPI_pSGE_SET_FLAGS(se, tf);
1009 	}
1010 
1011 	if (seg == nseg) {
1012 		goto out;
1013 	}
1014 
1015 	/*
1016 	 * Tell the IOC where to find the first chain element.
1017 	 */
1018 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1019 	nxt_off = MPT_RQSL(mpt);
1020 	trq = req;
1021 
1022 	/*
1023 	 * Make up the rest of the data segments out of a chain element
1024 	 * (contiained in the current request frame) which points to
1025 	 * SIMPLE64 elements in the next request frame, possibly ending
1026 	 * with *another* chain element (if there's more).
1027 	 */
1028 	while (seg < nseg) {
1029 		int this_seg_lim;
1030 		uint32_t tf, cur_off;
1031 		bus_addr_t chain_list_addr;
1032 
1033 		/*
1034 		 * Point to the chain descriptor. Note that the chain
1035 		 * descriptor is at the end of the *previous* list (whether
1036 		 * chain or simple).
1037 		 */
1038 		ce = (SGE_CHAIN64 *) se;
1039 
1040 		/*
1041 		 * Before we change our current pointer, make  sure we won't
1042 		 * overflow the request area with this frame. Note that we
1043 		 * test against 'greater than' here as it's okay in this case
1044 		 * to have next offset be just outside the request area.
1045 		 */
1046 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1047 			nxt_off = MPT_REQUEST_AREA;
1048 			goto next_chain;
1049 		}
1050 
1051 		/*
1052 		 * Set our SGE element pointer to the beginning of the chain
1053 		 * list and update our next chain list offset.
1054 		 */
1055 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1056 		cur_off = nxt_off;
1057 		nxt_off += MPT_RQSL(mpt);
1058 
1059 		/*
1060 		 * Now initialized the chain descriptor.
1061 		 */
1062 		memset(ce, 0, sizeof (*ce));
1063 
1064 		/*
1065 		 * Get the physical address of the chain list.
1066 		 */
1067 		chain_list_addr = trq->req_pbuf;
1068 		chain_list_addr += cur_off;
1069 		if (sizeof (bus_addr_t) > 4) {
1070 			ce->Address.High =
1071 			    (uint32_t) ((uint64_t)chain_list_addr >> 32);
1072 		}
1073 		ce->Address.Low = (uint32_t) chain_list_addr;
1074 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1075 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1076 
1077 		/*
1078 		 * If we have more than a frame's worth of segments left,
1079 		 * set up the chain list to have the last element be another
1080 		 * chain descriptor.
1081 		 */
1082 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1083 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1084 			/*
1085 			 * The length of the chain is the length in bytes of the
1086 			 * number of segments plus the next chain element.
1087 			 *
1088 			 * The next chain descriptor offset is the length,
1089 			 * in words, of the number of segments.
1090 			 */
1091 			ce->Length = (this_seg_lim - seg) *
1092 			    sizeof (SGE_SIMPLE64);
1093 			ce->NextChainOffset = ce->Length >> 2;
1094 			ce->Length += sizeof (SGE_CHAIN64);
1095 		} else {
1096 			this_seg_lim = nseg;
1097 			ce->Length = (this_seg_lim - seg) *
1098 			    sizeof (SGE_SIMPLE64);
1099 		}
1100 
1101 		/*
1102 		 * Fill in the chain list SGE elements with our segment data.
1103 		 *
1104 		 * If we're the last element in this chain list, set the last
1105 		 * element flag. If we're the completely last element period,
1106 		 * set the end of list and end of buffer flags.
1107 		 */
1108 		while (seg < this_seg_lim) {
1109 			memset(se, 0, sizeof (*se));
1110 			se->Address.Low = dm_segs->ds_addr;
1111 			if (sizeof (bus_addr_t) > 4) {
1112 				se->Address.High =
1113 				    ((uint64_t)dm_segs->ds_addr) >> 32;
1114 			}
1115 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1116 			tf = flags;
1117 			if (seg ==  this_seg_lim - 1) {
1118 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1119 			}
1120 			if (seg == nseg - 1) {
1121 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1122 					MPI_SGE_FLAGS_END_OF_BUFFER;
1123 			}
1124 			MPI_pSGE_SET_FLAGS(se, tf);
1125 			se++;
1126 			seg++;
1127 			dm_segs++;
1128 		}
1129 
1130     next_chain:
1131 		/*
1132 		 * If we have more segments to do and we've used up all of
1133 		 * the space in a request area, go allocate another one
1134 		 * and chain to that.
1135 		 */
1136 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1137 			request_t *nrq;
1138 
1139 			CAMLOCK_2_MPTLOCK(mpt);
1140 			nrq = mpt_get_request(mpt, FALSE);
1141 			MPTLOCK_2_CAMLOCK(mpt);
1142 
1143 			if (nrq == NULL) {
1144 				error = ENOMEM;
1145 				goto bad;
1146 			}
1147 
1148 			/*
1149 			 * Append the new request area on the tail of our list.
1150 			 */
1151 			if ((trq = req->chain) == NULL) {
1152 				req->chain = nrq;
1153 			} else {
1154 				while (trq->chain != NULL) {
1155 					trq = trq->chain;
1156 				}
1157 				trq->chain = nrq;
1158 			}
1159 			trq = nrq;
1160 			mpt_off = trq->req_vbuf;
1161 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1162 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1163 			}
1164 			nxt_off = 0;
1165 		}
1166 	}
1167 out:
1168 
1169 	/*
1170 	 * Last time we need to check if this CCB needs to be aborted.
1171 	 */
1172 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1173 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1174 			request_t *cmd_req =
1175 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1176 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1177 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1178 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1179 		}
1180 		mpt_prt(mpt,
1181 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1182 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1183 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1184 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1185 		}
1186 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1187 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1188 		xpt_done(ccb);
1189 		CAMLOCK_2_MPTLOCK(mpt);
1190 		mpt_free_request(mpt, req);
1191 		MPTLOCK_2_CAMLOCK(mpt);
1192 		return;
1193 	}
1194 
1195 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1196 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1197 		ccb->ccb_h.timeout_ch =
1198 			timeout(mpt_timeout, (caddr_t)ccb,
1199 				(ccb->ccb_h.timeout * hz) / 1000);
1200 	} else {
1201 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1202 	}
1203 	if (mpt->verbose > MPT_PRT_DEBUG) {
1204 		int nc = 0;
1205 		mpt_print_request(req->req_vbuf);
1206 		for (trq = req->chain; trq; trq = trq->chain) {
1207 			printf("  Additional Chain Area %d\n", nc++);
1208 			mpt_dump_sgl(trq->req_vbuf, 0);
1209 		}
1210 	}
1211 
1212 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1213 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1214 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1215 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1216 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1217 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1218 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1219 		} else {
1220 			tgt->state = TGT_STATE_MOVING_DATA;
1221 		}
1222 #else
1223 		tgt->state = TGT_STATE_MOVING_DATA;
1224 #endif
1225 	}
1226 	CAMLOCK_2_MPTLOCK(mpt);
1227 	mpt_send_cmd(mpt, req);
1228 	MPTLOCK_2_CAMLOCK(mpt);
1229 }
1230 
1231 static void
1232 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1233 {
1234 	request_t *req, *trq;
1235 	char *mpt_off;
1236 	union ccb *ccb;
1237 	struct mpt_softc *mpt;
1238 	int seg, first_lim;
1239 	uint32_t flags, nxt_off;
1240 	void *sglp = NULL;
1241 	MSG_REQUEST_HEADER *hdrp;
1242 	SGE_SIMPLE32 *se;
1243 	SGE_CHAIN32 *ce;
1244 	int istgt = 0;
1245 
1246 	req = (request_t *)arg;
1247 	ccb = req->ccb;
1248 
1249 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1250 	req = ccb->ccb_h.ccb_req_ptr;
1251 
1252 	hdrp = req->req_vbuf;
1253 	mpt_off = req->req_vbuf;
1254 
1255 
1256 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1257 		error = EFBIG;
1258 	}
1259 
1260 	if (error == 0) {
1261 		switch (hdrp->Function) {
1262 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1263 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1264 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1265 			break;
1266 		case MPI_FUNCTION_TARGET_ASSIST:
1267 			istgt = 1;
1268 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1269 			break;
1270 		default:
1271 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1272 			    hdrp->Function);
1273 			error = EINVAL;
1274 			break;
1275 		}
1276 	}
1277 
1278 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1279 		error = EFBIG;
1280 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1281 		    nseg, mpt->max_seg_cnt);
1282 	}
1283 
1284 bad:
1285 	if (error != 0) {
1286 		if (error != EFBIG && error != ENOMEM) {
1287 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1288 		}
1289 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1290 			cam_status status;
1291 			mpt_freeze_ccb(ccb);
1292 			if (error == EFBIG) {
1293 				status = CAM_REQ_TOO_BIG;
1294 			} else if (error == ENOMEM) {
1295 				if (mpt->outofbeer == 0) {
1296 					mpt->outofbeer = 1;
1297 					xpt_freeze_simq(mpt->sim, 1);
1298 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1299 					    "FREEZEQ\n");
1300 				}
1301 				status = CAM_REQUEUE_REQ;
1302 			} else {
1303 				status = CAM_REQ_CMP_ERR;
1304 			}
1305 			mpt_set_ccb_status(ccb, status);
1306 		}
1307 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1308 			request_t *cmd_req =
1309 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1310 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1311 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1312 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1313 		}
1314 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1315 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1316 		xpt_done(ccb);
1317 		CAMLOCK_2_MPTLOCK(mpt);
1318 		mpt_free_request(mpt, req);
1319 		MPTLOCK_2_CAMLOCK(mpt);
1320 		return;
1321 	}
1322 
1323 	/*
1324 	 * No data to transfer?
1325 	 * Just make a single simple SGL with zero length.
1326 	 */
1327 
1328 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1329 		int tidx = ((char *)sglp) - mpt_off;
1330 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1331 	}
1332 
1333 	if (nseg == 0) {
1334 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1335 		MPI_pSGE_SET_FLAGS(se1,
1336 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1337 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1338 		goto out;
1339 	}
1340 
1341 
1342 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1343 	if (istgt == 0) {
1344 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1345 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1346 		}
1347 	} else {
1348 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1349 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1350 		}
1351 	}
1352 
1353 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1354 		bus_dmasync_op_t op;
1355 		if (istgt) {
1356 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1357 				op = BUS_DMASYNC_PREREAD;
1358 			} else {
1359 				op = BUS_DMASYNC_PREWRITE;
1360 			}
1361 		} else {
1362 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1363 				op = BUS_DMASYNC_PREWRITE;
1364 			} else {
1365 				op = BUS_DMASYNC_PREREAD;
1366 			}
1367 		}
1368 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1369 	}
1370 
1371 	/*
1372 	 * Okay, fill in what we can at the end of the command frame.
1373 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1374 	 * the command frame.
1375 	 *
1376 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1377 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1378 	 * that.
1379 	 */
1380 
1381 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1382 		first_lim = nseg;
1383 	} else {
1384 		/*
1385 		 * Leave room for CHAIN element
1386 		 */
1387 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1388 	}
1389 
1390 	se = (SGE_SIMPLE32 *) sglp;
1391 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1392 		uint32_t tf;
1393 
1394 		memset(se, 0,sizeof (*se));
1395 		se->Address = dm_segs->ds_addr;
1396 
1397 
1398 
1399 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1400 		tf = flags;
1401 		if (seg == first_lim - 1) {
1402 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1403 		}
1404 		if (seg == nseg - 1) {
1405 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1406 				MPI_SGE_FLAGS_END_OF_BUFFER;
1407 		}
1408 		MPI_pSGE_SET_FLAGS(se, tf);
1409 	}
1410 
1411 	if (seg == nseg) {
1412 		goto out;
1413 	}
1414 
1415 	/*
1416 	 * Tell the IOC where to find the first chain element.
1417 	 */
1418 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1419 	nxt_off = MPT_RQSL(mpt);
1420 	trq = req;
1421 
1422 	/*
1423 	 * Make up the rest of the data segments out of a chain element
1424 	 * (contiained in the current request frame) which points to
1425 	 * SIMPLE32 elements in the next request frame, possibly ending
1426 	 * with *another* chain element (if there's more).
1427 	 */
1428 	while (seg < nseg) {
1429 		int this_seg_lim;
1430 		uint32_t tf, cur_off;
1431 		bus_addr_t chain_list_addr;
1432 
1433 		/*
1434 		 * Point to the chain descriptor. Note that the chain
1435 		 * descriptor is at the end of the *previous* list (whether
1436 		 * chain or simple).
1437 		 */
1438 		ce = (SGE_CHAIN32 *) se;
1439 
1440 		/*
1441 		 * Before we change our current pointer, make  sure we won't
1442 		 * overflow the request area with this frame. Note that we
1443 		 * test against 'greater than' here as it's okay in this case
1444 		 * to have next offset be just outside the request area.
1445 		 */
1446 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1447 			nxt_off = MPT_REQUEST_AREA;
1448 			goto next_chain;
1449 		}
1450 
1451 		/*
1452 		 * Set our SGE element pointer to the beginning of the chain
1453 		 * list and update our next chain list offset.
1454 		 */
1455 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1456 		cur_off = nxt_off;
1457 		nxt_off += MPT_RQSL(mpt);
1458 
1459 		/*
1460 		 * Now initialized the chain descriptor.
1461 		 */
1462 		memset(ce, 0, sizeof (*ce));
1463 
1464 		/*
1465 		 * Get the physical address of the chain list.
1466 		 */
1467 		chain_list_addr = trq->req_pbuf;
1468 		chain_list_addr += cur_off;
1469 
1470 
1471 
1472 		ce->Address = chain_list_addr;
1473 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1474 
1475 
1476 		/*
1477 		 * If we have more than a frame's worth of segments left,
1478 		 * set up the chain list to have the last element be another
1479 		 * chain descriptor.
1480 		 */
1481 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1482 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1483 			/*
1484 			 * The length of the chain is the length in bytes of the
1485 			 * number of segments plus the next chain element.
1486 			 *
1487 			 * The next chain descriptor offset is the length,
1488 			 * in words, of the number of segments.
1489 			 */
1490 			ce->Length = (this_seg_lim - seg) *
1491 			    sizeof (SGE_SIMPLE32);
1492 			ce->NextChainOffset = ce->Length >> 2;
1493 			ce->Length += sizeof (SGE_CHAIN32);
1494 		} else {
1495 			this_seg_lim = nseg;
1496 			ce->Length = (this_seg_lim - seg) *
1497 			    sizeof (SGE_SIMPLE32);
1498 		}
1499 
1500 		/*
1501 		 * Fill in the chain list SGE elements with our segment data.
1502 		 *
1503 		 * If we're the last element in this chain list, set the last
1504 		 * element flag. If we're the completely last element period,
1505 		 * set the end of list and end of buffer flags.
1506 		 */
1507 		while (seg < this_seg_lim) {
1508 			memset(se, 0, sizeof (*se));
1509 			se->Address = dm_segs->ds_addr;
1510 
1511 
1512 
1513 
1514 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1515 			tf = flags;
1516 			if (seg ==  this_seg_lim - 1) {
1517 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1518 			}
1519 			if (seg == nseg - 1) {
1520 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1521 					MPI_SGE_FLAGS_END_OF_BUFFER;
1522 			}
1523 			MPI_pSGE_SET_FLAGS(se, tf);
1524 			se++;
1525 			seg++;
1526 			dm_segs++;
1527 		}
1528 
1529     next_chain:
1530 		/*
1531 		 * If we have more segments to do and we've used up all of
1532 		 * the space in a request area, go allocate another one
1533 		 * and chain to that.
1534 		 */
1535 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1536 			request_t *nrq;
1537 
1538 			CAMLOCK_2_MPTLOCK(mpt);
1539 			nrq = mpt_get_request(mpt, FALSE);
1540 			MPTLOCK_2_CAMLOCK(mpt);
1541 
1542 			if (nrq == NULL) {
1543 				error = ENOMEM;
1544 				goto bad;
1545 			}
1546 
1547 			/*
1548 			 * Append the new request area on the tail of our list.
1549 			 */
1550 			if ((trq = req->chain) == NULL) {
1551 				req->chain = nrq;
1552 			} else {
1553 				while (trq->chain != NULL) {
1554 					trq = trq->chain;
1555 				}
1556 				trq->chain = nrq;
1557 			}
1558 			trq = nrq;
1559 			mpt_off = trq->req_vbuf;
1560 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1561 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1562 			}
1563 			nxt_off = 0;
1564 		}
1565 	}
1566 out:
1567 
1568 	/*
1569 	 * Last time we need to check if this CCB needs to be aborted.
1570 	 */
1571 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1572 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1573 			request_t *cmd_req =
1574 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1575 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1576 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1577 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1578 		}
1579 		mpt_prt(mpt,
1580 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1581 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1582 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1583 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1584 		}
1585 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1586 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1587 		xpt_done(ccb);
1588 		CAMLOCK_2_MPTLOCK(mpt);
1589 		mpt_free_request(mpt, req);
1590 		MPTLOCK_2_CAMLOCK(mpt);
1591 		return;
1592 	}
1593 
1594 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1595 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1596 		ccb->ccb_h.timeout_ch =
1597 			timeout(mpt_timeout, (caddr_t)ccb,
1598 				(ccb->ccb_h.timeout * hz) / 1000);
1599 	} else {
1600 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1601 	}
1602 	if (mpt->verbose > MPT_PRT_DEBUG) {
1603 		int nc = 0;
1604 		mpt_print_request(req->req_vbuf);
1605 		for (trq = req->chain; trq; trq = trq->chain) {
1606 			printf("  Additional Chain Area %d\n", nc++);
1607 			mpt_dump_sgl(trq->req_vbuf, 0);
1608 		}
1609 	}
1610 
1611 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1612 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1613 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1614 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1615 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1616 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1617 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1618 		} else {
1619 			tgt->state = TGT_STATE_MOVING_DATA;
1620 		}
1621 #else
1622 		tgt->state = TGT_STATE_MOVING_DATA;
1623 #endif
1624 	}
1625 	CAMLOCK_2_MPTLOCK(mpt);
1626 	mpt_send_cmd(mpt, req);
1627 	MPTLOCK_2_CAMLOCK(mpt);
1628 }
1629 
1630 static void
1631 mpt_start(struct cam_sim *sim, union ccb *ccb)
1632 {
1633 	request_t *req;
1634 	struct mpt_softc *mpt;
1635 	MSG_SCSI_IO_REQUEST *mpt_req;
1636 	struct ccb_scsiio *csio = &ccb->csio;
1637 	struct ccb_hdr *ccbh = &ccb->ccb_h;
1638 	bus_dmamap_callback_t *cb;
1639 	target_id_t tgt;
1640 	int raid_passthru;
1641 
1642 	/* Get the pointer for the physical addapter */
1643 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1644 	raid_passthru = (sim == mpt->phydisk_sim);
1645 
1646 	CAMLOCK_2_MPTLOCK(mpt);
1647 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
1648 		if (mpt->outofbeer == 0) {
1649 			mpt->outofbeer = 1;
1650 			xpt_freeze_simq(mpt->sim, 1);
1651 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
1652 		}
1653 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1654 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
1655 		MPTLOCK_2_CAMLOCK(mpt);
1656 		xpt_done(ccb);
1657 		return;
1658 	}
1659 #ifdef	INVARIANTS
1660 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
1661 #endif
1662 	MPTLOCK_2_CAMLOCK(mpt);
1663 
1664 	if (sizeof (bus_addr_t) > 4) {
1665 		cb = mpt_execute_req_a64;
1666 	} else {
1667 		cb = mpt_execute_req;
1668 	}
1669 
1670 	/*
1671 	 * Link the ccb and the request structure so we can find
1672 	 * the other knowing either the request or the ccb
1673 	 */
1674 	req->ccb = ccb;
1675 	ccb->ccb_h.ccb_req_ptr = req;
1676 
1677 	/* Now we build the command for the IOC */
1678 	mpt_req = req->req_vbuf;
1679 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
1680 
1681 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1682 	if (raid_passthru) {
1683 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
1684 		CAMLOCK_2_MPTLOCK(mpt);
1685 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
1686 			MPTLOCK_2_CAMLOCK(mpt);
1687 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1688 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
1689 			xpt_done(ccb);
1690 			return;
1691 		}
1692 		MPTLOCK_2_CAMLOCK(mpt);
1693 		mpt_req->Bus = 0;	/* we never set bus here */
1694 	} else {
1695 		tgt = ccb->ccb_h.target_id;
1696 		mpt_req->Bus = 0;	/* XXX */
1697 
1698 	}
1699 	mpt_req->SenseBufferLength =
1700 		(csio->sense_len < MPT_SENSE_SIZE) ?
1701 		 csio->sense_len : MPT_SENSE_SIZE;
1702 
1703 	/*
1704 	 * We use the message context to find the request structure when we
1705 	 * Get the command completion interrupt from the IOC.
1706 	 */
1707 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
1708 
1709 	/* Which physical device to do the I/O on */
1710 	mpt_req->TargetID = tgt;
1711 
1712 	/* We assume a single level LUN type */
1713 	if (ccb->ccb_h.target_lun >= 256) {
1714 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
1715 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
1716 	} else {
1717 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
1718 	}
1719 
1720 	/* Set the direction of the transfer */
1721 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1722 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
1723 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1724 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
1725 	} else {
1726 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
1727 	}
1728 
1729 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1730 		switch(ccb->csio.tag_action) {
1731 		case MSG_HEAD_OF_Q_TAG:
1732 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
1733 			break;
1734 		case MSG_ACA_TASK:
1735 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
1736 			break;
1737 		case MSG_ORDERED_Q_TAG:
1738 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
1739 			break;
1740 		case MSG_SIMPLE_Q_TAG:
1741 		default:
1742 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1743 			break;
1744 		}
1745 	} else {
1746 		if (mpt->is_fc || mpt->is_sas) {
1747 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1748 		} else {
1749 			/* XXX No such thing for a target doing packetized. */
1750 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
1751 		}
1752 	}
1753 
1754 	if (mpt->is_spi) {
1755 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1756 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
1757 		}
1758 	}
1759 
1760 	/* Copy the scsi command block into place */
1761 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1762 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
1763 	} else {
1764 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
1765 	}
1766 
1767 	mpt_req->CDBLength = csio->cdb_len;
1768 	mpt_req->DataLength = csio->dxfer_len;
1769 	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
1770 
1771 	/*
1772 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
1773 	 */
1774 	if (mpt->verbose == MPT_PRT_DEBUG) {
1775 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
1776 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
1777 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
1778 		if (mpt_req->Control != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
1779 			mpt_prtc(mpt, "(%s %u byte%s ",
1780 			    (mpt_req->Control == MPI_SCSIIO_CONTROL_READ)?
1781 			    "read" : "write",  csio->dxfer_len,
1782 			    (csio->dxfer_len == 1)? ")" : "s)");
1783 		}
1784 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
1785 		    ccb->ccb_h.target_lun, req, req->serno);
1786 	}
1787 
1788 	/*
1789 	 * If we have any data to send with this command map it into bus space.
1790 	 */
1791 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1792 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
1793 			/*
1794 			 * We've been given a pointer to a single buffer.
1795 			 */
1796 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
1797 				/*
1798 				 * Virtual address that needs to translated into
1799 				 * one or more physical address ranges.
1800 				 */
1801 				int error;
1802 				int s = splsoftvm();
1803 				error = bus_dmamap_load(mpt->buffer_dmat,
1804 				    req->dmap, csio->data_ptr, csio->dxfer_len,
1805 				    cb, req, 0);
1806 				splx(s);
1807 				if (error == EINPROGRESS) {
1808 					/*
1809 					 * So as to maintain ordering,
1810 					 * freeze the controller queue
1811 					 * until our mapping is
1812 					 * returned.
1813 					 */
1814 					xpt_freeze_simq(mpt->sim, 1);
1815 					ccbh->status |= CAM_RELEASE_SIMQ;
1816 				}
1817 			} else {
1818 				/*
1819 				 * We have been given a pointer to single
1820 				 * physical buffer.
1821 				 */
1822 				struct bus_dma_segment seg;
1823 				seg.ds_addr =
1824 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1825 				seg.ds_len = csio->dxfer_len;
1826 				(*cb)(req, &seg, 1, 0);
1827 			}
1828 		} else {
1829 			/*
1830 			 * We have been given a list of addresses.
1831 			 * This case could be easily supported but they are not
1832 			 * currently generated by the CAM subsystem so there
1833 			 * is no point in wasting the time right now.
1834 			 */
1835 			struct bus_dma_segment *segs;
1836 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
1837 				(*cb)(req, NULL, 0, EFAULT);
1838 			} else {
1839 				/* Just use the segments provided */
1840 				segs = (struct bus_dma_segment *)csio->data_ptr;
1841 				(*cb)(req, segs, csio->sglist_cnt, 0);
1842 			}
1843 		}
1844 	} else {
1845 		(*cb)(req, NULL, 0, 0);
1846 	}
1847 }
1848 
1849 static int
1850 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
1851     int sleep_ok)
1852 {
1853 	int   error;
1854 	uint16_t status;
1855 	uint8_t response;
1856 
1857 	error = mpt_scsi_send_tmf(mpt,
1858 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
1859 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
1860 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1861 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
1862 	    0,	/* XXX How do I get the channel ID? */
1863 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
1864 	    lun != CAM_LUN_WILDCARD ? lun : 0,
1865 	    0, sleep_ok);
1866 
1867 	if (error != 0) {
1868 		/*
1869 		 * mpt_scsi_send_tmf hard resets on failure, so no
1870 		 * need to do so here.
1871 		 */
1872 		mpt_prt(mpt,
1873 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
1874 		return (EIO);
1875 	}
1876 
1877 	/* Wait for bus reset to be processed by the IOC. */
1878 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
1879 	    REQ_STATE_DONE, sleep_ok, 5000);
1880 
1881 	status = mpt->tmf_req->IOCStatus;
1882 	response = mpt->tmf_req->ResponseCode;
1883 	mpt->tmf_req->state = REQ_STATE_FREE;
1884 
1885 	if (error) {
1886 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
1887 		    "Resetting controller.\n");
1888 		mpt_reset(mpt, TRUE);
1889 		return (ETIMEDOUT);
1890 	}
1891 
1892 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1893 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
1894 		    "Resetting controller.\n", status);
1895 		mpt_reset(mpt, TRUE);
1896 		return (EIO);
1897 	}
1898 
1899 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
1900 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
1901 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
1902 		    "Resetting controller.\n", response);
1903 		mpt_reset(mpt, TRUE);
1904 		return (EIO);
1905 	}
1906 	return (0);
1907 }
1908 
1909 static int
1910 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
1911 {
1912 	int r = 0;
1913 	request_t *req;
1914 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
1915 
1916  	req = mpt_get_request(mpt, FALSE);
1917 	if (req == NULL) {
1918 		return (ENOMEM);
1919 	}
1920 	fc = req->req_vbuf;
1921 	memset(fc, 0, sizeof(*fc));
1922 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
1923 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
1924 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
1925 	mpt_send_cmd(mpt, req);
1926 	if (dowait) {
1927 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
1928 		    REQ_STATE_DONE, FALSE, 60 * 1000);
1929 		if (r == 0) {
1930 			mpt_free_request(mpt, req);
1931 		}
1932 	}
1933 	return (r);
1934 }
1935 
1936 static int
1937 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
1938 	      MSG_EVENT_NOTIFY_REPLY *msg)
1939 {
1940 	switch(msg->Event & 0xFF) {
1941 	case MPI_EVENT_UNIT_ATTENTION:
1942 		mpt_prt(mpt, "Bus: 0x%02x TargetID: 0x%02x\n",
1943 		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1944 		break;
1945 
1946 	case MPI_EVENT_IOC_BUS_RESET:
1947 		/* We generated a bus reset */
1948 		mpt_prt(mpt, "IOC Bus Reset Port: %d\n",
1949 		    (msg->Data[0] >> 8) & 0xff);
1950 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
1951 		break;
1952 
1953 	case MPI_EVENT_EXT_BUS_RESET:
1954 		/* Someone else generated a bus reset */
1955 		mpt_prt(mpt, "External Bus Reset Detected\n");
1956 		/*
1957 		 * These replies don't return EventData like the MPI
1958 		 * spec says they do
1959 		 */
1960 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
1961 		break;
1962 
1963 	case MPI_EVENT_RESCAN:
1964 		/*
1965 		 * In general this means a device has been added to the loop.
1966 		 */
1967 		mpt_prt(mpt, "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
1968 /*		xpt_async(AC_FOUND_DEVICE, path, NULL);  */
1969 		break;
1970 
1971 	case MPI_EVENT_LINK_STATUS_CHANGE:
1972 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
1973 		    (msg->Data[1] >> 8) & 0xff,
1974 		    ((msg->Data[0] & 0xff) == 0)?  "Failed" : "Active");
1975 		break;
1976 
1977 	case MPI_EVENT_LOOP_STATE_CHANGE:
1978 		switch ((msg->Data[0] >> 16) & 0xff) {
1979 		case 0x01:
1980 			mpt_prt(mpt,
1981 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
1982 			    "(Loop Initialization)\n",
1983 			    (msg->Data[1] >> 8) & 0xff,
1984 			    (msg->Data[0] >> 8) & 0xff,
1985 			    (msg->Data[0]     ) & 0xff);
1986 			switch ((msg->Data[0] >> 8) & 0xff) {
1987 			case 0xF7:
1988 				if ((msg->Data[0] & 0xff) == 0xF7) {
1989 					mpt_prt(mpt, "Device needs AL_PA\n");
1990 				} else {
1991 					mpt_prt(mpt, "Device %02x doesn't like "
1992 					    "FC performance\n",
1993 					    msg->Data[0] & 0xFF);
1994 				}
1995 				break;
1996 			case 0xF8:
1997 				if ((msg->Data[0] & 0xff) == 0xF7) {
1998 					mpt_prt(mpt, "Device had loop failure "
1999 					    "at its receiver prior to acquiring"
2000 					    " AL_PA\n");
2001 				} else {
2002 					mpt_prt(mpt, "Device %02x detected loop"
2003 					    " failure at its receiver\n",
2004 					    msg->Data[0] & 0xFF);
2005 				}
2006 				break;
2007 			default:
2008 				mpt_prt(mpt, "Device %02x requests that device "
2009 				    "%02x reset itself\n",
2010 				    msg->Data[0] & 0xFF,
2011 				    (msg->Data[0] >> 8) & 0xFF);
2012 				break;
2013 			}
2014 			break;
2015 		case 0x02:
2016 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2017 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2018 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2019 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2020 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2021 			break;
2022 		case 0x03:
2023 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2024 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2025 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2026 			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
2027 			    (msg->Data[0]     ) & 0xff  /* Character 4 */);
2028 			break;
2029 		default:
2030 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2031 			    "FC event (%02x %02x %02x)\n",
2032 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2033 			    (msg->Data[0] >> 16) & 0xff, /* Event */
2034 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2035 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2036 		}
2037 		break;
2038 
2039 	case MPI_EVENT_LOGOUT:
2040 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2041 		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
2042 		break;
2043 	case MPI_EVENT_EVENT_CHANGE:
2044 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2045 		    "mpt_cam_event: MPI_EVENT_EVENT_CHANGE\n");
2046 		break;
2047 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2048 		/*
2049 		 * Devices are attachin'.....
2050 		 */
2051 		mpt_prt(mpt,
2052 		    "mpt_cam_event: MPI_EVENT_SAS_DEVICE_STATUS_CHANGE\n");
2053 		break;
2054 	default:
2055 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2056 		    msg->Event & 0xFF);
2057 		return (0);
2058 	}
2059 	return (1);
2060 }
2061 
2062 /*
2063  * Reply path for all SCSI I/O requests, called from our
2064  * interrupt handler by extracting our handler index from
2065  * the MsgContext field of the reply from the IOC.
2066  *
2067  * This routine is optimized for the common case of a
2068  * completion without error.  All exception handling is
2069  * offloaded to non-inlined helper routines to minimize
2070  * cache footprint.
2071  */
2072 static int
2073 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2074     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2075 {
2076 	MSG_SCSI_IO_REQUEST *scsi_req;
2077 	union ccb *ccb;
2078 	target_id_t tgt;
2079 
2080 	if (req->state == REQ_STATE_FREE) {
2081 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2082 		return (TRUE);
2083 	}
2084 
2085 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2086 	ccb = req->ccb;
2087 	if (ccb == NULL) {
2088 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2089 		    req, req->serno);
2090 		return (TRUE);
2091 	}
2092 
2093 	tgt = scsi_req->TargetID;
2094 	untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
2095 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2096 
2097 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2098 		bus_dmasync_op_t op;
2099 
2100 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2101 			op = BUS_DMASYNC_POSTREAD;
2102 		else
2103 			op = BUS_DMASYNC_POSTWRITE;
2104 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2105 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2106 	}
2107 
2108 	if (reply_frame == NULL) {
2109 		/*
2110 		 * Context only reply, completion without error status.
2111 		 */
2112 		ccb->csio.resid = 0;
2113 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2114 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2115 	} else {
2116 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2117 	}
2118 
2119 	if (mpt->outofbeer) {
2120 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2121 		mpt->outofbeer = 0;
2122 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2123 	}
2124 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2125 		struct scsi_inquiry_data *iq =
2126 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2127 		if (scsi_req->Function ==
2128 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2129 			/*
2130 			 * Fake out the device type so that only the
2131 			 * pass-thru device will attach.
2132 			 */
2133 			iq->device &= ~0x1F;
2134 			iq->device |= T_NODEVICE;
2135 		}
2136 	}
2137 	if (mpt->verbose == MPT_PRT_DEBUG) {
2138 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2139 		    req, req->serno);
2140 	}
2141 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2142 	MPTLOCK_2_CAMLOCK(mpt);
2143 	xpt_done(ccb);
2144 	CAMLOCK_2_MPTLOCK(mpt);
2145 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2146 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2147 	} else {
2148 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2149 		    req, req->serno);
2150 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2151 	}
2152 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2153 	    ("CCB req needed wakeup"));
2154 #ifdef	INVARIANTS
2155 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2156 #endif
2157 	mpt_free_request(mpt, req);
2158 	return (TRUE);
2159 }
2160 
2161 static int
2162 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2163     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2164 {
2165 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2166 
2167 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2168 #ifdef	INVARIANTS
2169 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2170 #endif
2171 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2172 	/* Record IOC Status and Response Code of TMF for any waiters. */
2173 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2174 	req->ResponseCode = tmf_reply->ResponseCode;
2175 
2176 	mpt_lprt(mpt, MPT_PRT_INFO, "TMF complete: req %p:%u status 0x%x\n",
2177 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2178 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2179 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2180 		req->state |= REQ_STATE_DONE;
2181 		wakeup(req);
2182 	} else {
2183 		mpt->tmf_req->state = REQ_STATE_FREE;
2184 	}
2185 	return (TRUE);
2186 }
2187 
2188 /*
2189  * XXX: Move to definitions file
2190  */
2191 #define	ELS	0x22
2192 #define	FC4LS	0x32
2193 #define	ABTS	0x81
2194 #define	BA_ACC	0x84
2195 
2196 #define	LS_RJT	0x01
2197 #define	LS_ACC	0x02
2198 #define	PLOGI	0x03
2199 #define	LOGO	0x05
2200 #define SRR	0x14
2201 #define PRLI	0x20
2202 #define PRLO	0x21
2203 #define ADISC	0x52
2204 #define RSCN	0x61
2205 
2206 static void
2207 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2208     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2209 {
2210 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2211 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2212 
2213 	/*
2214 	 * We are going to reuse the ELS request to send this response back.
2215 	 */
2216 	rsp = &tmp;
2217 	memset(rsp, 0, sizeof(*rsp));
2218 
2219 #ifdef	USE_IMMEDIATE_LINK_DATA
2220 	/*
2221 	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2222 	 */
2223 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2224 #endif
2225 	rsp->RspLength = length;
2226 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2227 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2228 
2229 	/*
2230 	 * Copy over information from the original reply frame to
2231 	 * it's correct place in the response.
2232 	 */
2233 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2234 
2235 	/*
2236 	 * And now copy back the temporary area to the original frame.
2237 	 */
2238 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2239 	rsp = req->req_vbuf;
2240 
2241 #ifdef	USE_IMMEDIATE_LINK_DATA
2242 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2243 #else
2244 {
2245 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2246 	bus_addr_t paddr = req->req_pbuf;
2247 	paddr += MPT_RQSL(mpt);
2248 
2249 	se->FlagsLength =
2250 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2251 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2252 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2253 		MPI_SGE_FLAGS_END_OF_LIST	|
2254 		MPI_SGE_FLAGS_END_OF_BUFFER;
2255 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
2256 	se->FlagsLength |= (length);
2257 	se->Address = (uint32_t) paddr;
2258 }
2259 #endif
2260 
2261 	/*
2262 	 * Send it on...
2263 	 */
2264 	mpt_send_cmd(mpt, req);
2265 }
2266 
2267 static int
2268 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2269     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2270 {
2271 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2272 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2273 	U8 rctl;
2274 	U8 type;
2275 	U8 cmd;
2276 	U16 status = le16toh(reply_frame->IOCStatus);
2277 	U32 *elsbuf;
2278 	int ioindex;
2279 	int do_refresh = TRUE;
2280 
2281 #ifdef	INVARIANTS
2282 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2283 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2284 	    req, req->serno, rp->Function));
2285 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2286 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2287 	} else {
2288 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2289 	}
2290 #endif
2291 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2292 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2293 	    req, req->serno, reply_frame, reply_frame->Function);
2294 
2295 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2296 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2297 		    status, reply_frame->Function);
2298 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2299 			/*
2300 			 * XXX: to get around shutdown issue
2301 			 */
2302 			mpt->disabled = 1;
2303 			return (TRUE);
2304 		}
2305 		return (TRUE);
2306 	}
2307 
2308 	/*
2309 	 * If the function of a link service response, we recycle the
2310 	 * response to be a refresh for a new link service request.
2311 	 *
2312 	 * The request pointer is bogus in this case and we have to fetch
2313 	 * it based upon the TransactionContext.
2314 	 */
2315 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2316 		/* Freddie Uncle Charlie Katie */
2317 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2318 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2319 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2320 				break;
2321 			}
2322 
2323 		KASSERT(ioindex < mpt->els_cmds_allocated,
2324 		    ("can't find my mommie!"));
2325 
2326 		/* remove from active list as we're going to re-post it */
2327 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2328 		req->state &= ~REQ_STATE_QUEUED;
2329 		req->state |= REQ_STATE_DONE;
2330 		mpt_fc_post_els(mpt, req, ioindex);
2331 		return (TRUE);
2332 	}
2333 
2334 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2335 		/* remove from active list as we're done */
2336 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2337 		req->state &= ~REQ_STATE_QUEUED;
2338 		req->state |= REQ_STATE_DONE;
2339 		if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2340 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2341 			    "Async Primitive Send Complete\n");
2342 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2343 			mpt_free_request(mpt, req);
2344 		} else {
2345 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2346 			    "Sync Primitive Send Complete\n");
2347 			wakeup(req);
2348 		}
2349 		return (TRUE);
2350 	}
2351 
2352 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2353 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2354 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2355 		    rp->MsgLength, rp->MsgFlags);
2356 		return (TRUE);
2357 	}
2358 
2359 	if (rp->MsgLength <= 5) {
2360 		/*
2361 		 * This is just a ack of an original ELS buffer post
2362 		 */
2363 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2364 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2365 		return (TRUE);
2366 	}
2367 
2368 
2369 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2370 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2371 
2372 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2373 	cmd = be32toh(elsbuf[0]) >> 24;
2374 
2375 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2376 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2377 		return (TRUE);
2378 	}
2379 
2380 	ioindex = le32toh(rp->TransactionContext);
2381 	req = mpt->els_cmd_ptrs[ioindex];
2382 
2383 	if (rctl == ELS && type == 1) {
2384 		switch (cmd) {
2385 		case PRLI:
2386 			/*
2387 			 * Send back a PRLI ACC
2388 			 */
2389 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2390 			    le32toh(rp->Wwn.PortNameHigh),
2391 			    le32toh(rp->Wwn.PortNameLow));
2392 			elsbuf[0] = htobe32(0x02100014);
2393 			elsbuf[1] |= htobe32(0x00000100);
2394 			elsbuf[4] = htobe32(0x00000002);
2395 			if (mpt->role & MPT_ROLE_TARGET)
2396 				elsbuf[4] |= htobe32(0x00000010);
2397 			if (mpt->role & MPT_ROLE_INITIATOR)
2398 				elsbuf[4] |= htobe32(0x00000020);
2399 			/* remove from active list as we're done */
2400 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2401 			req->state &= ~REQ_STATE_QUEUED;
2402 			req->state |= REQ_STATE_DONE;
2403 			mpt_fc_els_send_response(mpt, req, rp, 20);
2404 			do_refresh = FALSE;
2405 			break;
2406 		case PRLO:
2407 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2408 			elsbuf[0] = htobe32(0x02100014);
2409 			elsbuf[1] = htobe32(0x08000100);
2410 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2411 			    le32toh(rp->Wwn.PortNameHigh),
2412 			    le32toh(rp->Wwn.PortNameLow));
2413 			/* remove from active list as we're done */
2414 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2415 			req->state &= ~REQ_STATE_QUEUED;
2416 			req->state |= REQ_STATE_DONE;
2417 			mpt_fc_els_send_response(mpt, req, rp, 20);
2418 			do_refresh = FALSE;
2419 			break;
2420 		default:
2421 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2422 			break;
2423 		}
2424 	} else if (rctl == ABTS && type == 0) {
2425 		uint16_t rx_id = le16toh(rp->Rxid);
2426 		uint16_t ox_id = le16toh(rp->Oxid);
2427 		request_t *tgt_req = NULL;
2428 
2429 		mpt_prt(mpt,
2430 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2431 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2432 		    le32toh(rp->Wwn.PortNameLow));
2433 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2434 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2435 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2436 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2437 		} else {
2438 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2439 		}
2440 		if (tgt_req) {
2441 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2442 			uint8_t *vbuf;
2443 			union ccb *ccb = tgt->ccb;
2444 			uint32_t ct_id;
2445 
2446 			vbuf = tgt_req->req_vbuf;
2447 			vbuf += MPT_RQSL(mpt);
2448 
2449 			/*
2450 			 * Check to make sure we have the correct command
2451 			 * The reply descriptor in the target state should
2452 			 * should contain an IoIndex that should match the
2453 			 * RX_ID.
2454 			 *
2455 			 * It'd be nice to have OX_ID to crosscheck with
2456 			 * as well.
2457 			 */
2458 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2459 
2460 			if (ct_id != rx_id) {
2461 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2462 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2463 				    rx_id, ct_id);
2464 				goto skip;
2465 			}
2466 
2467 			ccb = tgt->ccb;
2468 			if (ccb) {
2469 				mpt_prt(mpt,
2470 				    "CCB (%p): lun %u flags %x status %x\n",
2471 				    ccb, ccb->ccb_h.target_lun,
2472 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2473 			}
2474 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2475 			    "%x nxfers %x\n", tgt->state,
2476 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2477 			    tgt->nxfers);
2478   skip:
2479 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2480 				mpt_prt(mpt, "unable to start TargetAbort\n");
2481 			}
2482 		} else {
2483 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2484 		}
2485 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2486 		elsbuf[0] = htobe32(0);
2487 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2488 		elsbuf[2] = htobe32(0x000ffff);
2489 		/*
2490 		 * Dork with the reply frame so that the reponse to it
2491 		 * will be correct.
2492 		 */
2493 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2494 		/* remove from active list as we're done */
2495 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2496 		req->state &= ~REQ_STATE_QUEUED;
2497 		req->state |= REQ_STATE_DONE;
2498 		mpt_fc_els_send_response(mpt, req, rp, 12);
2499 		do_refresh = FALSE;
2500 	} else {
2501 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2502 	}
2503 	if (do_refresh == TRUE) {
2504 		/* remove from active list as we're done */
2505 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2506 		req->state &= ~REQ_STATE_QUEUED;
2507 		req->state |= REQ_STATE_DONE;
2508 		mpt_fc_post_els(mpt, req, ioindex);
2509 	}
2510 	return (TRUE);
2511 }
2512 
2513 /*
2514  * Clean up all SCSI Initiator personality state in response
2515  * to a controller reset.
2516  */
2517 static void
2518 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2519 {
2520 	/*
2521 	 * The pending list is already run down by
2522 	 * the generic handler.  Perform the same
2523 	 * operation on the timed out request list.
2524 	 */
2525 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
2526 				   MPI_IOCSTATUS_INVALID_STATE);
2527 
2528 	/*
2529 	 * XXX: We need to repost ELS and Target Command Buffers?
2530 	 */
2531 
2532 	/*
2533 	 * Inform the XPT that a bus reset has occurred.
2534 	 */
2535 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
2536 }
2537 
2538 /*
2539  * Parse additional completion information in the reply
2540  * frame for SCSI I/O requests.
2541  */
2542 static int
2543 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
2544 			     MSG_DEFAULT_REPLY *reply_frame)
2545 {
2546 	union ccb *ccb;
2547 	MSG_SCSI_IO_REPLY *scsi_io_reply;
2548 	u_int ioc_status;
2549 	u_int sstate;
2550 	u_int loginfo;
2551 
2552 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
2553 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
2554 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
2555 		("MPT SCSI I/O Handler called with incorrect reply type"));
2556 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
2557 		("MPT SCSI I/O Handler called with continuation reply"));
2558 
2559 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
2560 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
2561 	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
2562 	ioc_status &= MPI_IOCSTATUS_MASK;
2563 	sstate = scsi_io_reply->SCSIState;
2564 
2565 	ccb = req->ccb;
2566 	ccb->csio.resid =
2567 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
2568 
2569 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
2570 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
2571 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2572 		ccb->csio.sense_resid =
2573 		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
2574 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
2575 		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
2576 	}
2577 
2578 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
2579 		/*
2580 		 * Tag messages rejected, but non-tagged retry
2581 		 * was successful.
2582 XXXX
2583 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
2584 		 */
2585 	}
2586 
2587 	switch(ioc_status) {
2588 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2589 		/*
2590 		 * XXX
2591 		 * Linux driver indicates that a zero
2592 		 * transfer length with this error code
2593 		 * indicates a CRC error.
2594 		 *
2595 		 * No need to swap the bytes for checking
2596 		 * against zero.
2597 		 */
2598 		if (scsi_io_reply->TransferCount == 0) {
2599 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2600 			break;
2601 		}
2602 		/* FALLTHROUGH */
2603 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
2604 	case MPI_IOCSTATUS_SUCCESS:
2605 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
2606 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
2607 			/*
2608 			 * Status was never returned for this transaction.
2609 			 */
2610 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
2611 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
2612 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
2613 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
2614 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
2615 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
2616 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
2617 
2618 			/* XXX Handle SPI-Packet and FCP-2 reponse info. */
2619 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2620 		} else
2621 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2622 		break;
2623 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
2624 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
2625 		break;
2626 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
2627 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2628 		break;
2629 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2630 		/*
2631 		 * Since selection timeouts and "device really not
2632 		 * there" are grouped into this error code, report
2633 		 * selection timeout.  Selection timeouts are
2634 		 * typically retried before giving up on the device
2635 		 * whereas "device not there" errors are considered
2636 		 * unretryable.
2637 		 */
2638 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
2639 		break;
2640 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2641 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
2642 		break;
2643 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
2644 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
2645 		break;
2646 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
2647 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
2648 		break;
2649 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2650 		ccb->ccb_h.status = CAM_UA_TERMIO;
2651 		break;
2652 	case MPI_IOCSTATUS_INVALID_STATE:
2653 		/*
2654 		 * The IOC has been reset.  Emulate a bus reset.
2655 		 */
2656 		/* FALLTHROUGH */
2657 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
2658 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2659 		break;
2660 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
2661 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
2662 		/*
2663 		 * Don't clobber any timeout status that has
2664 		 * already been set for this transaction.  We
2665 		 * want the SCSI layer to be able to differentiate
2666 		 * between the command we aborted due to timeout
2667 		 * and any innocent bystanders.
2668 		 */
2669 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
2670 			break;
2671 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
2672 		break;
2673 
2674 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2675 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
2676 		break;
2677 	case MPI_IOCSTATUS_BUSY:
2678 		mpt_set_ccb_status(ccb, CAM_BUSY);
2679 		break;
2680 	case MPI_IOCSTATUS_INVALID_FUNCTION:
2681 	case MPI_IOCSTATUS_INVALID_SGL:
2682 	case MPI_IOCSTATUS_INTERNAL_ERROR:
2683 	case MPI_IOCSTATUS_INVALID_FIELD:
2684 	default:
2685 		/* XXX
2686 		 * Some of the above may need to kick
2687 		 * of a recovery action!!!!
2688 		 */
2689 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2690 		break;
2691 	}
2692 
2693 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2694 		mpt_freeze_ccb(ccb);
2695 	}
2696 
2697 	return (TRUE);
2698 }
2699 
2700 static void
2701 mpt_action(struct cam_sim *sim, union ccb *ccb)
2702 {
2703 	struct mpt_softc *mpt;
2704 	struct ccb_trans_settings *cts;
2705 	target_id_t tgt;
2706 	lun_id_t lun;
2707 	int raid_passthru;
2708 
2709 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
2710 
2711 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
2712 	KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action"));
2713 	raid_passthru = (sim == mpt->phydisk_sim);
2714 
2715 	tgt = ccb->ccb_h.target_id;
2716 	lun = ccb->ccb_h.target_lun;
2717 	if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ &&
2718 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
2719 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
2720 		CAMLOCK_2_MPTLOCK(mpt);
2721 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2722 			MPTLOCK_2_CAMLOCK(mpt);
2723 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2724 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2725 			xpt_done(ccb);
2726 			return;
2727 		}
2728 		MPTLOCK_2_CAMLOCK(mpt);
2729 	}
2730 	ccb->ccb_h.ccb_mpt_ptr = mpt;
2731 
2732 	switch (ccb->ccb_h.func_code) {
2733 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2734 		/*
2735 		 * Do a couple of preliminary checks...
2736 		 */
2737 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2738 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2739 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2740 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2741 				break;
2742 			}
2743 		}
2744 		/* Max supported CDB length is 16 bytes */
2745 		/* XXX Unless we implement the new 32byte message type */
2746 		if (ccb->csio.cdb_len >
2747 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
2748 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2749 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2750 			break;
2751 		}
2752 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2753 		mpt_start(sim, ccb);
2754 		return;
2755 
2756 	case XPT_RESET_BUS:
2757 	case XPT_RESET_DEV:
2758 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2759 			ccb->ccb_h.func_code == XPT_RESET_BUS ?
2760 			"XPT_RESET_BUS\n" : "XPT_RESET_DEV\n");
2761 
2762 		CAMLOCK_2_MPTLOCK(mpt);
2763 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
2764 		MPTLOCK_2_CAMLOCK(mpt);
2765 
2766 		/*
2767 		 * mpt_bus_reset is always successful in that it
2768 		 * will fall back to a hard reset should a bus
2769 		 * reset attempt fail.
2770 		 */
2771 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2772 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2773 		break;
2774 
2775 	case XPT_ABORT:
2776 	{
2777 		union ccb *accb = ccb->cab.abort_ccb;
2778 		CAMLOCK_2_MPTLOCK(mpt);
2779 		switch (accb->ccb_h.func_code) {
2780 		case XPT_ACCEPT_TARGET_IO:
2781 		case XPT_IMMED_NOTIFY:
2782 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
2783 			break;
2784 		case XPT_CONT_TARGET_IO:
2785 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
2786 			ccb->ccb_h.status = CAM_UA_ABORT;
2787 			break;
2788 		case XPT_SCSI_IO:
2789 			ccb->ccb_h.status = CAM_UA_ABORT;
2790 			break;
2791 		default:
2792 			ccb->ccb_h.status = CAM_REQ_INVALID;
2793 			break;
2794 		}
2795 		MPTLOCK_2_CAMLOCK(mpt);
2796 		break;
2797 	}
2798 
2799 #ifdef	CAM_NEW_TRAN_CODE
2800 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
2801 #else
2802 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
2803 #endif
2804 #define	DP_DISC_ENABLE	0x1
2805 #define	DP_DISC_DISABL	0x2
2806 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
2807 
2808 #define	DP_TQING_ENABLE	0x4
2809 #define	DP_TQING_DISABL	0x8
2810 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
2811 
2812 #define	DP_WIDE		0x10
2813 #define	DP_NARROW	0x20
2814 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
2815 
2816 #define	DP_SYNC		0x40
2817 
2818 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2819 	{
2820 #ifdef	CAM_NEW_TRAN_CODE
2821 		struct ccb_trans_settings_scsi *scsi;
2822 		struct ccb_trans_settings_spi *spi;
2823 #endif
2824 		uint8_t dval;
2825 		u_int period;
2826 		u_int offset;
2827 		int i, j;
2828 
2829 		cts = &ccb->cts;
2830 
2831 		if (mpt->is_fc || mpt->is_sas) {
2832 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2833 			break;
2834 		}
2835 
2836 		/*
2837 		 * Skip attempting settings on RAID volume disks.
2838 		 * Other devices on the bus get the normal treatment.
2839 		 */
2840 		if (mpt->phydisk_sim && raid_passthru == 0 &&
2841 		    mpt_is_raid_volume(mpt, tgt) != 0) {
2842 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
2843 			    "skipping transfer settings for RAID volumes\n");
2844 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2845 			break;
2846 		}
2847 
2848 		i = mpt->mpt_port_page2.PortSettings &
2849 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
2850 		j = mpt->mpt_port_page2.PortFlags &
2851 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
2852 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
2853 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
2854 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
2855 			    "honoring BIOS transfer negotiations\n");
2856 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2857 			break;
2858 		}
2859 
2860 		dval = 0;
2861 		period = 0;
2862 		offset = 0;
2863 
2864 #ifndef	CAM_NEW_TRAN_CODE
2865 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
2866 			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
2867 			    DP_DISC_ENABLE : DP_DISC_DISABL;
2868 		}
2869 
2870 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
2871 			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
2872 			    DP_TQING_ENABLE : DP_TQING_DISABL;
2873 		}
2874 
2875 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
2876 			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
2877 		}
2878 
2879 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2880 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
2881 			dval |= DP_SYNC;
2882 			period = cts->sync_period;
2883 			offset = cts->sync_offset;
2884 		}
2885 #else
2886 		scsi = &cts->proto_specific.scsi;
2887 		spi = &cts->xport_specific.spi;
2888 
2889 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2890 			dval |= (spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
2891 			    DP_DISC_ENABLE : DP_DISC_DISABL;
2892 		}
2893 
2894 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2895 			dval |= (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
2896 			    DP_TQING_ENABLE : DP_TQING_DISABL;
2897 		}
2898 
2899 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2900 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
2901 			    DP_WIDE : DP_NARROW;
2902 		}
2903 
2904 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2905 		    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2906 		    (spi->sync_period && spi->sync_offset)) {
2907 			dval |= DP_SYNC;
2908 			period = spi->sync_period;
2909 			offset = spi->sync_offset;
2910 		}
2911 #endif
2912 		CAMLOCK_2_MPTLOCK(mpt);
2913 		if (dval & DP_DISC_ENABLE) {
2914 			mpt->mpt_disc_enable |= (1 << tgt);
2915 		} else if (dval & DP_DISC_DISABL) {
2916 			mpt->mpt_disc_enable &= ~(1 << tgt);
2917 		}
2918 		if (dval & DP_TQING_ENABLE) {
2919 			mpt->mpt_tag_enable |= (1 << tgt);
2920 		} else if (dval & DP_TQING_DISABL) {
2921 			mpt->mpt_tag_enable &= ~(1 << tgt);
2922 		}
2923 		if (dval & DP_WIDTH) {
2924 			mpt_setwidth(mpt, tgt, 1);
2925 		}
2926 		if (dval & DP_SYNC) {
2927 			mpt_setsync(mpt, tgt, period, offset);
2928 		}
2929 
2930 		if (mpt_update_spi_config(mpt, tgt)) {
2931 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2932 		} else {
2933 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2934 		}
2935 		MPTLOCK_2_CAMLOCK(mpt);
2936 		break;
2937 	}
2938 	case XPT_GET_TRAN_SETTINGS:
2939 		cts = &ccb->cts;
2940 		if (mpt->is_fc) {
2941 #ifndef	CAM_NEW_TRAN_CODE
2942 			/*
2943 			 * a lot of normal SCSI things don't make sense.
2944 			 */
2945 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2946 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2947 			/*
2948 			 * How do you measure the width of a high
2949 			 * speed serial bus? Well, in bytes.
2950 			 *
2951 			 * Offset and period make no sense, though, so we set
2952 			 * (above) a 'base' transfer speed to be gigabit.
2953 			 */
2954 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2955 #else
2956 			struct ccb_trans_settings_fc *fc =
2957 			    &cts->xport_specific.fc;
2958 
2959 			cts->protocol = PROTO_SCSI;
2960 			cts->protocol_version = SCSI_REV_2;
2961 			cts->transport = XPORT_FC;
2962 			cts->transport_version = 0;
2963 
2964 			fc->valid = CTS_FC_VALID_SPEED;
2965 			fc->bitrate = 100000;	/* XXX: Need for 2Gb/s */
2966 			/* XXX: need a port database for each target */
2967 #endif
2968 		} else if (mpt->is_sas) {
2969 #ifndef	CAM_NEW_TRAN_CODE
2970 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2971 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2972 			/*
2973 			 * How do you measure the width of a high
2974 			 * speed serial bus? Well, in bytes.
2975 			 *
2976 			 * Offset and period make no sense, though, so we set
2977 			 * (above) a 'base' transfer speed to be gigabit.
2978 			 */
2979 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2980 #else
2981 			struct ccb_trans_settings_sas *sas =
2982 			    &cts->xport_specific.sas;
2983 
2984 			cts->protocol = PROTO_SCSI;
2985 			cts->protocol_version = SCSI_REV_3;
2986 			cts->transport = XPORT_SAS;
2987 			cts->transport_version = 0;
2988 
2989 			sas->valid = CTS_SAS_VALID_SPEED;
2990 			sas->bitrate = 300000;	/* XXX: Default 3Gbps */
2991 #endif
2992 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
2993 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2994 			break;
2995 		}
2996 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2997 		break;
2998 
2999 	case XPT_CALC_GEOMETRY:
3000 	{
3001 		struct ccb_calc_geometry *ccg;
3002 
3003 		ccg = &ccb->ccg;
3004 		if (ccg->block_size == 0) {
3005 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3006 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3007 			break;
3008 		}
3009 		mpt_calc_geometry(ccg, /*extended*/1);
3010 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3011 		break;
3012 	}
3013 	case XPT_PATH_INQ:		/* Path routing inquiry */
3014 	{
3015 		struct ccb_pathinq *cpi = &ccb->cpi;
3016 
3017 		cpi->version_num = 1;
3018 		cpi->target_sprt = 0;
3019 		cpi->hba_eng_cnt = 0;
3020 		cpi->max_target = mpt->mpt_max_devices - 1;
3021 		/*
3022 		 * XXX: FC cards report MAX_DEVICES of 512- but we
3023 		 * XXX: seem to hang when going higher than 255.
3024 		 */
3025 		if (cpi->max_target > 255)
3026 			cpi->max_target = 255;
3027 		/*
3028 		 * XXX: VMware ESX reports > 16 devices and then dies
3029 		 * XXX: when we probe.
3030 		 */
3031 		if (mpt->is_spi && cpi->max_target > 15)
3032 			cpi->max_target = 15;
3033 		cpi->max_lun = 7;
3034 		cpi->initiator_id = mpt->mpt_ini_id;
3035 
3036 		cpi->bus_id = cam_sim_bus(sim);
3037 		/*
3038 		 * Actual speed for each device varies.
3039 		 *
3040 		 * The base speed is the speed of the underlying connection.
3041 		 * This is strictly determined for SPI (async, narrow). If
3042 		 * link is up for Fibre Channel, then speed can be gotten
3043 		 * from that.
3044 		 */
3045 		if (mpt->is_fc) {
3046 			cpi->hba_misc = PIM_NOBUSRESET;
3047 			cpi->base_transfer_speed =
3048 			    mpt->mpt_fcport_speed * 100000;
3049 			cpi->hba_inquiry = PI_TAG_ABLE;
3050 		} else if (mpt->is_sas) {
3051 			cpi->hba_misc = PIM_NOBUSRESET;
3052 			cpi->base_transfer_speed = 300000;
3053 			cpi->hba_inquiry = PI_TAG_ABLE;
3054 		} else {
3055 			cpi->hba_misc = PIM_SEQSCAN;
3056 			cpi->base_transfer_speed = 3300;
3057 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3058 		}
3059 
3060 		/*
3061 		 * We give our fake RAID passhtru bus a width that is MaxVolumes
3062 		 * wide, restrict it to one lun and have it *not* be a bus
3063 		 * that can have a SCSI bus reset.
3064 		 */
3065 		if (raid_passthru) {
3066 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3067 			cpi->initiator_id = cpi->max_target + 1;
3068 			cpi->max_lun = 0;
3069 			cpi->hba_misc |= PIM_NOBUSRESET;
3070 		}
3071 
3072 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3073 			cpi->hba_misc |= PIM_NOINITIATOR;
3074 		}
3075 		if ((mpt->role & MPT_ROLE_TARGET) != 0) {
3076 			cpi->target_sprt =
3077 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3078 		} else {
3079 			cpi->target_sprt = 0;
3080 		}
3081 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3082 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3083 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3084 		cpi->unit_number = cam_sim_unit(sim);
3085 		cpi->ccb_h.status = CAM_REQ_CMP;
3086 		break;
3087 	}
3088 	case XPT_EN_LUN:		/* Enable LUN as a target */
3089 	{
3090 		int result;
3091 
3092 		CAMLOCK_2_MPTLOCK(mpt);
3093 		if (ccb->cel.enable)
3094 			result = mpt_enable_lun(mpt,
3095 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3096 		else
3097 			result = mpt_disable_lun(mpt,
3098 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3099 		MPTLOCK_2_CAMLOCK(mpt);
3100 		if (result == 0) {
3101 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3102 		} else {
3103 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3104 		}
3105 		break;
3106 	}
3107 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3108 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3109 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3110 	{
3111 		tgt_resource_t *trtp;
3112 		lun_id_t lun = ccb->ccb_h.target_lun;
3113 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3114 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3115 		ccb->ccb_h.flags = 0;
3116 
3117 		if (lun == CAM_LUN_WILDCARD) {
3118 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3119 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3120 				break;
3121 			}
3122 			trtp = &mpt->trt_wildcard;
3123 		} else if (lun >= MPT_MAX_LUNS) {
3124 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3125 			break;
3126 		} else {
3127 			trtp = &mpt->trt[lun];
3128 		}
3129 		CAMLOCK_2_MPTLOCK(mpt);
3130 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3131 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3132 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3133 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3134 			    sim_links.stqe);
3135 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3136 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3137 			    "Put FREE INOT lun %d\n", lun);
3138 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3139 			    sim_links.stqe);
3140 		} else {
3141 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3142 		}
3143 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3144 		MPTLOCK_2_CAMLOCK(mpt);
3145 		return;
3146 	}
3147 	case XPT_CONT_TARGET_IO:
3148 		CAMLOCK_2_MPTLOCK(mpt);
3149 		mpt_target_start_io(mpt, ccb);
3150 		MPTLOCK_2_CAMLOCK(mpt);
3151 		return;
3152 
3153 	default:
3154 		ccb->ccb_h.status = CAM_REQ_INVALID;
3155 		break;
3156 	}
3157 	xpt_done(ccb);
3158 }
3159 
3160 static int
3161 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3162 {
3163 #ifdef	CAM_NEW_TRAN_CODE
3164 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3165 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3166 #endif
3167 	target_id_t tgt;
3168 	uint8_t dval, pval, oval;
3169 	int rv;
3170 
3171 	if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3172 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3173 			return (-1);
3174 		}
3175 	} else {
3176 		tgt = cts->ccb_h.target_id;
3177 	}
3178 
3179 	/*
3180 	 * XXX: We aren't looking Port Page 2 BIOS settings here.
3181 	 * XXX: For goal settings, we pick the max from port page 0
3182 	 *
3183 	 * For current settings we read the current settings out from
3184 	 * device page 0 for that target.
3185 	 */
3186 	if (IS_CURRENT_SETTINGS(cts)) {
3187 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3188 		dval = 0;
3189 
3190 		CAMLOCK_2_MPTLOCK(mpt);
3191 		tmp = mpt->mpt_dev_page0[tgt];
3192 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3193 		    sizeof(tmp), FALSE, 5000);
3194 		if (rv) {
3195 			MPTLOCK_2_CAMLOCK(mpt);
3196 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3197 			return (rv);
3198 		}
3199 		MPTLOCK_2_CAMLOCK(mpt);
3200 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3201 		    DP_WIDE : DP_NARROW;
3202 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3203 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3204 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3205 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3206 		oval = (tmp.NegotiatedParameters >> 16) & 0xff;
3207 		pval = (tmp.NegotiatedParameters >>  8) & 0xff;
3208 		mpt->mpt_dev_page0[tgt] = tmp;
3209 	} else {
3210 		/*
3211 		 * XXX: Just make theoretical maximum.
3212 		 */
3213 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE;
3214 		oval = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
3215 		pval = (mpt->mpt_port_page0.Capabilities >>  8) & 0xff;
3216 	}
3217 #ifndef	CAM_NEW_TRAN_CODE
3218 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3219 	if (dval & DP_DISC_ENABLE) {
3220 		cts->flags |= CCB_TRANS_DISC_ENB;
3221 	}
3222 	if (dval & DP_TQING_ENABLE) {
3223 		cts->flags |= CCB_TRANS_TAG_ENB;
3224 	}
3225 	if (dval & DP_WIDE) {
3226 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3227 	} else {
3228 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3229 	}
3230 	cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
3231 	    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3232 	if (oval) {
3233 		cts->sync_period = pval;
3234 		cts->sync_offset = oval;
3235 		cts->valid |=
3236 		    CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID;
3237 	}
3238 #else
3239 	cts->protocol = PROTO_SCSI;
3240 	cts->protocol_version = SCSI_REV_2;
3241 	cts->transport = XPORT_SPI;
3242 	cts->transport_version = 2;
3243 
3244 	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
3245 	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
3246 	if (dval & DP_DISC_ENABLE) {
3247 		spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3248 	}
3249 	if (dval & DP_TQING_ENABLE) {
3250 		scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3251 	}
3252 	if (oval && pval) {
3253 		spi->sync_offset = oval;
3254 		spi->sync_period = pval;
3255 		spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3256 		spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3257 	}
3258 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3259 	if (dval & DP_WIDE) {
3260 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3261 	} else {
3262 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3263 	}
3264 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3265 		scsi->valid = CTS_SCSI_VALID_TQ;
3266 		spi->valid |= CTS_SPI_VALID_DISC;
3267 	} else {
3268 		scsi->valid = 0;
3269 	}
3270 #endif
3271 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3272 	    "mpt_get_spi_settings[%d]: %s 0x%x period 0x%x offset %d\n", tgt,
3273 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3274 	return (0);
3275 }
3276 
3277 static void
3278 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3279 {
3280 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3281 
3282 	ptr = &mpt->mpt_dev_page1[tgt];
3283 	if (onoff) {
3284 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3285 	} else {
3286 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3287 	}
3288 }
3289 
3290 static void
3291 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3292 {
3293 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3294 
3295 	ptr = &mpt->mpt_dev_page1[tgt];
3296 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3297 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3298 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3299 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3300 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3301 	ptr->RequestedParameters |= (period << 8) | (offset << 16);
3302 	if (period < 0xa) {
3303 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3304 	}
3305 	if (period < 0x9) {
3306 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3307 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3308 	}
3309 }
3310 
3311 static int
3312 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3313 {
3314 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3315 	int rv;
3316 
3317 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3318 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3319 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3320 	tmp = mpt->mpt_dev_page1[tgt];
3321 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3322 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3323 	if (rv) {
3324 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3325 		return (-1);
3326 	}
3327 	return (0);
3328 }
3329 
3330 static void
3331 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3332 {
3333 #if __FreeBSD_version >= 500000
3334 	cam_calc_geometry(ccg, extended);
3335 #else
3336 	uint32_t size_mb;
3337 	uint32_t secs_per_cylinder;
3338 
3339 	if (ccg->block_size == 0) {
3340 		ccg->ccb_h.status = CAM_REQ_INVALID;
3341 		return;
3342 	}
3343 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3344 	if (size_mb > 1024 && extended) {
3345 		ccg->heads = 255;
3346 		ccg->secs_per_track = 63;
3347 	} else {
3348 		ccg->heads = 64;
3349 		ccg->secs_per_track = 32;
3350 	}
3351 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3352 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3353 	ccg->ccb_h.status = CAM_REQ_CMP;
3354 #endif
3355 }
3356 
3357 /****************************** Timeout Recovery ******************************/
3358 static int
3359 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3360 {
3361 	int error;
3362 
3363 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3364 	    &mpt->recovery_thread, /*flags*/0,
3365 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3366 	return (error);
3367 }
3368 
3369 static void
3370 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3371 {
3372 	if (mpt->recovery_thread == NULL) {
3373 		return;
3374 	}
3375 	mpt->shutdwn_recovery = 1;
3376 	wakeup(mpt);
3377 	/*
3378 	 * Sleep on a slightly different location
3379 	 * for this interlock just for added safety.
3380 	 */
3381 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3382 }
3383 
3384 static void
3385 mpt_recovery_thread(void *arg)
3386 {
3387 	struct mpt_softc *mpt;
3388 
3389 #if __FreeBSD_version >= 500000
3390 	mtx_lock(&Giant);
3391 #endif
3392 	mpt = (struct mpt_softc *)arg;
3393 	MPT_LOCK(mpt);
3394 	for (;;) {
3395 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3396 			if (mpt->shutdwn_recovery == 0) {
3397 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3398 			}
3399 		}
3400 		if (mpt->shutdwn_recovery != 0) {
3401 			break;
3402 		}
3403 		mpt_recover_commands(mpt);
3404 	}
3405 	mpt->recovery_thread = NULL;
3406 	wakeup(&mpt->recovery_thread);
3407 	MPT_UNLOCK(mpt);
3408 #if __FreeBSD_version >= 500000
3409 	mtx_unlock(&Giant);
3410 #endif
3411 	kthread_exit(0);
3412 }
3413 
3414 static int
3415 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3416     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3417 {
3418 	MSG_SCSI_TASK_MGMT *tmf_req;
3419 	int		    error;
3420 
3421 	/*
3422 	 * Wait for any current TMF request to complete.
3423 	 * We're only allowed to issue one TMF at a time.
3424 	 */
3425 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3426 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3427 	if (error != 0) {
3428 		mpt_reset(mpt, TRUE);
3429 		return (ETIMEDOUT);
3430 	}
3431 
3432 	mpt_assign_serno(mpt, mpt->tmf_req);
3433 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3434 
3435 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3436 	memset(tmf_req, 0, sizeof(*tmf_req));
3437 	tmf_req->TargetID = target;
3438 	tmf_req->Bus = channel;
3439 	tmf_req->ChainOffset = 0;
3440 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3441 	tmf_req->Reserved = 0;
3442 	tmf_req->TaskType = type;
3443 	tmf_req->Reserved1 = 0;
3444 	tmf_req->MsgFlags = flags;
3445 	tmf_req->MsgContext =
3446 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3447 	memset(&tmf_req->LUN, 0,
3448 	    sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
3449 	if (lun > 256) {
3450 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3451 		tmf_req->LUN[1] = lun & 0xff;
3452 	} else {
3453 		tmf_req->LUN[1] = lun;
3454 	}
3455 	tmf_req->TaskMsgContext = abort_ctx;
3456 
3457 	mpt_lprt(mpt, MPT_PRT_INFO,
3458 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3459 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3460 	if (mpt->verbose > MPT_PRT_DEBUG) {
3461 		mpt_print_request(tmf_req);
3462 	}
3463 
3464 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3465 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3466 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3467 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3468 	if (error != MPT_OK) {
3469 		mpt_reset(mpt, TRUE);
3470 	}
3471 	return (error);
3472 }
3473 
3474 /*
3475  * When a command times out, it is placed on the requeust_timeout_list
3476  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3477  * only a single TMF operation at a time, so we serially abort/bdr, etc,
3478  * the timedout transactions.  The next TMF is issued either by the
3479  * completion handler of the current TMF waking our recovery thread,
3480  * or the TMF timeout handler causing a hard reset sequence.
3481  */
3482 static void
3483 mpt_recover_commands(struct mpt_softc *mpt)
3484 {
3485 	request_t	   *req;
3486 	union ccb	   *ccb;
3487 	int		    error;
3488 
3489 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3490 		/*
3491 		 * No work to do- leave.
3492 		 */
3493 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3494 		return;
3495 	}
3496 
3497 	/*
3498 	 * Flush any commands whose completion coincides with their timeout.
3499 	 */
3500 	mpt_intr(mpt);
3501 
3502 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3503 		/*
3504 		 * The timedout commands have already
3505 		 * completed.  This typically means
3506 		 * that either the timeout value was on
3507 		 * the hairy edge of what the device
3508 		 * requires or - more likely - interrupts
3509 		 * are not happening.
3510 		 */
3511 		mpt_prt(mpt, "Timedout requests already complete. "
3512 		    "Interrupts may not be functioning.\n");
3513 		mpt_enable_ints(mpt);
3514 		return;
3515 	}
3516 
3517 	/*
3518 	 * We have no visibility into the current state of the
3519 	 * controller, so attempt to abort the commands in the
3520 	 * order they timed-out. For initiator commands, we
3521 	 * depend on the reply handler pulling requests off
3522 	 * the timeout list.
3523 	 */
3524 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3525 		uint16_t status;
3526 		uint8_t response;
3527 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3528 
3529 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
3530 		    req, req->serno, hdrp->Function);
3531 		ccb = req->ccb;
3532 		if (ccb == NULL) {
3533 			mpt_prt(mpt, "null ccb in timed out request. "
3534 			    "Resetting Controller.\n");
3535 			mpt_reset(mpt, TRUE);
3536 			continue;
3537 		}
3538 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
3539 
3540 		/*
3541 		 * Check to see if this is not an initiator command and
3542 		 * deal with it differently if it is.
3543 		 */
3544 		switch (hdrp->Function) {
3545 		case MPI_FUNCTION_SCSI_IO_REQUEST:
3546 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
3547 			break;
3548 		default:
3549 			/*
3550 			 * XXX: FIX ME: need to abort target assists...
3551 			 */
3552 			mpt_prt(mpt, "just putting it back on the pend q\n");
3553 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
3554 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
3555 			    links);
3556 			continue;
3557 		}
3558 
3559 		error = mpt_scsi_send_tmf(mpt,
3560 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3561 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
3562 		    htole32(req->index | scsi_io_handler_id), TRUE);
3563 
3564 		if (error != 0) {
3565 			/*
3566 			 * mpt_scsi_send_tmf hard resets on failure, so no
3567 			 * need to do so here.  Our queue should be emptied
3568 			 * by the hard reset.
3569 			 */
3570 			continue;
3571 		}
3572 
3573 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
3574 		    REQ_STATE_DONE, TRUE, 500);
3575 
3576 		status = mpt->tmf_req->IOCStatus;
3577 		response = mpt->tmf_req->ResponseCode;
3578 		mpt->tmf_req->state = REQ_STATE_FREE;
3579 
3580 		if (error != 0) {
3581 			/*
3582 			 * If we've errored out,, reset the controller.
3583 			 */
3584 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
3585 			    "Resetting controller\n");
3586 			mpt_reset(mpt, TRUE);
3587 			continue;
3588 		}
3589 
3590 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
3591 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
3592 			    "Resetting controller.\n", status);
3593 			mpt_reset(mpt, TRUE);
3594 			continue;
3595 		}
3596 
3597 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
3598 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
3599 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
3600 			    "Resetting controller.\n", response);
3601 			mpt_reset(mpt, TRUE);
3602 			continue;
3603 		}
3604 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
3605 	}
3606 }
3607 
3608 /************************ Target Mode Support ****************************/
3609 static void
3610 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
3611 {
3612 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
3613 	PTR_SGE_TRANSACTION32 tep;
3614 	PTR_SGE_SIMPLE32 se;
3615 	bus_addr_t paddr;
3616 
3617 	paddr = req->req_pbuf;
3618 	paddr += MPT_RQSL(mpt);
3619 
3620 	fc = req->req_vbuf;
3621 	memset(fc, 0, MPT_REQUEST_AREA);
3622 	fc->BufferCount = 1;
3623 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
3624 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
3625 
3626 	/*
3627 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
3628 	 * consist of a TE SGL element (with details length of zero)
3629 	 * followe by a SIMPLE SGL element which holds the address
3630 	 * of the buffer.
3631 	 */
3632 
3633 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
3634 
3635 	tep->ContextSize = 4;
3636 	tep->Flags = 0;
3637 	tep->TransactionContext[0] = htole32(ioindex);
3638 
3639 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
3640 	se->FlagsLength =
3641 		MPI_SGE_FLAGS_HOST_TO_IOC	|
3642 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
3643 		MPI_SGE_FLAGS_LAST_ELEMENT	|
3644 		MPI_SGE_FLAGS_END_OF_LIST	|
3645 		MPI_SGE_FLAGS_END_OF_BUFFER;
3646 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
3647 	se->FlagsLength |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
3648 	se->Address = (uint32_t) paddr;
3649 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3650 	    "add ELS index %d ioindex %d for %p:%u\n",
3651 	    req->index, ioindex, req, req->serno);
3652 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
3653 	    ("mpt_fc_post_els: request not locked"));
3654 	mpt_send_cmd(mpt, req);
3655 }
3656 
3657 static void
3658 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
3659 {
3660 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
3661 	PTR_CMD_BUFFER_DESCRIPTOR cb;
3662 	bus_addr_t paddr;
3663 
3664 	paddr = req->req_pbuf;
3665 	paddr += MPT_RQSL(mpt);
3666 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
3667 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
3668 
3669 	fc = req->req_vbuf;
3670 	fc->BufferCount = 1;
3671 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
3672 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3673 
3674 	cb = &fc->Buffer[0];
3675 	cb->IoIndex = htole16(ioindex);
3676 	cb->u.PhysicalAddress32 = (U32) paddr;
3677 
3678 	mpt_check_doorbell(mpt);
3679 	mpt_send_cmd(mpt, req);
3680 }
3681 
3682 static int
3683 mpt_add_els_buffers(struct mpt_softc *mpt)
3684 {
3685 	int i;
3686 
3687 	if (mpt->is_fc == 0) {
3688 		return (TRUE);
3689 	}
3690 
3691 	if (mpt->els_cmds_allocated) {
3692 		return (TRUE);
3693 	}
3694 
3695 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
3696 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3697 
3698 	if (mpt->els_cmd_ptrs == NULL) {
3699 		return (FALSE);
3700 	}
3701 
3702 	/*
3703 	 * Feed the chip some ELS buffer resources
3704 	 */
3705 	for (i = 0; i < MPT_MAX_ELS; i++) {
3706 		request_t *req = mpt_get_request(mpt, FALSE);
3707 		if (req == NULL) {
3708 			break;
3709 		}
3710 		req->state |= REQ_STATE_LOCKED;
3711 		mpt->els_cmd_ptrs[i] = req;
3712 		mpt_fc_post_els(mpt, req, i);
3713 	}
3714 
3715 	if (i == 0) {
3716 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
3717 		free(mpt->els_cmd_ptrs, M_DEVBUF);
3718 		mpt->els_cmd_ptrs = NULL;
3719 		return (FALSE);
3720 	}
3721 	if (i != MPT_MAX_ELS) {
3722 		mpt_lprt(mpt, MPT_PRT_INFO,
3723 		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
3724 	}
3725 	mpt->els_cmds_allocated = i;
3726 	return(TRUE);
3727 }
3728 
3729 static int
3730 mpt_add_target_commands(struct mpt_softc *mpt)
3731 {
3732 	int i, max;
3733 
3734 	if (mpt->tgt_cmd_ptrs) {
3735 		return (TRUE);
3736 	}
3737 
3738 	max = MPT_MAX_REQUESTS(mpt) >> 1;
3739 	if (max > mpt->mpt_max_tgtcmds) {
3740 		max = mpt->mpt_max_tgtcmds;
3741 	}
3742 	mpt->tgt_cmd_ptrs =
3743 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
3744 	if (mpt->tgt_cmd_ptrs == NULL) {
3745 		mpt_prt(mpt,
3746 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
3747 		return (FALSE);
3748 	}
3749 
3750 	for (i = 0; i < max; i++) {
3751 		request_t *req;
3752 
3753 		req = mpt_get_request(mpt, FALSE);
3754 		if (req == NULL) {
3755 			break;
3756 		}
3757 		req->state |= REQ_STATE_LOCKED;
3758 		mpt->tgt_cmd_ptrs[i] = req;
3759 		mpt_post_target_command(mpt, req, i);
3760 	}
3761 
3762 
3763 	if (i == 0) {
3764 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
3765 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
3766 		mpt->tgt_cmd_ptrs = NULL;
3767 		return (FALSE);
3768 	}
3769 
3770 	mpt->tgt_cmds_allocated = i;
3771 
3772 	if (i < max) {
3773 		mpt_lprt(mpt, MPT_PRT_INFO,
3774 		    "added %d of %d target bufs\n", i, max);
3775 	}
3776 	return (i);
3777 }
3778 
3779 static void
3780 mpt_free_els_buffers(struct mpt_softc *mpt)
3781 {
3782 	mpt_prt(mpt, "fix me! need to implement mpt_free_els_buffers");
3783 }
3784 
3785 static void
3786 mpt_free_target_commands(struct mpt_softc *mpt)
3787 {
3788 	mpt_prt(mpt, "fix me! need to implement mpt_free_target_commands");
3789 }
3790 
3791 
3792 static int
3793 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3794 {
3795 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3796 		mpt->twildcard = 1;
3797 	} else if (lun >= MPT_MAX_LUNS) {
3798 		return (EINVAL);
3799 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3800 		return (EINVAL);
3801 	}
3802 	if (mpt->tenabled == 0) {
3803 		/*
3804 		 * Try to add some target command resources
3805 		 */
3806 		if (mpt_add_target_commands(mpt) == FALSE) {
3807 			mpt_free_els_buffers(mpt);
3808 			return (ENOMEM);
3809 		}
3810 		if (mpt->is_fc) {
3811 			(void) mpt_fc_reset_link(mpt, 0);
3812 		}
3813 		mpt->tenabled = 1;
3814 	}
3815 	if (lun == CAM_LUN_WILDCARD) {
3816 		mpt->trt_wildcard.enabled = 1;
3817 	} else {
3818 		mpt->trt[lun].enabled = 1;
3819 	}
3820 	return (0);
3821 }
3822 
3823 static int
3824 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3825 {
3826 	int i;
3827 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3828 		mpt->twildcard = 0;
3829 	} else if (lun >= MPT_MAX_LUNS) {
3830 		return (EINVAL);
3831 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3832 		return (EINVAL);
3833 	}
3834 	if (lun == CAM_LUN_WILDCARD) {
3835 		mpt->trt_wildcard.enabled = 0;
3836 	} else {
3837 		mpt->trt[lun].enabled = 0;
3838 	}
3839 	for (i = 0; i < MPT_MAX_LUNS; i++) {
3840 		if (mpt->trt[lun].enabled) {
3841 			break;
3842 		}
3843 	}
3844 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
3845 		mpt_free_els_buffers(mpt);
3846 		mpt_free_target_commands(mpt);
3847 		if (mpt->is_fc) {
3848 			(void) mpt_fc_reset_link(mpt, 0);
3849 		}
3850 		mpt->tenabled = 0;
3851 	}
3852 	return (0);
3853 }
3854 
3855 /*
3856  * Called with MPT lock held
3857  */
3858 static void
3859 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
3860 {
3861 	struct ccb_scsiio *csio = &ccb->csio;
3862 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
3863 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
3864 
3865 	switch (tgt->state) {
3866 	case TGT_STATE_IN_CAM:
3867 		break;
3868 	case TGT_STATE_MOVING_DATA:
3869 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3870 		xpt_freeze_simq(mpt->sim, 1);
3871 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3872 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3873 		MPTLOCK_2_CAMLOCK(mpt);
3874 		xpt_done(ccb);
3875 		CAMLOCK_2_MPTLOCK(mpt);
3876 		return;
3877 	default:
3878 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
3879 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
3880 		mpt_tgt_dump_req_state(mpt, cmd_req);
3881 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3882 		MPTLOCK_2_CAMLOCK(mpt);
3883 		xpt_done(ccb);
3884 		CAMLOCK_2_MPTLOCK(mpt);
3885 		return;
3886 	}
3887 
3888 	if (csio->dxfer_len) {
3889 		bus_dmamap_callback_t *cb;
3890 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
3891 		request_t *req;
3892 
3893 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
3894 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
3895 
3896 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
3897 			if (mpt->outofbeer == 0) {
3898 				mpt->outofbeer = 1;
3899 				xpt_freeze_simq(mpt->sim, 1);
3900 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
3901 			}
3902 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3903 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3904 			MPTLOCK_2_CAMLOCK(mpt);
3905 			xpt_done(ccb);
3906 			CAMLOCK_2_MPTLOCK(mpt);
3907 			return;
3908 		}
3909 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
3910 		if (sizeof (bus_addr_t) > 4) {
3911 			cb = mpt_execute_req_a64;
3912 		} else {
3913 			cb = mpt_execute_req;
3914 		}
3915 
3916 		req->ccb = ccb;
3917 		ccb->ccb_h.ccb_req_ptr = req;
3918 
3919 		/*
3920 		 * Record the currently active ccb and the
3921 		 * request for it in our target state area.
3922 		 */
3923 		tgt->ccb = ccb;
3924 		tgt->req = req;
3925 
3926 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
3927 		ta = req->req_vbuf;
3928 
3929 		if (mpt->is_sas == 0) {
3930 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
3931 			     cmd_req->req_vbuf;
3932 			ta->QueueTag = ssp->InitiatorTag;
3933 		} else if (mpt->is_spi) {
3934 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
3935 			     cmd_req->req_vbuf;
3936 			ta->QueueTag = sp->Tag;
3937 		}
3938 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
3939 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3940 		ta->ReplyWord = htole32(tgt->reply_desc);
3941 		if (csio->ccb_h.target_lun > 256) {
3942 			ta->LUN[0] =
3943 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
3944 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
3945 		} else {
3946 			ta->LUN[1] = csio->ccb_h.target_lun;
3947 		}
3948 
3949 		ta->RelativeOffset = tgt->bytes_xfered;
3950 		ta->DataLength = ccb->csio.dxfer_len;
3951 		if (ta->DataLength > tgt->resid) {
3952 			ta->DataLength = tgt->resid;
3953 		}
3954 
3955 		/*
3956 		 * XXX Should be done after data transfer completes?
3957 		 */
3958 		tgt->resid -= csio->dxfer_len;
3959 		tgt->bytes_xfered += csio->dxfer_len;
3960 
3961 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
3962 			ta->TargetAssistFlags |=
3963 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
3964 		}
3965 
3966 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
3967 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
3968 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
3969 			ta->TargetAssistFlags |=
3970 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
3971 		}
3972 #endif
3973 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
3974 
3975 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3976 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
3977 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
3978 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
3979 
3980 		MPTLOCK_2_CAMLOCK(mpt);
3981 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
3982 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
3983 				int error;
3984 				int s = splsoftvm();
3985 				error = bus_dmamap_load(mpt->buffer_dmat,
3986 				    req->dmap, csio->data_ptr, csio->dxfer_len,
3987 				    cb, req, 0);
3988 				splx(s);
3989 				if (error == EINPROGRESS) {
3990 					xpt_freeze_simq(mpt->sim, 1);
3991 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3992 				}
3993 			} else {
3994 				/*
3995 				 * We have been given a pointer to single
3996 				 * physical buffer.
3997 				 */
3998 				struct bus_dma_segment seg;
3999 				seg.ds_addr = (bus_addr_t)
4000 				    (vm_offset_t)csio->data_ptr;
4001 				seg.ds_len = csio->dxfer_len;
4002 				(*cb)(req, &seg, 1, 0);
4003 			}
4004 		} else {
4005 			/*
4006 			 * We have been given a list of addresses.
4007 			 * This case could be easily supported but they are not
4008 			 * currently generated by the CAM subsystem so there
4009 			 * is no point in wasting the time right now.
4010 			 */
4011 			struct bus_dma_segment *sgs;
4012 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4013 				(*cb)(req, NULL, 0, EFAULT);
4014 			} else {
4015 				/* Just use the segments provided */
4016 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4017 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4018 			}
4019 		}
4020 		CAMLOCK_2_MPTLOCK(mpt);
4021 	} else {
4022 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4023 
4024 		/*
4025 		 * XXX: I don't know why this seems to happen, but
4026 		 * XXX: completing the CCB seems to make things happy.
4027 		 * XXX: This seems to happen if the initiator requests
4028 		 * XXX: enough data that we have to do multiple CTIOs.
4029 		 */
4030 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4031 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4032 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4033 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4034 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4035 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4036 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4037 			MPTLOCK_2_CAMLOCK(mpt);
4038 			xpt_done(ccb);
4039 			CAMLOCK_2_MPTLOCK(mpt);
4040 			return;
4041 		}
4042 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4043 			sp = sense;
4044 			memcpy(sp, &csio->sense_data,
4045 			   min(csio->sense_len, MPT_SENSE_SIZE));
4046 		}
4047 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4048 	}
4049 }
4050 
4051 /*
4052  * Abort queued up CCBs
4053  */
4054 static cam_status
4055 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4056 {
4057 	struct mpt_hdr_stailq *lp;
4058 	struct ccb_hdr *srch;
4059 	int found = 0;
4060 	union ccb *accb = ccb->cab.abort_ccb;
4061 	tgt_resource_t *trtp;
4062 
4063 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4064 
4065 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4066 		trtp = &mpt->trt_wildcard;
4067 	} else {
4068 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4069 	}
4070 
4071 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4072 		lp = &trtp->atios;
4073 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4074 		lp = &trtp->inots;
4075 	} else {
4076 		return (CAM_REQ_INVALID);
4077 	}
4078 
4079 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4080 		if (srch == &accb->ccb_h) {
4081 			found = 1;
4082 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4083 			break;
4084 		}
4085 	}
4086 	if (found) {
4087 		accb->ccb_h.status = CAM_REQ_ABORTED;
4088 		xpt_done(accb);
4089 		return (CAM_REQ_CMP);
4090 	}
4091 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
4092 	return (CAM_PATH_INVALID);
4093 }
4094 
4095 /*
4096  * Ask the MPT to abort the current target command
4097  */
4098 static int
4099 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4100 {
4101 	int error;
4102 	request_t *req;
4103 	PTR_MSG_TARGET_MODE_ABORT abtp;
4104 
4105 	req = mpt_get_request(mpt, FALSE);
4106 	if (req == NULL) {
4107 		return (-1);
4108 	}
4109 	abtp = req->req_vbuf;
4110 	memset(abtp, 0, sizeof (*abtp));
4111 
4112 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4113 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4114 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4115 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4116 	error = 0;
4117 	if (mpt->is_fc || mpt->is_sas) {
4118 		mpt_send_cmd(mpt, req);
4119 	} else {
4120 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4121 	}
4122 	return (error);
4123 }
4124 
4125 /*
4126  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4127  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4128  * FC929 to set bogus FC_RSP fields (nonzero residuals
4129  * but w/o RESID fields set). This causes QLogic initiators
4130  * to think maybe that a frame was lost.
4131  *
4132  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4133  * we use allocated requests to do TARGET_ASSIST and we
4134  * need to know when to release them.
4135  */
4136 
4137 static void
4138 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4139     uint8_t status, uint8_t const *sense_data)
4140 {
4141 	uint8_t *cmd_vbuf;
4142 	mpt_tgt_state_t *tgt;
4143 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4144 	request_t *req;
4145 	bus_addr_t paddr;
4146 	int resplen = 0;
4147 
4148 	cmd_vbuf = cmd_req->req_vbuf;
4149 	cmd_vbuf += MPT_RQSL(mpt);
4150 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4151 
4152 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4153 		if (mpt->outofbeer == 0) {
4154 			mpt->outofbeer = 1;
4155 			xpt_freeze_simq(mpt->sim, 1);
4156 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4157 		}
4158 		if (ccb) {
4159 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4160 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4161 			MPTLOCK_2_CAMLOCK(mpt);
4162 			xpt_done(ccb);
4163 			CAMLOCK_2_MPTLOCK(mpt);
4164 		} else {
4165 			mpt_prt(mpt,
4166 			    "XXXX could not allocate status req- dropping\n");
4167 		}
4168 		return;
4169 	}
4170 	req->ccb = ccb;
4171 	if (ccb) {
4172 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4173 		ccb->ccb_h.ccb_req_ptr = req;
4174 	}
4175 
4176 	/*
4177 	 * Record the currently active ccb, if any, and the
4178 	 * request for it in our target state area.
4179 	 */
4180 	tgt->ccb = ccb;
4181 	tgt->req = req;
4182 	tgt->state = TGT_STATE_SENDING_STATUS;
4183 
4184 	tp = req->req_vbuf;
4185 	paddr = req->req_pbuf;
4186 	paddr += MPT_RQSL(mpt);
4187 
4188 	memset(tp, 0, sizeof (*tp));
4189 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4190 	if (mpt->is_fc) {
4191 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4192 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4193 		uint8_t *sts_vbuf;
4194 		uint32_t *rsp;
4195 
4196 		sts_vbuf = req->req_vbuf;
4197 		sts_vbuf += MPT_RQSL(mpt);
4198 		rsp = (uint32_t *) sts_vbuf;
4199 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4200 
4201 		/*
4202 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4203 		 * It has to be big-endian in memory and is organized
4204 		 * in 32 bit words, which are much easier to deal with
4205 		 * as words which are swizzled as needed.
4206 		 *
4207 		 * All we're filling here is the FC_RSP payload.
4208 		 * We may just have the chip synthesize it if
4209 		 * we have no residual and an OK status.
4210 		 *
4211 		 */
4212 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4213 
4214 		rsp[2] = status;
4215 		if (tgt->resid) {
4216 			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
4217 			rsp[3] = htobe32(tgt->resid);
4218 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4219 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4220 #endif
4221 		}
4222 		if (status == SCSI_STATUS_CHECK_COND) {
4223 			int i;
4224 
4225 			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
4226 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4227 			if (sense_data) {
4228 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4229 			} else {
4230 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4231 				    "TION but no sense data?\n");
4232 				memset(&rsp, 0, MPT_SENSE_SIZE);
4233 			}
4234 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4235 				rsp[i] = htobe32(rsp[i]);
4236 			}
4237 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4238 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4239 #endif
4240 		}
4241 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4242 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4243 #endif
4244 		rsp[2] = htobe32(rsp[2]);
4245 	} else if (mpt->is_sas) {
4246 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4247 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4248 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4249 	} else {
4250 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4251 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4252 		tp->StatusCode = status;
4253 		tp->QueueTag = htole16(sp->Tag);
4254 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4255 	}
4256 
4257 	tp->ReplyWord = htole32(tgt->reply_desc);
4258 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4259 
4260 #ifdef	WE_CAN_USE_AUTO_REPOST
4261 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4262 #endif
4263 	if (status == SCSI_STATUS_OK && resplen == 0) {
4264 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4265 	} else {
4266 		tp->StatusDataSGE.u.Address32 = (uint32_t) paddr;
4267 		tp->StatusDataSGE.FlagsLength =
4268 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4269 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4270 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4271 			MPI_SGE_FLAGS_END_OF_LIST	|
4272 			MPI_SGE_FLAGS_END_OF_BUFFER;
4273 		tp->StatusDataSGE.FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
4274 		tp->StatusDataSGE.FlagsLength |= resplen;
4275 	}
4276 
4277 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4278 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4279 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4280 	    req->serno, tgt->resid);
4281 	if (ccb) {
4282 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4283 		ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz);
4284 	}
4285 	mpt_send_cmd(mpt, req);
4286 }
4287 
4288 static void
4289 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4290     tgt_resource_t *trtp, int init_id)
4291 {
4292 	struct ccb_immed_notify *inot;
4293 	mpt_tgt_state_t *tgt;
4294 
4295 	tgt = MPT_TGT_STATE(mpt, req);
4296 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4297 	if (inot == NULL) {
4298 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4299 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4300 		return;
4301 	}
4302 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4303 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4304 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4305 
4306 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4307 	inot->sense_len = 0;
4308 	memset(inot->message_args, 0, sizeof (inot->message_args));
4309 	inot->initiator_id = init_id;	/* XXX */
4310 
4311 	/*
4312 	 * This is a somewhat grotesque attempt to map from task management
4313 	 * to old style SCSI messages. God help us all.
4314 	 */
4315 	switch (fc) {
4316 	case MPT_ABORT_TASK_SET:
4317 		inot->message_args[0] = MSG_ABORT_TAG;
4318 		break;
4319 	case MPT_CLEAR_TASK_SET:
4320 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4321 		break;
4322 	case MPT_TARGET_RESET:
4323 		inot->message_args[0] = MSG_TARGET_RESET;
4324 		break;
4325 	case MPT_CLEAR_ACA:
4326 		inot->message_args[0] = MSG_CLEAR_ACA;
4327 		break;
4328 	case MPT_TERMINATE_TASK:
4329 		inot->message_args[0] = MSG_ABORT_TAG;
4330 		break;
4331 	default:
4332 		inot->message_args[0] = MSG_NOOP;
4333 		break;
4334 	}
4335 	tgt->ccb = (union ccb *) inot;
4336 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4337 	MPTLOCK_2_CAMLOCK(mpt);
4338 	xpt_done((union ccb *)inot);
4339 	CAMLOCK_2_MPTLOCK(mpt);
4340 }
4341 
4342 static void
4343 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4344 {
4345 	struct ccb_accept_tio *atiop;
4346 	lun_id_t lun;
4347 	int tag_action = 0;
4348 	mpt_tgt_state_t *tgt;
4349 	tgt_resource_t *trtp = NULL;
4350 	U8 *lunptr;
4351 	U8 *vbuf;
4352 	U16 itag;
4353 	U16 ioindex;
4354 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4355 	uint8_t *cdbp;
4356 
4357 	/*
4358 	 * First, DMA sync the received command- which is in the *request*
4359 	 * phys area.
4360 	 * XXX: We could optimize this for a range
4361 	 */
4362 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
4363 	    BUS_DMASYNC_POSTREAD);
4364 
4365 	/*
4366 	 * Stash info for the current command where we can get at it later.
4367 	 */
4368 	vbuf = req->req_vbuf;
4369 	vbuf += MPT_RQSL(mpt);
4370 
4371 	/*
4372 	 * Get our state pointer set up.
4373 	 */
4374 	tgt = MPT_TGT_STATE(mpt, req);
4375 	if (tgt->state != TGT_STATE_LOADED) {
4376 		mpt_tgt_dump_req_state(mpt, req);
4377 		panic("bad target state in mpt_scsi_tgt_atio");
4378 	}
4379 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4380 	tgt->state = TGT_STATE_IN_CAM;
4381 	tgt->reply_desc = reply_desc;
4382 	ioindex = GET_IO_INDEX(reply_desc);
4383 
4384 	if (mpt->is_fc) {
4385 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4386 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4387 		if (fc->FcpCntl[2]) {
4388 			/*
4389 			 * Task Management Request
4390 			 */
4391 			switch (fc->FcpCntl[2]) {
4392 			case 0x2:
4393 				fct = MPT_ABORT_TASK_SET;
4394 				break;
4395 			case 0x4:
4396 				fct = MPT_CLEAR_TASK_SET;
4397 				break;
4398 			case 0x20:
4399 				fct = MPT_TARGET_RESET;
4400 				break;
4401 			case 0x40:
4402 				fct = MPT_CLEAR_ACA;
4403 				break;
4404 			case 0x80:
4405 				fct = MPT_TERMINATE_TASK;
4406 				break;
4407 			default:
4408 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4409 				    fc->FcpCntl[2]);
4410 				mpt_scsi_tgt_status(mpt, 0, req,
4411 				    SCSI_STATUS_OK, 0);
4412 				return;
4413 			}
4414 		} else {
4415 			switch (fc->FcpCntl[1]) {
4416 			case 0:
4417 				tag_action = MSG_SIMPLE_Q_TAG;
4418 				break;
4419 			case 1:
4420 				tag_action = MSG_HEAD_OF_Q_TAG;
4421 				break;
4422 			case 2:
4423 				tag_action = MSG_ORDERED_Q_TAG;
4424 				break;
4425 			default:
4426 				/*
4427 				 * Bah. Ignore Untagged Queing and ACA
4428 				 */
4429 				tag_action = MSG_SIMPLE_Q_TAG;
4430 				break;
4431 			}
4432 		}
4433 		tgt->resid = be32toh(fc->FcpDl);
4434 		cdbp = fc->FcpCdb;
4435 		lunptr = fc->FcpLun;
4436 		itag = be16toh(fc->OptionalOxid);
4437 	} else if (mpt->is_sas) {
4438 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4439 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4440 		cdbp = ssp->CDB;
4441 		lunptr = ssp->LogicalUnitNumber;
4442 		itag = ssp->InitiatorTag;
4443 	} else {
4444 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4445 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4446 		cdbp = sp->CDB;
4447 		lunptr = sp->LogicalUnitNumber;
4448 		itag = sp->Tag;
4449 	}
4450 
4451 	/*
4452 	 * Generate a simple lun
4453 	 */
4454 	switch (lunptr[0] & 0xc0) {
4455 	case 0x40:
4456 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
4457 		break;
4458 	case 0:
4459 		lun = lunptr[1];
4460 		break;
4461 	default:
4462 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
4463 		lun = 0xffff;
4464 		break;
4465 	}
4466 
4467 	/*
4468 	 * Deal with non-enabled or bad luns here.
4469 	 */
4470 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4471 	    mpt->trt[lun].enabled == 0) {
4472 		if (mpt->twildcard) {
4473 			trtp = &mpt->trt_wildcard;
4474 		} else if (fct != MPT_NIL_TMT_VALUE) {
4475 			const uint8_t sp[MPT_SENSE_SIZE] = {
4476 				0xf0, 0, 0x5, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0x25
4477 			};
4478 			mpt_scsi_tgt_status(mpt, NULL, req,
4479 			    SCSI_STATUS_CHECK_COND, sp);
4480 			return;
4481 		}
4482 	} else {
4483 		trtp = &mpt->trt[lun];
4484 	}
4485 
4486 	/*
4487 	 * Deal with any task management
4488 	 */
4489 	if (fct != MPT_NIL_TMT_VALUE) {
4490 		if (trtp == NULL) {
4491 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
4492 			    fct);
4493 			mpt_scsi_tgt_status(mpt, 0, req,
4494 			    SCSI_STATUS_OK, 0);
4495 		} else {
4496 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
4497 			    GET_INITIATOR_INDEX(reply_desc));
4498 		}
4499 		return;
4500 	}
4501 
4502 
4503 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
4504 	if (atiop == NULL) {
4505 		mpt_lprt(mpt, MPT_PRT_WARN,
4506 		    "no ATIOs for lun %u- sending back %s\n", lun,
4507 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
4508 		mpt_scsi_tgt_status(mpt, NULL, req,
4509 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
4510 		    NULL);
4511 		return;
4512 	}
4513 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
4514 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4515 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
4516 	atiop->ccb_h.ccb_mpt_ptr = mpt;
4517 	atiop->ccb_h.status = CAM_CDB_RECVD;
4518 	atiop->ccb_h.target_lun = lun;
4519 	atiop->sense_len = 0;
4520 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
4521 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
4522 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
4523 
4524 	/*
4525 	 * The tag we construct here allows us to find the
4526 	 * original request that the command came in with.
4527 	 *
4528 	 * This way we don't have to depend on anything but the
4529 	 * tag to find things when CCBs show back up from CAM.
4530 	 */
4531 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4532 	tgt->tag_id = atiop->tag_id;
4533 	if (tag_action) {
4534 		atiop->tag_action = tag_action;
4535 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
4536 	}
4537 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4538 		int i;
4539 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
4540 		    atiop->ccb_h.target_lun);
4541 		for (i = 0; i < atiop->cdb_len; i++) {
4542 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
4543 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
4544 		}
4545 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
4546 	    	    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
4547 	}
4548 
4549 	MPTLOCK_2_CAMLOCK(mpt);
4550 	xpt_done((union ccb *)atiop);
4551 	CAMLOCK_2_MPTLOCK(mpt);
4552 }
4553 
4554 static void
4555 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
4556 {
4557 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4558 
4559 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
4560 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
4561 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
4562 	    tgt->tag_id, tgt->state);
4563 }
4564 
4565 static void
4566 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
4567 {
4568 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
4569 	    req->index, req->index, req->state);
4570 	mpt_tgt_dump_tgt_state(mpt, req);
4571 }
4572 
4573 static int
4574 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
4575     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
4576 {
4577 	int dbg;
4578 	union ccb *ccb;
4579 	U16 status;
4580 
4581 	if (reply_frame == NULL) {
4582 		/*
4583 		 * Figure out what the state of the command is.
4584 		 */
4585 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4586 
4587 #ifdef	INVARIANTS
4588 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
4589 		if (tgt->req) {
4590 			mpt_req_not_spcl(mpt, tgt->req,
4591 			    "turbo scsi_tgt_reply associated req", __LINE__);
4592 		}
4593 #endif
4594 		switch(tgt->state) {
4595 		case TGT_STATE_LOADED:
4596 			/*
4597 			 * This is a new command starting.
4598 			 */
4599 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
4600 			break;
4601 		case TGT_STATE_MOVING_DATA:
4602 		{
4603 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4604 
4605 			ccb = tgt->ccb;
4606 			if (tgt->req == NULL) {
4607 				panic("mpt: turbo target reply with null "
4608 				    "associated request moving data");
4609 				/* NOTREACHED */
4610 			}
4611 			if (ccb == NULL) {
4612 				panic("mpt: turbo target reply with null "
4613 				    "associated ccb moving data");
4614 				/* NOTREACHED */
4615 			}
4616 			tgt->ccb = NULL;
4617 			tgt->nxfers++;
4618 			untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
4619 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4620 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
4621 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
4622 			/*
4623 			 * Free the Target Assist Request
4624 			 */
4625 			KASSERT(tgt->req->ccb == ccb,
4626 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
4627 			    tgt->req->serno, tgt->req->ccb));
4628 			TAILQ_REMOVE(&mpt->request_pending_list,
4629 			    tgt->req, links);
4630 			mpt_free_request(mpt, tgt->req);
4631 			tgt->req = NULL;
4632 
4633 			/*
4634 			 * Do we need to send status now? That is, are
4635 			 * we done with all our data transfers?
4636 			 */
4637 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4638 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4639 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4640 				KASSERT(ccb->ccb_h.status,
4641 				    ("zero ccb sts at %d\n", __LINE__));
4642 				tgt->state = TGT_STATE_IN_CAM;
4643 				if (mpt->outofbeer) {
4644 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4645 					mpt->outofbeer = 0;
4646 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4647 				}
4648 				MPTLOCK_2_CAMLOCK(mpt);
4649 				xpt_done(ccb);
4650 				CAMLOCK_2_MPTLOCK(mpt);
4651 				break;
4652 			}
4653 			/*
4654 			 * Otherwise, send status (and sense)
4655 			 */
4656 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4657 				sp = sense;
4658 				memcpy(sp, &ccb->csio.sense_data,
4659 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
4660 			}
4661 			mpt_scsi_tgt_status(mpt, ccb, req,
4662 			    ccb->csio.scsi_status, sp);
4663 			break;
4664 		}
4665 		case TGT_STATE_SENDING_STATUS:
4666 		case TGT_STATE_MOVING_DATA_AND_STATUS:
4667 		{
4668 			int ioindex;
4669 			ccb = tgt->ccb;
4670 
4671 			if (tgt->req == NULL) {
4672 				panic("mpt: turbo target reply with null "
4673 				    "associated request sending status");
4674 				/* NOTREACHED */
4675 			}
4676 
4677 			if (ccb) {
4678 				tgt->ccb = NULL;
4679 				if (tgt->state ==
4680 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
4681 					tgt->nxfers++;
4682 				}
4683 				untimeout(mpt_timeout, ccb,
4684 				    ccb->ccb_h.timeout_ch);
4685 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4686 					ccb->ccb_h.status |= CAM_SENT_SENSE;
4687 				}
4688 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4689 				    "TARGET_STATUS tag %x sts %x flgs %x req "
4690 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
4691 				    ccb->ccb_h.flags, tgt->req);
4692 				/*
4693 				 * Free the Target Send Status Request
4694 				 */
4695 				KASSERT(tgt->req->ccb == ccb,
4696 				    ("tgt->req %p:%u tgt->req->ccb %p",
4697 				    tgt->req, tgt->req->serno, tgt->req->ccb));
4698 				/*
4699 				 * Notify CAM that we're done
4700 				 */
4701 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4702 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4703 				KASSERT(ccb->ccb_h.status,
4704 				    ("ZERO ccb sts at %d\n", __LINE__));
4705 				tgt->ccb = NULL;
4706 			} else {
4707 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4708 				    "TARGET_STATUS non-CAM for  req %p:%u\n",
4709 				    tgt->req, tgt->req->serno);
4710 			}
4711 			TAILQ_REMOVE(&mpt->request_pending_list,
4712 			    tgt->req, links);
4713 			mpt_free_request(mpt, tgt->req);
4714 			tgt->req = NULL;
4715 
4716 			/*
4717 			 * And re-post the Command Buffer.
4718 			 * This wil reset the state.
4719 			 */
4720 			ioindex = GET_IO_INDEX(reply_desc);
4721 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4722 			mpt_post_target_command(mpt, req, ioindex);
4723 
4724 			/*
4725 			 * And post a done for anyone who cares
4726 			 */
4727 			if (ccb) {
4728 				if (mpt->outofbeer) {
4729 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4730 					mpt->outofbeer = 0;
4731 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4732 				}
4733 				MPTLOCK_2_CAMLOCK(mpt);
4734 				xpt_done(ccb);
4735 				CAMLOCK_2_MPTLOCK(mpt);
4736 			}
4737 			break;
4738 		}
4739 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
4740 			tgt->state = TGT_STATE_LOADED;
4741 			break;
4742 		default:
4743 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
4744 			    "Reply Function\n", tgt->state);
4745 		}
4746 		return (TRUE);
4747 	}
4748 
4749 	status = le16toh(reply_frame->IOCStatus);
4750 	if (status != MPI_IOCSTATUS_SUCCESS) {
4751 		dbg = MPT_PRT_ERROR;
4752 	} else {
4753 		dbg = MPT_PRT_DEBUG1;
4754 	}
4755 
4756 	mpt_lprt(mpt, dbg,
4757 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
4758 	     req, req->serno, reply_frame, reply_frame->Function, status);
4759 
4760 	switch (reply_frame->Function) {
4761 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
4762 	{
4763 		mpt_tgt_state_t *tgt;
4764 #ifdef	INVARIANTS
4765 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
4766 #endif
4767 		if (status != MPI_IOCSTATUS_SUCCESS) {
4768 			/*
4769 			 * XXX What to do?
4770 			 */
4771 			break;
4772 		}
4773 		tgt = MPT_TGT_STATE(mpt, req);
4774 		KASSERT(tgt->state == TGT_STATE_LOADING,
4775 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
4776 		mpt_assign_serno(mpt, req);
4777 		tgt->state = TGT_STATE_LOADED;
4778 		break;
4779 	}
4780 	case MPI_FUNCTION_TARGET_ASSIST:
4781 #ifdef	INVARIANTS
4782 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
4783 #endif
4784 		mpt_prt(mpt, "target assist completion\n");
4785 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4786 		mpt_free_request(mpt, req);
4787 		break;
4788 	case MPI_FUNCTION_TARGET_STATUS_SEND:
4789 #ifdef	INVARIANTS
4790 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
4791 #endif
4792 		mpt_prt(mpt, "status send completion\n");
4793 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4794 		mpt_free_request(mpt, req);
4795 		break;
4796 	case MPI_FUNCTION_TARGET_MODE_ABORT:
4797 	{
4798 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
4799 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
4800 		PTR_MSG_TARGET_MODE_ABORT abtp =
4801 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
4802 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
4803 #ifdef	INVARIANTS
4804 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
4805 #endif
4806 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
4807 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
4808 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4809 		mpt_free_request(mpt, req);
4810 		break;
4811 	}
4812 	default:
4813 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
4814 		    "0x%x\n", reply_frame->Function);
4815 		break;
4816 	}
4817 	return (TRUE);
4818 }
4819