xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision a743df5c964d81a7c920cf257e87cb42ab993d58)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC  adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  *
61  * Support from LSI-Logic has also gone a great deal toward making this a
62  * workable subsystem and is gratefully acknowledged.
63  */
64 /*-
65  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66  * Copyright (c) 2005, WHEEL Sp. z o.o.
67  * Copyright (c) 2004, 2005 Justin T. Gibbs
68  * All rights reserved.
69  *
70  * Redistribution and use in source and binary forms, with or without
71  * modification, are permitted provided that the following conditions are
72  * met:
73  * 1. Redistributions of source code must retain the above copyright
74  *    notice, this list of conditions and the following disclaimer.
75  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76  *    substantially similar to the "NO WARRANTY" disclaimer below
77  *    ("Disclaimer") and any redistribution must be conditioned upon including
78  *    a substantially similar Disclaimer requirement for further binary
79  *    redistribution.
80  * 3. Neither the names of the above listed copyright holders nor the names
81  *    of any contributors may be used to endorse or promote products derived
82  *    from this software without specific prior written permission.
83  *
84  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95  */
96 #include <sys/cdefs.h>
97 __FBSDID("$FreeBSD$");
98 
99 #include <dev/mpt/mpt.h>
100 #include <dev/mpt/mpt_cam.h>
101 #include <dev/mpt/mpt_raid.h>
102 
103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/mpt/mpilib/mpi_init.h"
105 #include "dev/mpt/mpilib/mpi_targ.h"
106 #include "dev/mpt/mpilib/mpi_fc.h"
107 
108 #include <sys/callout.h>
109 #include <sys/kthread.h>
110 
111 static void mpt_poll(struct cam_sim *);
112 static timeout_t mpt_timeout;
113 static void mpt_action(struct cam_sim *, union ccb *);
114 static int
115 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
116 static void mpt_setwidth(struct mpt_softc *, int, int);
117 static void mpt_setsync(struct mpt_softc *, int, int, int);
118 static int mpt_update_spi_config(struct mpt_softc *, int);
119 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
120 
121 static mpt_reply_handler_t mpt_scsi_reply_handler;
122 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
123 static mpt_reply_handler_t mpt_fc_els_reply_handler;
124 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
125 					MSG_DEFAULT_REPLY *);
126 static int mpt_bus_reset(struct mpt_softc *, int);
127 static int mpt_fc_reset_link(struct mpt_softc *, int);
128 
129 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
130 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
131 static void mpt_recovery_thread(void *arg);
132 static void mpt_recover_commands(struct mpt_softc *mpt);
133 
134 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
135     u_int, u_int, u_int, int);
136 
137 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
138 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
139 static int mpt_add_els_buffers(struct mpt_softc *mpt);
140 static int mpt_add_target_commands(struct mpt_softc *mpt);
141 static void mpt_free_els_buffers(struct mpt_softc *mpt);
142 static void mpt_free_target_commands(struct mpt_softc *mpt);
143 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
144 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
145 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
146 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
147 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
148 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
149     uint8_t, uint8_t const *);
150 static void
151 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
152     tgt_resource_t *, int);
153 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
154 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
155 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
156 
157 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
158 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
159 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
160 
161 static mpt_probe_handler_t	mpt_cam_probe;
162 static mpt_attach_handler_t	mpt_cam_attach;
163 static mpt_enable_handler_t	mpt_cam_enable;
164 static mpt_event_handler_t	mpt_cam_event;
165 static mpt_reset_handler_t	mpt_cam_ioc_reset;
166 static mpt_detach_handler_t	mpt_cam_detach;
167 
168 static struct mpt_personality mpt_cam_personality =
169 {
170 	.name		= "mpt_cam",
171 	.probe		= mpt_cam_probe,
172 	.attach		= mpt_cam_attach,
173 	.enable		= mpt_cam_enable,
174 	.event		= mpt_cam_event,
175 	.reset		= mpt_cam_ioc_reset,
176 	.detach		= mpt_cam_detach,
177 };
178 
179 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
180 
181 int
182 mpt_cam_probe(struct mpt_softc *mpt)
183 {
184 	/*
185 	 * Only attach to nodes that support the initiator or target
186 	 * role or have RAID physical devices that need CAM pass-thru support.
187 	 */
188 	if ((mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_INITIATOR) != 0
189 	 || (mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_TARGET) != 0
190 	 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
191 		return (0);
192 	}
193 	return (ENODEV);
194 }
195 
196 int
197 mpt_cam_attach(struct mpt_softc *mpt)
198 {
199 	struct cam_devq *devq;
200 	mpt_handler_t	 handler;
201 	int		 maxq;
202 	int		 error;
203 
204 	TAILQ_INIT(&mpt->request_timeout_list);
205 	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
206 	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
207 
208 	handler.reply_handler = mpt_scsi_reply_handler;
209 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
210 				     &scsi_io_handler_id);
211 	if (error != 0) {
212 		goto cleanup0;
213 	}
214 
215 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
216 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
217 				     &scsi_tmf_handler_id);
218 	if (error != 0) {
219 		goto cleanup0;
220 	}
221 
222 	/*
223 	 * If we're fibre channel and could support target mode, we register
224 	 * an ELS reply handler and give it resources.
225 	 */
226 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
227 		handler.reply_handler = mpt_fc_els_reply_handler;
228 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
229 		    &fc_els_handler_id);
230 		if (error != 0) {
231 			goto cleanup0;
232 		}
233 		if (mpt_add_els_buffers(mpt) == FALSE) {
234 			error = ENOMEM;
235 			goto cleanup0;
236 		}
237 		maxq -= mpt->els_cmds_allocated;
238 	}
239 
240 	/*
241 	 * If we support target mode, we register a reply handler for it,
242 	 * but don't add resources until we actually enable target mode.
243 	 */
244 	if ((mpt->role & MPT_ROLE_TARGET) != 0) {
245 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
246 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
247 		    &mpt->scsi_tgt_handler_id);
248 		if (error != 0) {
249 			goto cleanup0;
250 		}
251 	}
252 
253 	/*
254 	 * We keep one request reserved for timeout TMF requests.
255 	 */
256 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
257 	if (mpt->tmf_req == NULL) {
258 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
259 		error = ENOMEM;
260 		goto cleanup0;
261 	}
262 
263 	/*
264 	 * Mark the request as free even though not on the free list.
265 	 * There is only one TMF request allowed to be outstanding at
266 	 * a time and the TMF routines perform their own allocation
267 	 * tracking using the standard state flags.
268 	 */
269 	mpt->tmf_req->state = REQ_STATE_FREE;
270 	maxq--;
271 
272 	if (mpt_spawn_recovery_thread(mpt) != 0) {
273 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
274 		error = ENOMEM;
275 		goto cleanup0;
276 	}
277 
278 	/*
279 	 * The rest of this is CAM foo, for which we need to drop our lock
280 	 */
281 	MPTLOCK_2_CAMLOCK(mpt);
282 
283 	/*
284 	 * Create the device queue for our SIM(s).
285 	 */
286 	devq = cam_simq_alloc(maxq);
287 	if (devq == NULL) {
288 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
289 		error = ENOMEM;
290 		goto cleanup;
291 	}
292 
293 	/*
294 	 * Construct our SIM entry.
295 	 */
296 	mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
297 	    mpt->unit, 1, maxq, devq);
298 	if (mpt->sim == NULL) {
299 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
300 		cam_simq_free(devq);
301 		error = ENOMEM;
302 		goto cleanup;
303 	}
304 
305 	/*
306 	 * Register exactly this bus.
307 	 */
308 	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
309 		mpt_prt(mpt, "Bus registration Failed!\n");
310 		error = ENOMEM;
311 		goto cleanup;
312 	}
313 
314 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
315 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
316 		mpt_prt(mpt, "Unable to allocate Path!\n");
317 		error = ENOMEM;
318 		goto cleanup;
319 	}
320 
321 	/*
322 	 * Only register a second bus for RAID physical
323 	 * devices if the controller supports RAID.
324 	 */
325 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
326 		CAMLOCK_2_MPTLOCK(mpt);
327 		return (0);
328 	}
329 
330 	/*
331 	 * Create a "bus" to export all hidden disks to CAM.
332 	 */
333 	mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
334 	    mpt->unit, 1, maxq, devq);
335 	if (mpt->phydisk_sim == NULL) {
336 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
337 		error = ENOMEM;
338 		goto cleanup;
339 	}
340 
341 	/*
342 	 * Register this bus.
343 	 */
344 	if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
345 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
346 		error = ENOMEM;
347 		goto cleanup;
348 	}
349 
350 	if (xpt_create_path(&mpt->phydisk_path, NULL,
351 	    cam_sim_path(mpt->phydisk_sim),
352 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
353 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
354 		error = ENOMEM;
355 		goto cleanup;
356 	}
357 	CAMLOCK_2_MPTLOCK(mpt);
358 	return (0);
359 
360 cleanup:
361 	CAMLOCK_2_MPTLOCK(mpt);
362 cleanup0:
363 	mpt_cam_detach(mpt);
364 	return (error);
365 }
366 
367 /*
368  * Read FC configuration information
369  */
370 static int
371 mpt_read_config_info_fc(struct mpt_softc *mpt)
372 {
373 	char *topology = NULL;
374 	int rv, speed = 0;
375 
376 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
377 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
378 	if (rv) {
379 		return (-1);
380 	}
381 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
382 		 mpt->mpt_fcport_page0.Header.PageVersion,
383 		 mpt->mpt_fcport_page0.Header.PageLength,
384 		 mpt->mpt_fcport_page0.Header.PageNumber,
385 		 mpt->mpt_fcport_page0.Header.PageType);
386 
387 
388 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
389 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
390 	if (rv) {
391 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
392 		return (-1);
393 	}
394 
395 	speed = mpt->mpt_fcport_page0.CurrentSpeed;
396 
397 	switch (mpt->mpt_fcport_page0.Flags &
398 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
399 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
400 		speed = 0;
401 		topology = "<NO LOOP>";
402 		break;
403 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
404 		topology = "N-Port";
405 		break;
406 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
407 		topology = "NL-Port";
408 		break;
409 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
410 		topology = "F-Port";
411 		break;
412 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
413 		topology = "FL-Port";
414 		break;
415 	default:
416 		speed = 0;
417 		topology = "?";
418 		break;
419 	}
420 
421 	mpt_lprt(mpt, MPT_PRT_INFO,
422 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
423 	    "Speed %u-Gbit\n", topology,
424 	    mpt->mpt_fcport_page0.WWNN.High,
425 	    mpt->mpt_fcport_page0.WWNN.Low,
426 	    mpt->mpt_fcport_page0.WWPN.High,
427 	    mpt->mpt_fcport_page0.WWPN.Low,
428 	    speed);
429 
430 	return (0);
431 }
432 
433 /*
434  * Set FC configuration information.
435  */
436 static int
437 mpt_set_initial_config_fc(struct mpt_softc *mpt)
438 {
439 #if	0
440 	CONFIG_PAGE_FC_PORT_1 fc;
441 	U32 fl;
442 	int r, doit = 0;
443 
444 	if ((mpt->role & MPT_ROLE_TARGET) == 0) {
445 		return (0);
446 	}
447 
448 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
449 	    &fc.Header, FALSE, 5000);
450 	if (r) {
451 		return (mpt_fc_reset_link(mpt, 1));
452 	}
453 
454 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 0,
455 	    &fc.Header, sizeof (fc), FALSE, 5000);
456 	if (r) {
457 		return (mpt_fc_reset_link(mpt, 1));
458 	}
459 
460 	fl = le32toh(fc.Flags);
461 	if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
462 		fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
463 		doit = 1;
464 	}
465 	if (doit) {
466 		const char *cc;
467 
468 		mpt_lprt(mpt, MPT_PRT_INFO,
469 		    "FC Port Page 1: New Flags %x \n", fl);
470 		fc.Flags = htole32(fl);
471 		r = mpt_write_cfg_page(mpt,
472 		    MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT, 0, &fc.Header,
473 		    sizeof(fc), FALSE, 5000);
474 		if (r != 0) {
475 			cc = "FC PORT PAGE1 UPDATE: FAILED\n";
476 		} else {
477 			cc = "FC PORT PAGE1 UPDATED: SYSTEM NEEDS RESET\n";
478 		}
479 		mpt_prt(mpt, cc);
480 	}
481 #endif
482 	return (0);
483 }
484 
485 /*
486  * Read SAS configuration information. Nothing to do yet.
487  */
488 static int
489 mpt_read_config_info_sas(struct mpt_softc *mpt)
490 {
491 	return (0);
492 }
493 
494 /*
495  * Set SAS configuration information. Nothing to do yet.
496  */
497 static int
498 mpt_set_initial_config_sas(struct mpt_softc *mpt)
499 {
500 	return (0);
501 }
502 
503 /*
504  * Read SCSI configuration information
505  */
506 static int
507 mpt_read_config_info_spi(struct mpt_softc *mpt)
508 {
509 	int rv, i;
510 
511 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
512 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
513 	if (rv) {
514 		return (-1);
515 	}
516 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
517 	    mpt->mpt_port_page0.Header.PageVersion,
518 	    mpt->mpt_port_page0.Header.PageLength,
519 	    mpt->mpt_port_page0.Header.PageNumber,
520 	    mpt->mpt_port_page0.Header.PageType);
521 
522 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
523 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
524 	if (rv) {
525 		return (-1);
526 	}
527 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
528 	    mpt->mpt_port_page1.Header.PageVersion,
529 	    mpt->mpt_port_page1.Header.PageLength,
530 	    mpt->mpt_port_page1.Header.PageNumber,
531 	    mpt->mpt_port_page1.Header.PageType);
532 
533 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
534 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
535 	if (rv) {
536 		return (-1);
537 	}
538 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
539 	    mpt->mpt_port_page2.Header.PageVersion,
540 	    mpt->mpt_port_page2.Header.PageLength,
541 	    mpt->mpt_port_page2.Header.PageNumber,
542 	    mpt->mpt_port_page2.Header.PageType);
543 
544 	for (i = 0; i < 16; i++) {
545 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
546 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
547 		if (rv) {
548 			return (-1);
549 		}
550 		mpt_lprt(mpt, MPT_PRT_DEBUG,
551 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
552 		    mpt->mpt_dev_page0[i].Header.PageVersion,
553 		    mpt->mpt_dev_page0[i].Header.PageLength,
554 		    mpt->mpt_dev_page0[i].Header.PageNumber,
555 		    mpt->mpt_dev_page0[i].Header.PageType);
556 
557 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
558 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
559 		if (rv) {
560 			return (-1);
561 		}
562 		mpt_lprt(mpt, MPT_PRT_DEBUG,
563 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
564 		    mpt->mpt_dev_page1[i].Header.PageVersion,
565 		    mpt->mpt_dev_page1[i].Header.PageLength,
566 		    mpt->mpt_dev_page1[i].Header.PageNumber,
567 		    mpt->mpt_dev_page1[i].Header.PageType);
568 	}
569 
570 	/*
571 	 * At this point, we don't *have* to fail. As long as we have
572 	 * valid config header information, we can (barely) lurch
573 	 * along.
574 	 */
575 
576 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
577 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
578 	if (rv) {
579 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
580 	} else {
581 		mpt_lprt(mpt, MPT_PRT_DEBUG,
582 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
583 		    mpt->mpt_port_page0.Capabilities,
584 		    mpt->mpt_port_page0.PhysicalInterface);
585 	}
586 
587 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
588 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
589 	if (rv) {
590 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
591 	} else {
592 		mpt_lprt(mpt, MPT_PRT_DEBUG,
593 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
594 		    mpt->mpt_port_page1.Configuration,
595 		    mpt->mpt_port_page1.OnBusTimerValue);
596 	}
597 
598 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
599 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
600 	if (rv) {
601 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
602 	} else {
603 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
604 		    "Port Page 2: Flags %x Settings %x\n",
605 		    mpt->mpt_port_page2.PortFlags,
606 		    mpt->mpt_port_page2.PortSettings);
607 		for (i = 0; i < 16; i++) {
608 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
609 		  	    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
610 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
611 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
612 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
613 		}
614 	}
615 
616 	for (i = 0; i < 16; i++) {
617 		rv = mpt_read_cur_cfg_page(mpt, i,
618 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
619 		    FALSE, 5000);
620 		if (rv) {
621 			mpt_prt(mpt,
622 			    "cannot read SPI Target %d Device Page 0\n", i);
623 			continue;
624 		}
625 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
626 		    "target %d page 0: Negotiated Params %x Information %x\n",
627 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
628 		    mpt->mpt_dev_page0[i].Information);
629 
630 		rv = mpt_read_cur_cfg_page(mpt, i,
631 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
632 		    FALSE, 5000);
633 		if (rv) {
634 			mpt_prt(mpt,
635 			    "cannot read SPI Target %d Device Page 1\n", i);
636 			continue;
637 		}
638 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
639 		    "target %d page 1: Requested Params %x Configuration %x\n",
640 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
641 		    mpt->mpt_dev_page1[i].Configuration);
642 	}
643 	return (0);
644 }
645 
646 /*
647  * Validate SPI configuration information.
648  *
649  * In particular, validate SPI Port Page 1.
650  */
651 static int
652 mpt_set_initial_config_spi(struct mpt_softc *mpt)
653 {
654 	int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
655 	int error;
656 
657 	mpt->mpt_disc_enable = 0xff;
658 	mpt->mpt_tag_enable = 0;
659 
660 	if (mpt->mpt_port_page1.Configuration != pp1val) {
661 		CONFIG_PAGE_SCSI_PORT_1 tmp;
662 
663 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
664 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
665 		tmp = mpt->mpt_port_page1;
666 		tmp.Configuration = pp1val;
667 		error = mpt_write_cur_cfg_page(mpt, 0,
668 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
669 		if (error) {
670 			return (-1);
671 		}
672 		error = mpt_read_cur_cfg_page(mpt, 0,
673 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
674 		if (error) {
675 			return (-1);
676 		}
677 		if (tmp.Configuration != pp1val) {
678 			mpt_prt(mpt,
679 			    "failed to reset SPI Port Page 1 Config value\n");
680 			return (-1);
681 		}
682 		mpt->mpt_port_page1 = tmp;
683 	}
684 
685 	/*
686 	 * The purpose of this exercise is to get
687 	 * all targets back to async/narrow.
688 	 *
689 	 * We skip this step if the BIOS has already negotiated
690 	 * speeds with the targets and does not require us to
691 	 * do Domain Validation.
692 	 */
693 	i = mpt->mpt_port_page2.PortSettings &
694 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
695 	j = mpt->mpt_port_page2.PortFlags &
696 	    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
697 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* &&
698 	    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) {
699 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
700 		    "honoring BIOS transfer negotiations\n");
701 	} else {
702 		for (i = 0; i < 16; i++) {
703 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
704 			mpt->mpt_dev_page1[i].Configuration = 0;
705 			(void) mpt_update_spi_config(mpt, i);
706 		}
707 	}
708 	return (0);
709 }
710 
711 int
712 mpt_cam_enable(struct mpt_softc *mpt)
713 {
714 	if (mpt->is_fc) {
715 		if (mpt_read_config_info_fc(mpt)) {
716 			return (EIO);
717 		}
718 		if (mpt_set_initial_config_fc(mpt)) {
719 			return (EIO);
720 		}
721 	} else if (mpt->is_sas) {
722 		if (mpt_read_config_info_sas(mpt)) {
723 			return (EIO);
724 		}
725 		if (mpt_set_initial_config_sas(mpt)) {
726 			return (EIO);
727 		}
728 	} else if (mpt->is_spi) {
729 		if (mpt_read_config_info_spi(mpt)) {
730 			return (EIO);
731 		}
732 		if (mpt_set_initial_config_spi(mpt)) {
733 			return (EIO);
734 		}
735 	}
736 	return (0);
737 }
738 
739 void
740 mpt_cam_detach(struct mpt_softc *mpt)
741 {
742 	mpt_handler_t handler;
743 
744 	mpt_terminate_recovery_thread(mpt);
745 
746 	handler.reply_handler = mpt_scsi_reply_handler;
747 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
748 			       scsi_io_handler_id);
749 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
750 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
751 			       scsi_tmf_handler_id);
752 	handler.reply_handler = mpt_fc_els_reply_handler;
753 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
754 			       fc_els_handler_id);
755 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
756 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
757 			       mpt->scsi_tgt_handler_id);
758 
759 	if (mpt->tmf_req != NULL) {
760 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
761 		mpt_free_request(mpt, mpt->tmf_req);
762 		mpt->tmf_req = NULL;
763 	}
764 
765 	if (mpt->sim != NULL) {
766 		MPTLOCK_2_CAMLOCK(mpt);
767 		xpt_free_path(mpt->path);
768 		xpt_bus_deregister(cam_sim_path(mpt->sim));
769 		cam_sim_free(mpt->sim, TRUE);
770 		mpt->sim = NULL;
771 		CAMLOCK_2_MPTLOCK(mpt);
772 	}
773 
774 	if (mpt->phydisk_sim != NULL) {
775 		MPTLOCK_2_CAMLOCK(mpt);
776 		xpt_free_path(mpt->phydisk_path);
777 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
778 		cam_sim_free(mpt->phydisk_sim, TRUE);
779 		mpt->phydisk_sim = NULL;
780 		CAMLOCK_2_MPTLOCK(mpt);
781 	}
782 }
783 
784 /* This routine is used after a system crash to dump core onto the swap device.
785  */
786 static void
787 mpt_poll(struct cam_sim *sim)
788 {
789 	struct mpt_softc *mpt;
790 
791 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
792 	MPT_LOCK(mpt);
793 	mpt_intr(mpt);
794 	MPT_UNLOCK(mpt);
795 }
796 
797 /*
798  * Watchdog timeout routine for SCSI requests.
799  */
800 static void
801 mpt_timeout(void *arg)
802 {
803 	union ccb	 *ccb;
804 	struct mpt_softc *mpt;
805 	request_t	 *req;
806 
807 	ccb = (union ccb *)arg;
808 	mpt = ccb->ccb_h.ccb_mpt_ptr;
809 
810 	MPT_LOCK(mpt);
811 	req = ccb->ccb_h.ccb_req_ptr;
812 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
813 	    req->serno, ccb, req->ccb);
814 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
815 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
816 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
817 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
818 		req->state |= REQ_STATE_TIMEDOUT;
819 		mpt_wakeup_recovery_thread(mpt);
820 	}
821 	MPT_UNLOCK(mpt);
822 }
823 
824 /*
825  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
826  *
827  * Takes a list of physical segments and builds the SGL for SCSI IO command
828  * and forwards the commard to the IOC after one last check that CAM has not
829  * aborted the transaction.
830  */
831 static void
832 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
833 {
834 	request_t *req, *trq;
835 	char *mpt_off;
836 	union ccb *ccb;
837 	struct mpt_softc *mpt;
838 	int seg, first_lim;
839 	uint32_t flags, nxt_off;
840 	void *sglp = NULL;
841 	MSG_REQUEST_HEADER *hdrp;
842 	SGE_SIMPLE64 *se;
843 	SGE_CHAIN64 *ce;
844 	int istgt = 0;
845 
846 	req = (request_t *)arg;
847 	ccb = req->ccb;
848 
849 	mpt = ccb->ccb_h.ccb_mpt_ptr;
850 	req = ccb->ccb_h.ccb_req_ptr;
851 
852 	hdrp = req->req_vbuf;
853 	mpt_off = req->req_vbuf;
854 
855 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
856 		error = EFBIG;
857 	}
858 
859 	if (error == 0) {
860 		switch (hdrp->Function) {
861 		case MPI_FUNCTION_SCSI_IO_REQUEST:
862 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
863 			istgt = 0;
864 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
865 			break;
866 		case MPI_FUNCTION_TARGET_ASSIST:
867 			istgt = 1;
868 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
869 			break;
870 		default:
871 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
872 			    hdrp->Function);
873 			error = EINVAL;
874 			break;
875 		}
876 	}
877 
878 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
879 		error = EFBIG;
880 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
881 		    nseg, mpt->max_seg_cnt);
882 	}
883 
884 bad:
885 	if (error != 0) {
886 		if (error != EFBIG && error != ENOMEM) {
887 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
888 		}
889 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
890 			cam_status status;
891 			mpt_freeze_ccb(ccb);
892 			if (error == EFBIG) {
893 				status = CAM_REQ_TOO_BIG;
894 			} else if (error == ENOMEM) {
895 				if (mpt->outofbeer == 0) {
896 					mpt->outofbeer = 1;
897 					xpt_freeze_simq(mpt->sim, 1);
898 					mpt_lprt(mpt, MPT_PRT_DEBUG,
899 					    "FREEZEQ\n");
900 				}
901 				status = CAM_REQUEUE_REQ;
902 			} else {
903 				status = CAM_REQ_CMP_ERR;
904 			}
905 			mpt_set_ccb_status(ccb, status);
906 		}
907 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
908 			request_t *cmd_req =
909 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
910 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
911 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
912 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
913 		}
914 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
915 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
916 		xpt_done(ccb);
917 		CAMLOCK_2_MPTLOCK(mpt);
918 		mpt_free_request(mpt, req);
919 		MPTLOCK_2_CAMLOCK(mpt);
920 		return;
921 	}
922 
923 	/*
924 	 * No data to transfer?
925 	 * Just make a single simple SGL with zero length.
926 	 */
927 
928 	if (mpt->verbose >= MPT_PRT_DEBUG) {
929 		int tidx = ((char *)sglp) - mpt_off;
930 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
931 	}
932 
933 	if (nseg == 0) {
934 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
935 		MPI_pSGE_SET_FLAGS(se1,
936 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
937 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
938 		goto out;
939 	}
940 
941 
942 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
943 	if (istgt == 0) {
944 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
945 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
946 		}
947 	} else {
948 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
949 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
950 		}
951 	}
952 
953 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
954 		bus_dmasync_op_t op;
955 		if (istgt == 0) {
956 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
957 				op = BUS_DMASYNC_PREREAD;
958 			} else {
959 				op = BUS_DMASYNC_PREWRITE;
960 			}
961 		} else {
962 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
963 				op = BUS_DMASYNC_PREWRITE;
964 			} else {
965 				op = BUS_DMASYNC_PREREAD;
966 			}
967 		}
968 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
969 	}
970 
971 	/*
972 	 * Okay, fill in what we can at the end of the command frame.
973 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
974 	 * the command frame.
975 	 *
976 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
977 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
978 	 * that.
979 	 */
980 
981 	if (nseg < MPT_NSGL_FIRST(mpt)) {
982 		first_lim = nseg;
983 	} else {
984 		/*
985 		 * Leave room for CHAIN element
986 		 */
987 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
988 	}
989 
990 	se = (SGE_SIMPLE64 *) sglp;
991 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
992 		uint32_t tf;
993 
994 		memset(se, 0, sizeof (*se));
995 		se->Address.Low = dm_segs->ds_addr;
996 		if (sizeof(bus_addr_t) > 4) {
997 			se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
998 		}
999 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1000 		tf = flags;
1001 		if (seg == first_lim - 1) {
1002 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1003 		}
1004 		if (seg == nseg - 1) {
1005 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1006 				MPI_SGE_FLAGS_END_OF_BUFFER;
1007 		}
1008 		MPI_pSGE_SET_FLAGS(se, tf);
1009 	}
1010 
1011 	if (seg == nseg) {
1012 		goto out;
1013 	}
1014 
1015 	/*
1016 	 * Tell the IOC where to find the first chain element.
1017 	 */
1018 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1019 	nxt_off = MPT_RQSL(mpt);
1020 	trq = req;
1021 
1022 	/*
1023 	 * Make up the rest of the data segments out of a chain element
1024 	 * (contiained in the current request frame) which points to
1025 	 * SIMPLE64 elements in the next request frame, possibly ending
1026 	 * with *another* chain element (if there's more).
1027 	 */
1028 	while (seg < nseg) {
1029 		int this_seg_lim;
1030 		uint32_t tf, cur_off;
1031 		bus_addr_t chain_list_addr;
1032 
1033 		/*
1034 		 * Point to the chain descriptor. Note that the chain
1035 		 * descriptor is at the end of the *previous* list (whether
1036 		 * chain or simple).
1037 		 */
1038 		ce = (SGE_CHAIN64 *) se;
1039 
1040 		/*
1041 		 * Before we change our current pointer, make  sure we won't
1042 		 * overflow the request area with this frame. Note that we
1043 		 * test against 'greater than' here as it's okay in this case
1044 		 * to have next offset be just outside the request area.
1045 		 */
1046 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1047 			nxt_off = MPT_REQUEST_AREA;
1048 			goto next_chain;
1049 		}
1050 
1051 		/*
1052 		 * Set our SGE element pointer to the beginning of the chain
1053 		 * list and update our next chain list offset.
1054 		 */
1055 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1056 		cur_off = nxt_off;
1057 		nxt_off += MPT_RQSL(mpt);
1058 
1059 		/*
1060 		 * Now initialized the chain descriptor.
1061 		 */
1062 		memset(ce, 0, sizeof (*ce));
1063 
1064 		/*
1065 		 * Get the physical address of the chain list.
1066 		 */
1067 		chain_list_addr = trq->req_pbuf;
1068 		chain_list_addr += cur_off;
1069 		if (sizeof (bus_addr_t) > 4) {
1070 			ce->Address.High =
1071 			    (uint32_t) ((uint64_t)chain_list_addr >> 32);
1072 		}
1073 		ce->Address.Low = (uint32_t) chain_list_addr;
1074 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1075 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1076 
1077 		/*
1078 		 * If we have more than a frame's worth of segments left,
1079 		 * set up the chain list to have the last element be another
1080 		 * chain descriptor.
1081 		 */
1082 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1083 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1084 			/*
1085 			 * The length of the chain is the length in bytes of the
1086 			 * number of segments plus the next chain element.
1087 			 *
1088 			 * The next chain descriptor offset is the length,
1089 			 * in words, of the number of segments.
1090 			 */
1091 			ce->Length = (this_seg_lim - seg) *
1092 			    sizeof (SGE_SIMPLE64);
1093 			ce->NextChainOffset = ce->Length >> 2;
1094 			ce->Length += sizeof (SGE_CHAIN64);
1095 		} else {
1096 			this_seg_lim = nseg;
1097 			ce->Length = (this_seg_lim - seg) *
1098 			    sizeof (SGE_SIMPLE64);
1099 		}
1100 
1101 		/*
1102 		 * Fill in the chain list SGE elements with our segment data.
1103 		 *
1104 		 * If we're the last element in this chain list, set the last
1105 		 * element flag. If we're the completely last element period,
1106 		 * set the end of list and end of buffer flags.
1107 		 */
1108 		while (seg < this_seg_lim) {
1109 			memset(se, 0, sizeof (*se));
1110 			se->Address.Low = dm_segs->ds_addr;
1111 			if (sizeof (bus_addr_t) > 4) {
1112 				se->Address.High =
1113 				    ((uint64_t)dm_segs->ds_addr) >> 32;
1114 			}
1115 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1116 			tf = flags;
1117 			if (seg ==  this_seg_lim - 1) {
1118 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1119 			}
1120 			if (seg == nseg - 1) {
1121 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1122 					MPI_SGE_FLAGS_END_OF_BUFFER;
1123 			}
1124 			MPI_pSGE_SET_FLAGS(se, tf);
1125 			se++;
1126 			seg++;
1127 			dm_segs++;
1128 		}
1129 
1130     next_chain:
1131 		/*
1132 		 * If we have more segments to do and we've used up all of
1133 		 * the space in a request area, go allocate another one
1134 		 * and chain to that.
1135 		 */
1136 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1137 			request_t *nrq;
1138 
1139 			CAMLOCK_2_MPTLOCK(mpt);
1140 			nrq = mpt_get_request(mpt, FALSE);
1141 			MPTLOCK_2_CAMLOCK(mpt);
1142 
1143 			if (nrq == NULL) {
1144 				error = ENOMEM;
1145 				goto bad;
1146 			}
1147 
1148 			/*
1149 			 * Append the new request area on the tail of our list.
1150 			 */
1151 			if ((trq = req->chain) == NULL) {
1152 				req->chain = nrq;
1153 			} else {
1154 				while (trq->chain != NULL) {
1155 					trq = trq->chain;
1156 				}
1157 				trq->chain = nrq;
1158 			}
1159 			trq = nrq;
1160 			mpt_off = trq->req_vbuf;
1161 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1162 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1163 			}
1164 			nxt_off = 0;
1165 		}
1166 	}
1167 out:
1168 
1169 	/*
1170 	 * Last time we need to check if this CCB needs to be aborted.
1171 	 */
1172 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1173 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1174 			request_t *cmd_req =
1175 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1176 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1177 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1178 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1179 		}
1180 		mpt_prt(mpt,
1181 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1182 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1183 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1184 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1185 		}
1186 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1187 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1188 		xpt_done(ccb);
1189 		CAMLOCK_2_MPTLOCK(mpt);
1190 		mpt_free_request(mpt, req);
1191 		MPTLOCK_2_CAMLOCK(mpt);
1192 		return;
1193 	}
1194 
1195 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1196 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1197 		ccb->ccb_h.timeout_ch =
1198 			timeout(mpt_timeout, (caddr_t)ccb,
1199 				(ccb->ccb_h.timeout * hz) / 1000);
1200 	} else {
1201 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1202 	}
1203 	if (mpt->verbose > MPT_PRT_DEBUG) {
1204 		int nc = 0;
1205 		mpt_print_request(req->req_vbuf);
1206 		for (trq = req->chain; trq; trq = trq->chain) {
1207 			printf("  Additional Chain Area %d\n", nc++);
1208 			mpt_dump_sgl(trq->req_vbuf, 0);
1209 		}
1210 	}
1211 
1212 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1213 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1214 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1215 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1216 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1217 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1218 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1219 		} else {
1220 			tgt->state = TGT_STATE_MOVING_DATA;
1221 		}
1222 #else
1223 		tgt->state = TGT_STATE_MOVING_DATA;
1224 #endif
1225 	}
1226 	CAMLOCK_2_MPTLOCK(mpt);
1227 	mpt_send_cmd(mpt, req);
1228 	MPTLOCK_2_CAMLOCK(mpt);
1229 }
1230 
1231 static void
1232 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1233 {
1234 	request_t *req, *trq;
1235 	char *mpt_off;
1236 	union ccb *ccb;
1237 	struct mpt_softc *mpt;
1238 	int seg, first_lim;
1239 	uint32_t flags, nxt_off;
1240 	void *sglp = NULL;
1241 	MSG_REQUEST_HEADER *hdrp;
1242 	SGE_SIMPLE32 *se;
1243 	SGE_CHAIN32 *ce;
1244 	int istgt = 0;
1245 
1246 	req = (request_t *)arg;
1247 	ccb = req->ccb;
1248 
1249 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1250 	req = ccb->ccb_h.ccb_req_ptr;
1251 
1252 	hdrp = req->req_vbuf;
1253 	mpt_off = req->req_vbuf;
1254 
1255 
1256 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1257 		error = EFBIG;
1258 	}
1259 
1260 	if (error == 0) {
1261 		switch (hdrp->Function) {
1262 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1263 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1264 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1265 			break;
1266 		case MPI_FUNCTION_TARGET_ASSIST:
1267 			istgt = 1;
1268 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1269 			break;
1270 		default:
1271 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1272 			    hdrp->Function);
1273 			error = EINVAL;
1274 			break;
1275 		}
1276 	}
1277 
1278 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1279 		error = EFBIG;
1280 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1281 		    nseg, mpt->max_seg_cnt);
1282 	}
1283 
1284 bad:
1285 	if (error != 0) {
1286 		if (error != EFBIG && error != ENOMEM) {
1287 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1288 		}
1289 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1290 			cam_status status;
1291 			mpt_freeze_ccb(ccb);
1292 			if (error == EFBIG) {
1293 				status = CAM_REQ_TOO_BIG;
1294 			} else if (error == ENOMEM) {
1295 				if (mpt->outofbeer == 0) {
1296 					mpt->outofbeer = 1;
1297 					xpt_freeze_simq(mpt->sim, 1);
1298 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1299 					    "FREEZEQ\n");
1300 				}
1301 				status = CAM_REQUEUE_REQ;
1302 			} else {
1303 				status = CAM_REQ_CMP_ERR;
1304 			}
1305 			mpt_set_ccb_status(ccb, status);
1306 		}
1307 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1308 			request_t *cmd_req =
1309 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1310 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1311 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1312 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1313 		}
1314 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1315 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1316 		xpt_done(ccb);
1317 		CAMLOCK_2_MPTLOCK(mpt);
1318 		mpt_free_request(mpt, req);
1319 		MPTLOCK_2_CAMLOCK(mpt);
1320 		return;
1321 	}
1322 
1323 	/*
1324 	 * No data to transfer?
1325 	 * Just make a single simple SGL with zero length.
1326 	 */
1327 
1328 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1329 		int tidx = ((char *)sglp) - mpt_off;
1330 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1331 	}
1332 
1333 	if (nseg == 0) {
1334 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1335 		MPI_pSGE_SET_FLAGS(se1,
1336 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1337 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1338 		goto out;
1339 	}
1340 
1341 
1342 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1343 	if (istgt == 0) {
1344 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1345 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1346 		}
1347 	} else {
1348 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1349 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1350 		}
1351 	}
1352 
1353 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1354 		bus_dmasync_op_t op;
1355 		if (istgt) {
1356 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1357 				op = BUS_DMASYNC_PREREAD;
1358 			} else {
1359 				op = BUS_DMASYNC_PREWRITE;
1360 			}
1361 		} else {
1362 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1363 				op = BUS_DMASYNC_PREWRITE;
1364 			} else {
1365 				op = BUS_DMASYNC_PREREAD;
1366 			}
1367 		}
1368 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1369 	}
1370 
1371 	/*
1372 	 * Okay, fill in what we can at the end of the command frame.
1373 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1374 	 * the command frame.
1375 	 *
1376 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1377 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1378 	 * that.
1379 	 */
1380 
1381 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1382 		first_lim = nseg;
1383 	} else {
1384 		/*
1385 		 * Leave room for CHAIN element
1386 		 */
1387 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1388 	}
1389 
1390 	se = (SGE_SIMPLE32 *) sglp;
1391 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1392 		uint32_t tf;
1393 
1394 		memset(se, 0,sizeof (*se));
1395 		se->Address = dm_segs->ds_addr;
1396 
1397 
1398 
1399 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1400 		tf = flags;
1401 		if (seg == first_lim - 1) {
1402 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1403 		}
1404 		if (seg == nseg - 1) {
1405 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1406 				MPI_SGE_FLAGS_END_OF_BUFFER;
1407 		}
1408 		MPI_pSGE_SET_FLAGS(se, tf);
1409 	}
1410 
1411 	if (seg == nseg) {
1412 		goto out;
1413 	}
1414 
1415 	/*
1416 	 * Tell the IOC where to find the first chain element.
1417 	 */
1418 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1419 	nxt_off = MPT_RQSL(mpt);
1420 	trq = req;
1421 
1422 	/*
1423 	 * Make up the rest of the data segments out of a chain element
1424 	 * (contiained in the current request frame) which points to
1425 	 * SIMPLE32 elements in the next request frame, possibly ending
1426 	 * with *another* chain element (if there's more).
1427 	 */
1428 	while (seg < nseg) {
1429 		int this_seg_lim;
1430 		uint32_t tf, cur_off;
1431 		bus_addr_t chain_list_addr;
1432 
1433 		/*
1434 		 * Point to the chain descriptor. Note that the chain
1435 		 * descriptor is at the end of the *previous* list (whether
1436 		 * chain or simple).
1437 		 */
1438 		ce = (SGE_CHAIN32 *) se;
1439 
1440 		/*
1441 		 * Before we change our current pointer, make  sure we won't
1442 		 * overflow the request area with this frame. Note that we
1443 		 * test against 'greater than' here as it's okay in this case
1444 		 * to have next offset be just outside the request area.
1445 		 */
1446 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1447 			nxt_off = MPT_REQUEST_AREA;
1448 			goto next_chain;
1449 		}
1450 
1451 		/*
1452 		 * Set our SGE element pointer to the beginning of the chain
1453 		 * list and update our next chain list offset.
1454 		 */
1455 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1456 		cur_off = nxt_off;
1457 		nxt_off += MPT_RQSL(mpt);
1458 
1459 		/*
1460 		 * Now initialized the chain descriptor.
1461 		 */
1462 		memset(ce, 0, sizeof (*ce));
1463 
1464 		/*
1465 		 * Get the physical address of the chain list.
1466 		 */
1467 		chain_list_addr = trq->req_pbuf;
1468 		chain_list_addr += cur_off;
1469 
1470 
1471 
1472 		ce->Address = chain_list_addr;
1473 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1474 
1475 
1476 		/*
1477 		 * If we have more than a frame's worth of segments left,
1478 		 * set up the chain list to have the last element be another
1479 		 * chain descriptor.
1480 		 */
1481 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1482 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1483 			/*
1484 			 * The length of the chain is the length in bytes of the
1485 			 * number of segments plus the next chain element.
1486 			 *
1487 			 * The next chain descriptor offset is the length,
1488 			 * in words, of the number of segments.
1489 			 */
1490 			ce->Length = (this_seg_lim - seg) *
1491 			    sizeof (SGE_SIMPLE32);
1492 			ce->NextChainOffset = ce->Length >> 2;
1493 			ce->Length += sizeof (SGE_CHAIN32);
1494 		} else {
1495 			this_seg_lim = nseg;
1496 			ce->Length = (this_seg_lim - seg) *
1497 			    sizeof (SGE_SIMPLE32);
1498 		}
1499 
1500 		/*
1501 		 * Fill in the chain list SGE elements with our segment data.
1502 		 *
1503 		 * If we're the last element in this chain list, set the last
1504 		 * element flag. If we're the completely last element period,
1505 		 * set the end of list and end of buffer flags.
1506 		 */
1507 		while (seg < this_seg_lim) {
1508 			memset(se, 0, sizeof (*se));
1509 			se->Address = dm_segs->ds_addr;
1510 
1511 
1512 
1513 
1514 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1515 			tf = flags;
1516 			if (seg ==  this_seg_lim - 1) {
1517 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1518 			}
1519 			if (seg == nseg - 1) {
1520 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1521 					MPI_SGE_FLAGS_END_OF_BUFFER;
1522 			}
1523 			MPI_pSGE_SET_FLAGS(se, tf);
1524 			se++;
1525 			seg++;
1526 			dm_segs++;
1527 		}
1528 
1529     next_chain:
1530 		/*
1531 		 * If we have more segments to do and we've used up all of
1532 		 * the space in a request area, go allocate another one
1533 		 * and chain to that.
1534 		 */
1535 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1536 			request_t *nrq;
1537 
1538 			CAMLOCK_2_MPTLOCK(mpt);
1539 			nrq = mpt_get_request(mpt, FALSE);
1540 			MPTLOCK_2_CAMLOCK(mpt);
1541 
1542 			if (nrq == NULL) {
1543 				error = ENOMEM;
1544 				goto bad;
1545 			}
1546 
1547 			/*
1548 			 * Append the new request area on the tail of our list.
1549 			 */
1550 			if ((trq = req->chain) == NULL) {
1551 				req->chain = nrq;
1552 			} else {
1553 				while (trq->chain != NULL) {
1554 					trq = trq->chain;
1555 				}
1556 				trq->chain = nrq;
1557 			}
1558 			trq = nrq;
1559 			mpt_off = trq->req_vbuf;
1560 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1561 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1562 			}
1563 			nxt_off = 0;
1564 		}
1565 	}
1566 out:
1567 
1568 	/*
1569 	 * Last time we need to check if this CCB needs to be aborted.
1570 	 */
1571 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1572 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1573 			request_t *cmd_req =
1574 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1575 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1576 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1577 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1578 		}
1579 		mpt_prt(mpt,
1580 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1581 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1582 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1583 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1584 		}
1585 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1586 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1587 		xpt_done(ccb);
1588 		CAMLOCK_2_MPTLOCK(mpt);
1589 		mpt_free_request(mpt, req);
1590 		MPTLOCK_2_CAMLOCK(mpt);
1591 		return;
1592 	}
1593 
1594 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1595 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1596 		ccb->ccb_h.timeout_ch =
1597 			timeout(mpt_timeout, (caddr_t)ccb,
1598 				(ccb->ccb_h.timeout * hz) / 1000);
1599 	} else {
1600 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1601 	}
1602 	if (mpt->verbose > MPT_PRT_DEBUG) {
1603 		int nc = 0;
1604 		mpt_print_request(req->req_vbuf);
1605 		for (trq = req->chain; trq; trq = trq->chain) {
1606 			printf("  Additional Chain Area %d\n", nc++);
1607 			mpt_dump_sgl(trq->req_vbuf, 0);
1608 		}
1609 	}
1610 
1611 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1612 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1613 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1614 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1615 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1616 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1617 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1618 		} else {
1619 			tgt->state = TGT_STATE_MOVING_DATA;
1620 		}
1621 #else
1622 		tgt->state = TGT_STATE_MOVING_DATA;
1623 #endif
1624 	}
1625 	CAMLOCK_2_MPTLOCK(mpt);
1626 	mpt_send_cmd(mpt, req);
1627 	MPTLOCK_2_CAMLOCK(mpt);
1628 }
1629 
1630 static void
1631 mpt_start(struct cam_sim *sim, union ccb *ccb)
1632 {
1633 	request_t *req;
1634 	struct mpt_softc *mpt;
1635 	MSG_SCSI_IO_REQUEST *mpt_req;
1636 	struct ccb_scsiio *csio = &ccb->csio;
1637 	struct ccb_hdr *ccbh = &ccb->ccb_h;
1638 	bus_dmamap_callback_t *cb;
1639 	target_id_t tgt;
1640 	int raid_passthru;
1641 
1642 	/* Get the pointer for the physical addapter */
1643 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1644 	raid_passthru = (sim == mpt->phydisk_sim);
1645 
1646 	CAMLOCK_2_MPTLOCK(mpt);
1647 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
1648 		if (mpt->outofbeer == 0) {
1649 			mpt->outofbeer = 1;
1650 			xpt_freeze_simq(mpt->sim, 1);
1651 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
1652 		}
1653 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1654 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
1655 		MPTLOCK_2_CAMLOCK(mpt);
1656 		xpt_done(ccb);
1657 		return;
1658 	}
1659 #ifdef	INVARIANTS
1660 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
1661 #endif
1662 	MPTLOCK_2_CAMLOCK(mpt);
1663 
1664 	if (sizeof (bus_addr_t) > 4) {
1665 		cb = mpt_execute_req_a64;
1666 	} else {
1667 		cb = mpt_execute_req;
1668 	}
1669 
1670 	/*
1671 	 * Link the ccb and the request structure so we can find
1672 	 * the other knowing either the request or the ccb
1673 	 */
1674 	req->ccb = ccb;
1675 	ccb->ccb_h.ccb_req_ptr = req;
1676 
1677 	/* Now we build the command for the IOC */
1678 	mpt_req = req->req_vbuf;
1679 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
1680 
1681 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1682 	if (raid_passthru) {
1683 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
1684 		CAMLOCK_2_MPTLOCK(mpt);
1685 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
1686 			MPTLOCK_2_CAMLOCK(mpt);
1687 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1688 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
1689 			xpt_done(ccb);
1690 			return;
1691 		}
1692 		MPTLOCK_2_CAMLOCK(mpt);
1693 		mpt_req->Bus = 0;	/* we never set bus here */
1694 	} else {
1695 		tgt = ccb->ccb_h.target_id;
1696 		mpt_req->Bus = 0;	/* XXX */
1697 
1698 	}
1699 	mpt_req->SenseBufferLength =
1700 		(csio->sense_len < MPT_SENSE_SIZE) ?
1701 		 csio->sense_len : MPT_SENSE_SIZE;
1702 
1703 	/*
1704 	 * We use the message context to find the request structure when we
1705 	 * Get the command completion interrupt from the IOC.
1706 	 */
1707 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
1708 
1709 	/* Which physical device to do the I/O on */
1710 	mpt_req->TargetID = tgt;
1711 
1712 	/* We assume a single level LUN type */
1713 	if (ccb->ccb_h.target_lun >= 256) {
1714 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
1715 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
1716 	} else {
1717 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
1718 	}
1719 
1720 	/* Set the direction of the transfer */
1721 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1722 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
1723 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1724 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
1725 	} else {
1726 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
1727 	}
1728 
1729 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1730 		switch(ccb->csio.tag_action) {
1731 		case MSG_HEAD_OF_Q_TAG:
1732 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
1733 			break;
1734 		case MSG_ACA_TASK:
1735 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
1736 			break;
1737 		case MSG_ORDERED_Q_TAG:
1738 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
1739 			break;
1740 		case MSG_SIMPLE_Q_TAG:
1741 		default:
1742 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1743 			break;
1744 		}
1745 	} else {
1746 		if (mpt->is_fc || mpt->is_sas) {
1747 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1748 		} else {
1749 			/* XXX No such thing for a target doing packetized. */
1750 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
1751 		}
1752 	}
1753 
1754 	if (mpt->is_spi) {
1755 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1756 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
1757 		}
1758 	}
1759 
1760 	/* Copy the scsi command block into place */
1761 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1762 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
1763 	} else {
1764 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
1765 	}
1766 
1767 	mpt_req->CDBLength = csio->cdb_len;
1768 	mpt_req->DataLength = csio->dxfer_len;
1769 	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
1770 
1771 	/*
1772 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
1773 	 */
1774 	if (mpt->verbose == MPT_PRT_DEBUG) {
1775 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
1776 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
1777 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
1778 		if (mpt_req->Control != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
1779 			mpt_prtc(mpt, "(%s %u byte%s ",
1780 			    (mpt_req->Control == MPI_SCSIIO_CONTROL_READ)?
1781 			    "read" : "write",  csio->dxfer_len,
1782 			    (csio->dxfer_len == 1)? ")" : "s)");
1783 		}
1784 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
1785 		    ccb->ccb_h.target_lun, req, req->serno);
1786 	}
1787 
1788 	/*
1789 	 * If we have any data to send with this command map it into bus space.
1790 	 */
1791 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1792 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
1793 			/*
1794 			 * We've been given a pointer to a single buffer.
1795 			 */
1796 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
1797 				/*
1798 				 * Virtual address that needs to translated into
1799 				 * one or more physical address ranges.
1800 				 */
1801 				int error;
1802 				int s = splsoftvm();
1803 				error = bus_dmamap_load(mpt->buffer_dmat,
1804 				    req->dmap, csio->data_ptr, csio->dxfer_len,
1805 				    cb, req, 0);
1806 				splx(s);
1807 				if (error == EINPROGRESS) {
1808 					/*
1809 					 * So as to maintain ordering,
1810 					 * freeze the controller queue
1811 					 * until our mapping is
1812 					 * returned.
1813 					 */
1814 					xpt_freeze_simq(mpt->sim, 1);
1815 					ccbh->status |= CAM_RELEASE_SIMQ;
1816 				}
1817 			} else {
1818 				/*
1819 				 * We have been given a pointer to single
1820 				 * physical buffer.
1821 				 */
1822 				struct bus_dma_segment seg;
1823 				seg.ds_addr =
1824 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1825 				seg.ds_len = csio->dxfer_len;
1826 				(*cb)(req, &seg, 1, 0);
1827 			}
1828 		} else {
1829 			/*
1830 			 * We have been given a list of addresses.
1831 			 * This case could be easily supported but they are not
1832 			 * currently generated by the CAM subsystem so there
1833 			 * is no point in wasting the time right now.
1834 			 */
1835 			struct bus_dma_segment *segs;
1836 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
1837 				(*cb)(req, NULL, 0, EFAULT);
1838 			} else {
1839 				/* Just use the segments provided */
1840 				segs = (struct bus_dma_segment *)csio->data_ptr;
1841 				(*cb)(req, segs, csio->sglist_cnt, 0);
1842 			}
1843 		}
1844 	} else {
1845 		(*cb)(req, NULL, 0, 0);
1846 	}
1847 }
1848 
1849 static int
1850 mpt_bus_reset(struct mpt_softc *mpt, int sleep_ok)
1851 {
1852 	int   error;
1853 	uint16_t status;
1854 	uint8_t response;
1855 
1856 	error = mpt_scsi_send_tmf(mpt, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1857 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
1858 	    0, 0, 0, 0, sleep_ok);
1859 
1860 	if (error != 0) {
1861 		/*
1862 		 * mpt_scsi_send_tmf hard resets on failure, so no
1863 		 * need to do so here.
1864 		 */
1865 		mpt_prt(mpt,
1866 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
1867 		return (EIO);
1868 	}
1869 
1870 	/* Wait for bus reset to be processed by the IOC. */
1871 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
1872 	    REQ_STATE_DONE, sleep_ok, 5000);
1873 
1874 	status = mpt->tmf_req->IOCStatus;
1875 	response = mpt->tmf_req->ResponseCode;
1876 	mpt->tmf_req->state = REQ_STATE_FREE;
1877 
1878 	if (error) {
1879 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
1880 		    "Resetting controller.\n");
1881 		mpt_reset(mpt, TRUE);
1882 		return (ETIMEDOUT);
1883 	}
1884 
1885 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1886 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
1887 		    "Resetting controller.\n", status);
1888 		mpt_reset(mpt, TRUE);
1889 		return (EIO);
1890 	}
1891 
1892 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
1893 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
1894 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
1895 		    "Resetting controller.\n", response);
1896 		mpt_reset(mpt, TRUE);
1897 		return (EIO);
1898 	}
1899 	return (0);
1900 }
1901 
1902 static int
1903 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
1904 {
1905 	int r = 0;
1906 	request_t *req;
1907 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
1908 
1909  	req = mpt_get_request(mpt, FALSE);
1910 	if (req == NULL) {
1911 		return (ENOMEM);
1912 	}
1913 	fc = req->req_vbuf;
1914 	memset(fc, 0, sizeof(*fc));
1915 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
1916 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
1917 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
1918 	mpt_send_cmd(mpt, req);
1919 	if (dowait) {
1920 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
1921 		    REQ_STATE_DONE, FALSE, 60 * 1000);
1922 		if (r == 0) {
1923 			mpt_free_request(mpt, req);
1924 		}
1925 	}
1926 	return (r);
1927 }
1928 
1929 static int
1930 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
1931 	      MSG_EVENT_NOTIFY_REPLY *msg)
1932 {
1933 	switch(msg->Event & 0xFF) {
1934 	case MPI_EVENT_UNIT_ATTENTION:
1935 		mpt_prt(mpt, "Bus: 0x%02x TargetID: 0x%02x\n",
1936 		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1937 		break;
1938 
1939 	case MPI_EVENT_IOC_BUS_RESET:
1940 		/* We generated a bus reset */
1941 		mpt_prt(mpt, "IOC Bus Reset Port: %d\n",
1942 		    (msg->Data[0] >> 8) & 0xff);
1943 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
1944 		break;
1945 
1946 	case MPI_EVENT_EXT_BUS_RESET:
1947 		/* Someone else generated a bus reset */
1948 		mpt_prt(mpt, "External Bus Reset Detected\n");
1949 		/*
1950 		 * These replies don't return EventData like the MPI
1951 		 * spec says they do
1952 		 */
1953 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
1954 		break;
1955 
1956 	case MPI_EVENT_RESCAN:
1957 		/*
1958 		 * In general this means a device has been added to the loop.
1959 		 */
1960 		mpt_prt(mpt, "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
1961 /*		xpt_async(AC_FOUND_DEVICE, path, NULL);  */
1962 		break;
1963 
1964 	case MPI_EVENT_LINK_STATUS_CHANGE:
1965 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
1966 		    (msg->Data[1] >> 8) & 0xff,
1967 		    ((msg->Data[0] & 0xff) == 0)?  "Failed" : "Active");
1968 		break;
1969 
1970 	case MPI_EVENT_LOOP_STATE_CHANGE:
1971 		switch ((msg->Data[0] >> 16) & 0xff) {
1972 		case 0x01:
1973 			mpt_prt(mpt,
1974 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
1975 			    "(Loop Initialization)\n",
1976 			    (msg->Data[1] >> 8) & 0xff,
1977 			    (msg->Data[0] >> 8) & 0xff,
1978 			    (msg->Data[0]     ) & 0xff);
1979 			switch ((msg->Data[0] >> 8) & 0xff) {
1980 			case 0xF7:
1981 				if ((msg->Data[0] & 0xff) == 0xF7) {
1982 					mpt_prt(mpt, "Device needs AL_PA\n");
1983 				} else {
1984 					mpt_prt(mpt, "Device %02x doesn't like "
1985 					    "FC performance\n",
1986 					    msg->Data[0] & 0xFF);
1987 				}
1988 				break;
1989 			case 0xF8:
1990 				if ((msg->Data[0] & 0xff) == 0xF7) {
1991 					mpt_prt(mpt, "Device had loop failure "
1992 					    "at its receiver prior to acquiring"
1993 					    " AL_PA\n");
1994 				} else {
1995 					mpt_prt(mpt, "Device %02x detected loop"
1996 					    " failure at its receiver\n",
1997 					    msg->Data[0] & 0xFF);
1998 				}
1999 				break;
2000 			default:
2001 				mpt_prt(mpt, "Device %02x requests that device "
2002 				    "%02x reset itself\n",
2003 				    msg->Data[0] & 0xFF,
2004 				    (msg->Data[0] >> 8) & 0xFF);
2005 				break;
2006 			}
2007 			break;
2008 		case 0x02:
2009 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2010 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2011 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2012 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2013 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2014 			break;
2015 		case 0x03:
2016 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2017 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2018 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2019 			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
2020 			    (msg->Data[0]     ) & 0xff  /* Character 4 */);
2021 			break;
2022 		default:
2023 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2024 			    "FC event (%02x %02x %02x)\n",
2025 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2026 			    (msg->Data[0] >> 16) & 0xff, /* Event */
2027 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2028 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2029 		}
2030 		break;
2031 
2032 	case MPI_EVENT_LOGOUT:
2033 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2034 		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
2035 		break;
2036 	case MPI_EVENT_EVENT_CHANGE:
2037 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2038 		    "mpt_cam_event: MPI_EVENT_EVENT_CHANGE\n");
2039 		break;
2040 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2041 		/*
2042 		 * Devices are attachin'.....
2043 		 */
2044 		mpt_prt(mpt,
2045 		    "mpt_cam_event: MPI_EVENT_SAS_DEVICE_STATUS_CHANGE\n");
2046 		break;
2047 	default:
2048 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2049 		    msg->Event & 0xFF);
2050 		return (0);
2051 	}
2052 	return (1);
2053 }
2054 
2055 /*
2056  * Reply path for all SCSI I/O requests, called from our
2057  * interrupt handler by extracting our handler index from
2058  * the MsgContext field of the reply from the IOC.
2059  *
2060  * This routine is optimized for the common case of a
2061  * completion without error.  All exception handling is
2062  * offloaded to non-inlined helper routines to minimize
2063  * cache footprint.
2064  */
2065 static int
2066 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2067     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2068 {
2069 	MSG_SCSI_IO_REQUEST *scsi_req;
2070 	union ccb *ccb;
2071 	target_id_t tgt;
2072 
2073 	if (req->state == REQ_STATE_FREE) {
2074 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2075 		return (TRUE);
2076 	}
2077 
2078 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2079 	ccb = req->ccb;
2080 	if (ccb == NULL) {
2081 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2082 		    req, req->serno);
2083 		return (TRUE);
2084 	}
2085 
2086 	tgt = scsi_req->TargetID;
2087 	untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
2088 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2089 
2090 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2091 		bus_dmasync_op_t op;
2092 
2093 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2094 			op = BUS_DMASYNC_POSTREAD;
2095 		else
2096 			op = BUS_DMASYNC_POSTWRITE;
2097 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2098 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2099 	}
2100 
2101 	if (reply_frame == NULL) {
2102 		/*
2103 		 * Context only reply, completion without error status.
2104 		 */
2105 		ccb->csio.resid = 0;
2106 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2107 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2108 	} else {
2109 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2110 	}
2111 
2112 	if (mpt->outofbeer) {
2113 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2114 		mpt->outofbeer = 0;
2115 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2116 	}
2117 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2118 		struct scsi_inquiry_data *iq =
2119 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2120 		if (scsi_req->Function ==
2121 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2122 			/*
2123 			 * Fake out the device type so that only the
2124 			 * pass-thru device will attach.
2125 			 */
2126 			iq->device &= ~0x1F;
2127 			iq->device |= T_NODEVICE;
2128 		}
2129 	}
2130 	if (mpt->verbose == MPT_PRT_DEBUG) {
2131 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2132 		    req, req->serno);
2133 	}
2134 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2135 	MPTLOCK_2_CAMLOCK(mpt);
2136 	xpt_done(ccb);
2137 	CAMLOCK_2_MPTLOCK(mpt);
2138 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2139 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2140 	} else {
2141 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2142 		    req, req->serno);
2143 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2144 	}
2145 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2146 	    ("CCB req needed wakeup"));
2147 #ifdef	INVARIANTS
2148 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2149 #endif
2150 	mpt_free_request(mpt, req);
2151 	return (TRUE);
2152 }
2153 
2154 static int
2155 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2156     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2157 {
2158 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2159 
2160 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2161 #ifdef	INVARIANTS
2162 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2163 #endif
2164 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2165 	/* Record IOC Status and Response Code of TMF for any waiters. */
2166 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2167 	req->ResponseCode = tmf_reply->ResponseCode;
2168 
2169 	mpt_lprt(mpt, MPT_PRT_INFO, "TMF complete: req %p:%u status 0x%x\n",
2170 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2171 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2172 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2173 		req->state |= REQ_STATE_DONE;
2174 		wakeup(req);
2175 	} else {
2176 		mpt->tmf_req->state = REQ_STATE_FREE;
2177 	}
2178 	return (TRUE);
2179 }
2180 
2181 /*
2182  * XXX: Move to definitions file
2183  */
2184 #define	ELS	0x22
2185 #define	FC4LS	0x32
2186 #define	ABTS	0x81
2187 #define	BA_ACC	0x84
2188 
2189 #define	LS_RJT	0x01
2190 #define	LS_ACC	0x02
2191 #define	PLOGI	0x03
2192 #define	LOGO	0x05
2193 #define SRR	0x14
2194 #define PRLI	0x20
2195 #define PRLO	0x21
2196 #define ADISC	0x52
2197 #define RSCN	0x61
2198 
2199 static void
2200 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2201     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2202 {
2203 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2204 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2205 
2206 	/*
2207 	 * We are going to reuse the ELS request to send this response back.
2208 	 */
2209 	rsp = &tmp;
2210 	memset(rsp, 0, sizeof(*rsp));
2211 
2212 #ifdef	USE_IMMEDIATE_LINK_DATA
2213 	/*
2214 	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2215 	 */
2216 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2217 #endif
2218 	rsp->RspLength = length;
2219 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2220 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2221 
2222 	/*
2223 	 * Copy over information from the original reply frame to
2224 	 * it's correct place in the response.
2225 	 */
2226 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2227 
2228 	/*
2229 	 * And now copy back the temporary area to the original frame.
2230 	 */
2231 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2232 	rsp = req->req_vbuf;
2233 
2234 #ifdef	USE_IMMEDIATE_LINK_DATA
2235 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2236 #else
2237 {
2238 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2239 	bus_addr_t paddr = req->req_pbuf;
2240 	paddr += MPT_RQSL(mpt);
2241 
2242 	se->FlagsLength =
2243 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2244 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2245 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2246 		MPI_SGE_FLAGS_END_OF_LIST	|
2247 		MPI_SGE_FLAGS_END_OF_BUFFER;
2248 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
2249 	se->FlagsLength |= (length);
2250 	se->Address = (uint32_t) paddr;
2251 }
2252 #endif
2253 
2254 	/*
2255 	 * Send it on...
2256 	 */
2257 	mpt_send_cmd(mpt, req);
2258 }
2259 
2260 static int
2261 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2262     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2263 {
2264 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2265 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2266 	U8 rctl;
2267 	U8 type;
2268 	U8 cmd;
2269 	U16 status = le16toh(reply_frame->IOCStatus);
2270 	U32 *elsbuf;
2271 	int ioindex;
2272 	int do_refresh = TRUE;
2273 
2274 #ifdef	INVARIANTS
2275 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2276 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2277 	    req, req->serno, rp->Function));
2278 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2279 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2280 	} else {
2281 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2282 	}
2283 #endif
2284 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2285 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2286 	    req, req->serno, reply_frame, reply_frame->Function);
2287 
2288 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2289 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2290 		    status, reply_frame->Function);
2291 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2292 			/*
2293 			 * XXX: to get around shutdown issue
2294 			 */
2295 			mpt->disabled = 1;
2296 			return (TRUE);
2297 		}
2298 		return (TRUE);
2299 	}
2300 
2301 	/*
2302 	 * If the function of a link service response, we recycle the
2303 	 * response to be a refresh for a new link service request.
2304 	 *
2305 	 * The request pointer is bogus in this case and we have to fetch
2306 	 * it based upon the TransactionContext.
2307 	 */
2308 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2309 		/* Freddie Uncle Charlie Katie */
2310 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2311 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2312 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2313 				break;
2314 			}
2315 
2316 		KASSERT(ioindex < mpt->els_cmds_allocated,
2317 		    ("can't find my mommie!"));
2318 
2319 		/* remove from active list as we're going to re-post it */
2320 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2321 		req->state &= ~REQ_STATE_QUEUED;
2322 		req->state |= REQ_STATE_DONE;
2323 		mpt_fc_post_els(mpt, req, ioindex);
2324 		return (TRUE);
2325 	}
2326 
2327 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2328 		/* remove from active list as we're done */
2329 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2330 		req->state &= ~REQ_STATE_QUEUED;
2331 		req->state |= REQ_STATE_DONE;
2332 		if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2333 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2334 			    "Async Primitive Send Complete\n");
2335 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2336 			mpt_free_request(mpt, req);
2337 		} else {
2338 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2339 			    "Sync Primitive Send Complete\n");
2340 			wakeup(req);
2341 		}
2342 		return (TRUE);
2343 	}
2344 
2345 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2346 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2347 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2348 		    rp->MsgLength, rp->MsgFlags);
2349 		return (TRUE);
2350 	}
2351 
2352 	if (rp->MsgLength <= 5) {
2353 		/*
2354 		 * This is just a ack of an original ELS buffer post
2355 		 */
2356 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2357 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2358 		return (TRUE);
2359 	}
2360 
2361 
2362 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2363 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2364 
2365 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2366 	cmd = be32toh(elsbuf[0]) >> 24;
2367 
2368 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2369 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2370 		return (TRUE);
2371 	}
2372 
2373 	ioindex = le32toh(rp->TransactionContext);
2374 	req = mpt->els_cmd_ptrs[ioindex];
2375 
2376 	if (rctl == ELS && type == 1) {
2377 		switch (cmd) {
2378 		case PRLI:
2379 			/*
2380 			 * Send back a PRLI ACC
2381 			 */
2382 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2383 			    le32toh(rp->Wwn.PortNameHigh),
2384 			    le32toh(rp->Wwn.PortNameLow));
2385 			elsbuf[0] = htobe32(0x02100014);
2386 			elsbuf[1] |= htobe32(0x00000100);
2387 			elsbuf[4] = htobe32(0x00000002);
2388 			if (mpt->role & MPT_ROLE_TARGET)
2389 				elsbuf[4] |= htobe32(0x00000010);
2390 			if (mpt->role & MPT_ROLE_INITIATOR)
2391 				elsbuf[4] |= htobe32(0x00000020);
2392 			/* remove from active list as we're done */
2393 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2394 			req->state &= ~REQ_STATE_QUEUED;
2395 			req->state |= REQ_STATE_DONE;
2396 			mpt_fc_els_send_response(mpt, req, rp, 20);
2397 			do_refresh = FALSE;
2398 			break;
2399 		case PRLO:
2400 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2401 			elsbuf[0] = htobe32(0x02100014);
2402 			elsbuf[1] = htobe32(0x08000100);
2403 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2404 			    le32toh(rp->Wwn.PortNameHigh),
2405 			    le32toh(rp->Wwn.PortNameLow));
2406 			/* remove from active list as we're done */
2407 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2408 			req->state &= ~REQ_STATE_QUEUED;
2409 			req->state |= REQ_STATE_DONE;
2410 			mpt_fc_els_send_response(mpt, req, rp, 20);
2411 			do_refresh = FALSE;
2412 			break;
2413 		default:
2414 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2415 			break;
2416 		}
2417 	} else if (rctl == ABTS && type == 0) {
2418 		uint16_t rx_id = le16toh(rp->Rxid);
2419 		uint16_t ox_id = le16toh(rp->Oxid);
2420 		request_t *tgt_req = NULL;
2421 
2422 		mpt_prt(mpt,
2423 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2424 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2425 		    le32toh(rp->Wwn.PortNameLow));
2426 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2427 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2428 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2429 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2430 		} else {
2431 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2432 		}
2433 		if (tgt_req) {
2434 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2435 			uint8_t *vbuf;
2436 			union ccb *ccb = tgt->ccb;
2437 			uint32_t ct_id;
2438 
2439 			vbuf = tgt_req->req_vbuf;
2440 			vbuf += MPT_RQSL(mpt);
2441 
2442 			/*
2443 			 * Check to make sure we have the correct command
2444 			 * The reply descriptor in the target state should
2445 			 * should contain an IoIndex that should match the
2446 			 * RX_ID.
2447 			 *
2448 			 * It'd be nice to have OX_ID to crosscheck with
2449 			 * as well.
2450 			 */
2451 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2452 
2453 			if (ct_id != rx_id) {
2454 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2455 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2456 				    rx_id, ct_id);
2457 				goto skip;
2458 			}
2459 
2460 			ccb = tgt->ccb;
2461 			if (ccb) {
2462 				mpt_prt(mpt,
2463 				    "CCB (%p): lun %u flags %x status %x\n",
2464 				    ccb, ccb->ccb_h.target_lun,
2465 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2466 			}
2467 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2468 			    "%x nxfers %x\n", tgt->state,
2469 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2470 			    tgt->nxfers);
2471   skip:
2472 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2473 				mpt_prt(mpt, "unable to start TargetAbort\n");
2474 			}
2475 		} else {
2476 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2477 		}
2478 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2479 		elsbuf[0] = htobe32(0);
2480 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2481 		elsbuf[2] = htobe32(0x000ffff);
2482 		/*
2483 		 * Dork with the reply frame so that the reponse to it
2484 		 * will be correct.
2485 		 */
2486 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2487 		/* remove from active list as we're done */
2488 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2489 		req->state &= ~REQ_STATE_QUEUED;
2490 		req->state |= REQ_STATE_DONE;
2491 		mpt_fc_els_send_response(mpt, req, rp, 12);
2492 		do_refresh = FALSE;
2493 	} else {
2494 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2495 	}
2496 	if (do_refresh == TRUE) {
2497 		/* remove from active list as we're done */
2498 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2499 		req->state &= ~REQ_STATE_QUEUED;
2500 		req->state |= REQ_STATE_DONE;
2501 		mpt_fc_post_els(mpt, req, ioindex);
2502 	}
2503 	return (TRUE);
2504 }
2505 
2506 /*
2507  * Clean up all SCSI Initiator personality state in response
2508  * to a controller reset.
2509  */
2510 static void
2511 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2512 {
2513 	/*
2514 	 * The pending list is already run down by
2515 	 * the generic handler.  Perform the same
2516 	 * operation on the timed out request list.
2517 	 */
2518 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
2519 				   MPI_IOCSTATUS_INVALID_STATE);
2520 
2521 	/*
2522 	 * XXX: We need to repost ELS and Target Command Buffers?
2523 	 */
2524 
2525 	/*
2526 	 * Inform the XPT that a bus reset has occurred.
2527 	 */
2528 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
2529 }
2530 
2531 /*
2532  * Parse additional completion information in the reply
2533  * frame for SCSI I/O requests.
2534  */
2535 static int
2536 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
2537 			     MSG_DEFAULT_REPLY *reply_frame)
2538 {
2539 	union ccb *ccb;
2540 	MSG_SCSI_IO_REPLY *scsi_io_reply;
2541 	u_int ioc_status;
2542 	u_int sstate;
2543 	u_int loginfo;
2544 
2545 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
2546 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
2547 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
2548 		("MPT SCSI I/O Handler called with incorrect reply type"));
2549 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
2550 		("MPT SCSI I/O Handler called with continuation reply"));
2551 
2552 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
2553 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
2554 	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
2555 	ioc_status &= MPI_IOCSTATUS_MASK;
2556 	sstate = scsi_io_reply->SCSIState;
2557 
2558 	ccb = req->ccb;
2559 	ccb->csio.resid =
2560 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
2561 
2562 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
2563 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
2564 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2565 		ccb->csio.sense_resid =
2566 		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
2567 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
2568 		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
2569 	}
2570 
2571 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
2572 		/*
2573 		 * Tag messages rejected, but non-tagged retry
2574 		 * was successful.
2575 XXXX
2576 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
2577 		 */
2578 	}
2579 
2580 	switch(ioc_status) {
2581 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2582 		/*
2583 		 * XXX
2584 		 * Linux driver indicates that a zero
2585 		 * transfer length with this error code
2586 		 * indicates a CRC error.
2587 		 *
2588 		 * No need to swap the bytes for checking
2589 		 * against zero.
2590 		 */
2591 		if (scsi_io_reply->TransferCount == 0) {
2592 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2593 			break;
2594 		}
2595 		/* FALLTHROUGH */
2596 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
2597 	case MPI_IOCSTATUS_SUCCESS:
2598 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
2599 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
2600 			/*
2601 			 * Status was never returned for this transaction.
2602 			 */
2603 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
2604 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
2605 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
2606 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
2607 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
2608 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
2609 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
2610 
2611 			/* XXX Handle SPI-Packet and FCP-2 reponse info. */
2612 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2613 		} else
2614 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2615 		break;
2616 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
2617 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
2618 		break;
2619 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
2620 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2621 		break;
2622 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2623 		/*
2624 		 * Since selection timeouts and "device really not
2625 		 * there" are grouped into this error code, report
2626 		 * selection timeout.  Selection timeouts are
2627 		 * typically retried before giving up on the device
2628 		 * whereas "device not there" errors are considered
2629 		 * unretryable.
2630 		 */
2631 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
2632 		break;
2633 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2634 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
2635 		break;
2636 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
2637 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
2638 		break;
2639 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
2640 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
2641 		break;
2642 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2643 		ccb->ccb_h.status = CAM_UA_TERMIO;
2644 		break;
2645 	case MPI_IOCSTATUS_INVALID_STATE:
2646 		/*
2647 		 * The IOC has been reset.  Emulate a bus reset.
2648 		 */
2649 		/* FALLTHROUGH */
2650 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
2651 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2652 		break;
2653 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
2654 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
2655 		/*
2656 		 * Don't clobber any timeout status that has
2657 		 * already been set for this transaction.  We
2658 		 * want the SCSI layer to be able to differentiate
2659 		 * between the command we aborted due to timeout
2660 		 * and any innocent bystanders.
2661 		 */
2662 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
2663 			break;
2664 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
2665 		break;
2666 
2667 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2668 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
2669 		break;
2670 	case MPI_IOCSTATUS_BUSY:
2671 		mpt_set_ccb_status(ccb, CAM_BUSY);
2672 		break;
2673 	case MPI_IOCSTATUS_INVALID_FUNCTION:
2674 	case MPI_IOCSTATUS_INVALID_SGL:
2675 	case MPI_IOCSTATUS_INTERNAL_ERROR:
2676 	case MPI_IOCSTATUS_INVALID_FIELD:
2677 	default:
2678 		/* XXX
2679 		 * Some of the above may need to kick
2680 		 * of a recovery action!!!!
2681 		 */
2682 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2683 		break;
2684 	}
2685 
2686 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2687 		mpt_freeze_ccb(ccb);
2688 	}
2689 
2690 	return (TRUE);
2691 }
2692 
2693 static void
2694 mpt_action(struct cam_sim *sim, union ccb *ccb)
2695 {
2696 	struct mpt_softc *mpt;
2697 	struct ccb_trans_settings *cts;
2698 	target_id_t tgt;
2699 	int raid_passthru;
2700 
2701 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
2702 
2703 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
2704 	KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action"));
2705 	raid_passthru = (sim == mpt->phydisk_sim);
2706 
2707 	tgt = ccb->ccb_h.target_id;
2708 	if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ &&
2709 	    ccb->ccb_h.func_code != XPT_RESET_BUS) {
2710 		CAMLOCK_2_MPTLOCK(mpt);
2711 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2712 			MPTLOCK_2_CAMLOCK(mpt);
2713 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2714 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2715 			xpt_done(ccb);
2716 			return;
2717 		}
2718 		MPTLOCK_2_CAMLOCK(mpt);
2719 	}
2720 	ccb->ccb_h.ccb_mpt_ptr = mpt;
2721 
2722 	switch (ccb->ccb_h.func_code) {
2723 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2724 		/*
2725 		 * Do a couple of preliminary checks...
2726 		 */
2727 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2728 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2729 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2730 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2731 				break;
2732 			}
2733 		}
2734 		/* Max supported CDB length is 16 bytes */
2735 		/* XXX Unless we implement the new 32byte message type */
2736 		if (ccb->csio.cdb_len >
2737 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
2738 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2739 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2740 			break;
2741 		}
2742 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2743 		mpt_start(sim, ccb);
2744 		return;
2745 
2746 	case XPT_RESET_BUS:
2747 		mpt_lprt(mpt, MPT_PRT_DEBUG, "XPT_RESET_BUS\n");
2748 
2749 		CAMLOCK_2_MPTLOCK(mpt);
2750 		(void) mpt_bus_reset(mpt, FALSE);
2751 		MPTLOCK_2_CAMLOCK(mpt);
2752 
2753 		/*
2754 		 * mpt_bus_reset is always successful in that it
2755 		 * will fall back to a hard reset should a bus
2756 		 * reset attempt fail.
2757 		 */
2758 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2759 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2760 		break;
2761 
2762 	case XPT_ABORT:
2763 	{
2764 		union ccb *accb = ccb->cab.abort_ccb;
2765 		CAMLOCK_2_MPTLOCK(mpt);
2766 		switch (accb->ccb_h.func_code) {
2767 		case XPT_ACCEPT_TARGET_IO:
2768 		case XPT_IMMED_NOTIFY:
2769 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
2770 			break;
2771 		case XPT_CONT_TARGET_IO:
2772 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
2773 			ccb->ccb_h.status = CAM_UA_ABORT;
2774 			break;
2775 		case XPT_SCSI_IO:
2776 			ccb->ccb_h.status = CAM_UA_ABORT;
2777 			break;
2778 		default:
2779 			ccb->ccb_h.status = CAM_REQ_INVALID;
2780 			break;
2781 		}
2782 		MPTLOCK_2_CAMLOCK(mpt);
2783 		break;
2784 	}
2785 
2786 #ifdef	CAM_NEW_TRAN_CODE
2787 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2788 #else
2789 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2790 #endif
2791 #define	DP_DISC_ENABLE	0x1
2792 #define	DP_DISC_DISABL	0x2
2793 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
2794 
2795 #define	DP_TQING_ENABLE	0x4
2796 #define	DP_TQING_DISABL	0x8
2797 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
2798 
2799 #define	DP_WIDE		0x10
2800 #define	DP_NARROW	0x20
2801 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
2802 
2803 #define	DP_SYNC		0x40
2804 
2805 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2806 	{
2807 #ifdef	CAM_NEW_TRAN_CODE
2808 		struct ccb_trans_settings_scsi *scsi;
2809 		struct ccb_trans_settings_spi *spi;
2810 #endif
2811 		uint8_t dval;
2812 		u_int period;
2813 		u_int offset;
2814 		int i, j;
2815 
2816 		cts = &ccb->cts;
2817 		if (!IS_CURRENT_SETTINGS(cts)) {
2818 			mpt_prt(mpt, "Attempt to set User settings\n");
2819 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2820 			break;
2821 		}
2822 
2823 		if (mpt->is_fc || mpt->is_sas) {
2824 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2825 			break;
2826 		}
2827 
2828 		/*
2829 		 * Skip attempting settings on RAID volume disks.
2830 		 * Other devices on the bus get the normal treatment.
2831 		 */
2832 		if (mpt->phydisk_sim && raid_passthru == 0 &&
2833 		    mpt_is_raid_volume(mpt, tgt) != 0) {
2834 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
2835 			    "skipping transfer settings for RAID volumes\n");
2836 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2837 			break;
2838 		}
2839 
2840 		i = mpt->mpt_port_page2.PortSettings &
2841 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
2842 		j = mpt->mpt_port_page2.PortFlags &
2843 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
2844 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
2845 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
2846 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
2847 			    "honoring BIOS transfer negotiations\n");
2848 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2849 			break;
2850 		}
2851 
2852 		dval = 0;
2853 		period = 0;
2854 		offset = 0;
2855 
2856 #ifndef	CAM_NEW_TRAN_CODE
2857 		if (cts->valid & CCB_TRANS_DISC_VALID) {
2858 			dval |= DP_DISC_ENABLE;
2859 		}
2860 		if (cts->valid & CCB_TRANS_TQ_VALID) {
2861 			dval |= DP_TQING_ENABLE;
2862 		}
2863 		if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2864 			if (cts->bus_width)
2865 				dval |= DP_WIDE;
2866 			else
2867 				dval |= DP_NARROW;
2868 		}
2869 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2870 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
2871 			dval |= DP_SYNC;
2872 			period = cts->sync_period;
2873 			offset = cts->sync_offset;
2874 		}
2875 #else
2876 		scsi = &cts->proto_specific.scsi;
2877 		spi = &cts->xport_specific.spi;
2878 
2879 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2880 			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) {
2881 				dval |= DP_DISC_ENABLE;
2882 			} else {
2883 				dval |= DP_DISC_DISABL;
2884 			}
2885 		}
2886 
2887 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2888 			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
2889 				dval |= DP_TQING_ENABLE;
2890 			} else {
2891 				dval |= DP_TQING_DISABL;
2892 			}
2893 		}
2894 
2895 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2896 			if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) {
2897 				dval |= DP_WIDE;
2898 			} else {
2899 				dval |= DP_NARROW;
2900 			}
2901 		}
2902 
2903 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2904 		    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2905 		    (spi->sync_period && spi->sync_offset)) {
2906 			dval |= DP_SYNC;
2907 			period = spi->sync_period;
2908 			offset = spi->sync_offset;
2909 		}
2910 #endif
2911 		CAMLOCK_2_MPTLOCK(mpt);
2912 		if (dval & DP_DISC_ENABLE) {
2913 			mpt->mpt_disc_enable |= (1 << tgt);
2914 		} else if (dval & DP_DISC_DISABL) {
2915 			mpt->mpt_disc_enable &= ~(1 << tgt);
2916 		}
2917 		if (dval & DP_TQING_ENABLE) {
2918 			mpt->mpt_tag_enable |= (1 << tgt);
2919 		} else if (dval & DP_TQING_DISABL) {
2920 			mpt->mpt_tag_enable &= ~(1 << tgt);
2921 		}
2922 		if (dval & DP_WIDTH) {
2923 			mpt_setwidth(mpt, tgt, 1);
2924 		}
2925 		if (dval & DP_SYNC) {
2926 			mpt_setsync(mpt, tgt, period, offset);
2927 		}
2928 
2929 		if (mpt_update_spi_config(mpt, tgt)) {
2930 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2931 		} else {
2932 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2933 		}
2934 		MPTLOCK_2_CAMLOCK(mpt);
2935 		break;
2936 	}
2937 	case XPT_GET_TRAN_SETTINGS:
2938 		cts = &ccb->cts;
2939 		if (mpt->is_fc) {
2940 #ifndef	CAM_NEW_TRAN_CODE
2941 			/*
2942 			 * a lot of normal SCSI things don't make sense.
2943 			 */
2944 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2945 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2946 			/*
2947 			 * How do you measure the width of a high
2948 			 * speed serial bus? Well, in bytes.
2949 			 *
2950 			 * Offset and period make no sense, though, so we set
2951 			 * (above) a 'base' transfer speed to be gigabit.
2952 			 */
2953 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2954 #else
2955 			struct ccb_trans_settings_fc *fc =
2956 			    &cts->xport_specific.fc;
2957 
2958 			cts->protocol = PROTO_SCSI;
2959 			cts->protocol_version = SCSI_REV_2;
2960 			cts->transport = XPORT_FC;
2961 			cts->transport_version = 0;
2962 
2963 			fc->valid = CTS_FC_VALID_SPEED;
2964 			fc->bitrate = 100000;	/* XXX: Need for 2Gb/s */
2965 			/* XXX: need a port database for each target */
2966 #endif
2967 		} else if (mpt->is_sas) {
2968 #ifndef	CAM_NEW_TRAN_CODE
2969 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2970 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2971 			/*
2972 			 * How do you measure the width of a high
2973 			 * speed serial bus? Well, in bytes.
2974 			 *
2975 			 * Offset and period make no sense, though, so we set
2976 			 * (above) a 'base' transfer speed to be gigabit.
2977 			 */
2978 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2979 #else
2980 			struct ccb_trans_settings_sas *sas =
2981 			    &cts->xport_specific.sas;
2982 
2983 			cts->protocol = PROTO_SCSI;
2984 			cts->protocol_version = SCSI_REV_3;
2985 			cts->transport = XPORT_SAS;
2986 			cts->transport_version = 0;
2987 
2988 			sas->valid = CTS_SAS_VALID_SPEED;
2989 			sas->bitrate = 300000;	/* XXX: Default 3Gbps */
2990 #endif
2991 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
2992 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2993 			break;
2994 		}
2995 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2996 		break;
2997 
2998 	case XPT_CALC_GEOMETRY:
2999 	{
3000 		struct ccb_calc_geometry *ccg;
3001 
3002 		ccg = &ccb->ccg;
3003 		if (ccg->block_size == 0) {
3004 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3005 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3006 			break;
3007 		}
3008 		mpt_calc_geometry(ccg, /*extended*/1);
3009 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3010 		break;
3011 	}
3012 	case XPT_PATH_INQ:		/* Path routing inquiry */
3013 	{
3014 		struct ccb_pathinq *cpi = &ccb->cpi;
3015 
3016 		cpi->version_num = 1;
3017 		cpi->target_sprt = 0;
3018 		cpi->hba_eng_cnt = 0;
3019 		cpi->max_lun = 7;
3020 		cpi->bus_id = cam_sim_bus(sim);
3021 		/* XXX Report base speed more accurately for FC/SAS, etc.*/
3022 		if (mpt->is_fc) {
3023 			/* XXX SHOULD BE BASED UPON IOC FACTS XXX XXX */
3024 			cpi->max_target = 255;
3025 			cpi->hba_misc = PIM_NOBUSRESET;
3026 			cpi->initiator_id = mpt->mpt_ini_id;
3027 			cpi->base_transfer_speed = 100000;
3028 			cpi->hba_inquiry = PI_TAG_ABLE;
3029 		} else if (mpt->is_sas) {
3030 			cpi->max_target = 63;	/* XXX */
3031 			cpi->hba_misc = PIM_NOBUSRESET;
3032 			cpi->initiator_id = mpt->mpt_ini_id;
3033 			cpi->base_transfer_speed = 300000;
3034 			cpi->hba_inquiry = PI_TAG_ABLE;
3035 		} else {
3036 			cpi->max_target = 15;
3037 			cpi->hba_misc = PIM_SEQSCAN;
3038 			cpi->initiator_id = mpt->mpt_ini_id;
3039 			cpi->base_transfer_speed = 3300;
3040 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3041 		}
3042 
3043 		/*
3044 		 * We give our fake RAID passhtru bus a width that is MaxVolumes
3045 		 * wide, restrict it to one lun and have it *not* be a bus
3046 		 * that can have a SCSI bus reset.
3047 		 */
3048 		if (raid_passthru) {
3049 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3050 			cpi->initiator_id = cpi->max_target + 1;
3051 			cpi->max_lun = 0;
3052 			cpi->hba_misc |= PIM_NOBUSRESET;
3053 		}
3054 
3055 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3056 			cpi->hba_misc |= PIM_NOINITIATOR;
3057 		}
3058 		if ((mpt->role & MPT_ROLE_TARGET) != 0) {
3059 			cpi->target_sprt =
3060 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3061 		} else {
3062 			cpi->target_sprt = 0;
3063 		}
3064 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3065 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3066 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3067 		cpi->unit_number = cam_sim_unit(sim);
3068 		cpi->ccb_h.status = CAM_REQ_CMP;
3069 		break;
3070 	}
3071 	case XPT_EN_LUN:		/* Enable LUN as a target */
3072 	{
3073 		int result;
3074 
3075 		CAMLOCK_2_MPTLOCK(mpt);
3076 		if (ccb->cel.enable)
3077 			result = mpt_enable_lun(mpt,
3078 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3079 		else
3080 			result = mpt_disable_lun(mpt,
3081 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3082 		MPTLOCK_2_CAMLOCK(mpt);
3083 		if (result == 0) {
3084 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3085 		} else {
3086 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3087 		}
3088 		break;
3089 	}
3090 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3091 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3092 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3093 	{
3094 		tgt_resource_t *trtp;
3095 		lun_id_t lun = ccb->ccb_h.target_lun;
3096 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3097 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3098 		ccb->ccb_h.flags = 0;
3099 
3100 		if (lun == CAM_LUN_WILDCARD) {
3101 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3102 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3103 				break;
3104 			}
3105 			trtp = &mpt->trt_wildcard;
3106 		} else if (lun >= MPT_MAX_LUNS) {
3107 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3108 			break;
3109 		} else {
3110 			trtp = &mpt->trt[lun];
3111 		}
3112 		CAMLOCK_2_MPTLOCK(mpt);
3113 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3114 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3115 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3116 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3117 			    sim_links.stqe);
3118 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3119 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3120 			    "Put FREE INOT lun %d\n", lun);
3121 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3122 			    sim_links.stqe);
3123 		} else {
3124 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3125 		}
3126 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3127 		MPTLOCK_2_CAMLOCK(mpt);
3128 		break;
3129 	}
3130 	case XPT_CONT_TARGET_IO:
3131 		CAMLOCK_2_MPTLOCK(mpt);
3132 		mpt_target_start_io(mpt, ccb);
3133 		MPTLOCK_2_CAMLOCK(mpt);
3134 		break;
3135 	default:
3136 		ccb->ccb_h.status = CAM_REQ_INVALID;
3137 		break;
3138 	}
3139 	xpt_done(ccb);
3140 }
3141 
3142 static int
3143 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3144 {
3145 #ifdef	CAM_NEW_TRAN_CODE
3146 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3147 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3148 #endif
3149 	target_id_t tgt;
3150 	uint8_t dval, pval, oval;
3151 	int rv;
3152 
3153 	if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3154 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3155 			return (-1);
3156 		}
3157 	} else {
3158 		tgt = cts->ccb_h.target_id;
3159 	}
3160 
3161 	/*
3162 	 * XXX: We aren't looking Port Page 2 BIOS settings here.
3163 	 * XXX: For goal settings, we pick the max from port page 0
3164 	 *
3165 	 * For current settings we read the current settings out from
3166 	 * device page 0 for that target.
3167 	 */
3168 	if (IS_CURRENT_SETTINGS(cts)) {
3169 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3170 		dval = 0;
3171 
3172 		CAMLOCK_2_MPTLOCK(mpt);
3173 		tmp = mpt->mpt_dev_page0[tgt];
3174 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3175 		    sizeof(tmp), FALSE, 5000);
3176 		if (rv) {
3177 			MPTLOCK_2_CAMLOCK(mpt);
3178 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3179 			return (rv);
3180 		}
3181 		MPTLOCK_2_CAMLOCK(mpt);
3182 		if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) {
3183 			dval |= DP_WIDE;
3184 		}
3185 		if (mpt->mpt_disc_enable & (1 << tgt)) {
3186 			dval |= DP_DISC_ENABLE;
3187 		}
3188 		if (mpt->mpt_tag_enable & (1 << tgt)) {
3189 			dval |= DP_TQING_ENABLE;
3190 		}
3191 		oval = (tmp.NegotiatedParameters >> 16) & 0xff;
3192 		pval = (tmp.NegotiatedParameters >>  8) & 0xff;
3193 		mpt->mpt_dev_page0[tgt] = tmp;
3194 	} else {
3195 		/*
3196 		 * XXX: Just make theoretical maximum.
3197 		 */
3198 		dval = DP_WIDE|DP_DISC|DP_TQING;
3199 		oval = (mpt->mpt_port_page0.Capabilities >> 16);
3200 		pval = (mpt->mpt_port_page0.Capabilities >>  8);
3201 	}
3202 #ifndef	CAM_NEW_TRAN_CODE
3203 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3204 	if (dval & DP_DISC_ENABLE) {
3205 		cts->flags |= CCB_TRANS_DISC_ENB;
3206 	}
3207 	if (dval & DP_TQING_ENABLE) {
3208 		cts->flags |= CCB_TRANS_TAG_ENB;
3209 	}
3210 	if (dval & DP_WIDE) {
3211 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3212 	} else {
3213 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3214 	}
3215 	cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
3216 	    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3217 	if (oval) {
3218 		cts->sync_period = pval;
3219 		cts->sync_offset = oval;
3220 		cts->valid |=
3221 		    CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID;
3222 	}
3223 #else
3224 	cts->protocol = PROTO_SCSI;
3225 	cts->protocol_version = SCSI_REV_2;
3226 	cts->transport = XPORT_SPI;
3227 	cts->transport_version = 2;
3228 
3229 	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
3230 	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
3231 	if (dval & DP_DISC_ENABLE) {
3232 		spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3233 	}
3234 	if (dval & DP_TQING_ENABLE) {
3235 		scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3236 	}
3237 	if (oval && pval) {
3238 		spi->sync_offset = oval;
3239 		spi->sync_period = pval;
3240 		spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3241 		spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3242 	}
3243 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3244 	if (dval & DP_WIDE) {
3245 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3246 	} else {
3247 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3248 	}
3249 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3250 		scsi->valid = CTS_SCSI_VALID_TQ;
3251 		spi->valid |= CTS_SPI_VALID_DISC;
3252 	} else {
3253 		scsi->valid = 0;
3254 	}
3255 #endif
3256 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3257 	    "mpt_get_spi_settings[%d]: %s 0x%x period 0x%x offset %d\n", tgt,
3258 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3259 	return (0);
3260 }
3261 
3262 static void
3263 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3264 {
3265 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3266 
3267 	ptr = &mpt->mpt_dev_page1[tgt];
3268 	if (onoff) {
3269 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3270 	} else {
3271 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3272 	}
3273 }
3274 
3275 static void
3276 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3277 {
3278 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3279 
3280 	ptr = &mpt->mpt_dev_page1[tgt];
3281 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3282 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3283 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3284 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3285 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3286 	ptr->RequestedParameters |= (period << 8) | (offset << 16);
3287 	if (period < 0xa) {
3288 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3289 	}
3290 	if (period < 0x9) {
3291 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3292 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3293 	}
3294 }
3295 
3296 static int
3297 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3298 {
3299 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3300 	int rv;
3301 
3302 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3303 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3304 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3305 	tmp = mpt->mpt_dev_page1[tgt];
3306 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3307 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3308 	if (rv) {
3309 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3310 		return (-1);
3311 	}
3312 	return (0);
3313 }
3314 
3315 static void
3316 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3317 {
3318 #if __FreeBSD_version >= 500000
3319 	cam_calc_geometry(ccg, extended);
3320 #else
3321 	uint32_t size_mb;
3322 	uint32_t secs_per_cylinder;
3323 
3324 	if (ccg->block_size == 0) {
3325 		ccg->ccb_h.status = CAM_REQ_INVALID;
3326 		return;
3327 	}
3328 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3329 	if (size_mb > 1024 && extended) {
3330 		ccg->heads = 255;
3331 		ccg->secs_per_track = 63;
3332 	} else {
3333 		ccg->heads = 64;
3334 		ccg->secs_per_track = 32;
3335 	}
3336 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3337 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3338 	ccg->ccb_h.status = CAM_REQ_CMP;
3339 #endif
3340 }
3341 
3342 /****************************** Timeout Recovery ******************************/
3343 static int
3344 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3345 {
3346 	int error;
3347 
3348 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3349 	    &mpt->recovery_thread, /*flags*/0,
3350 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3351 	return (error);
3352 }
3353 
3354 static void
3355 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3356 {
3357 	if (mpt->recovery_thread == NULL) {
3358 		return;
3359 	}
3360 	mpt->shutdwn_recovery = 1;
3361 	wakeup(mpt);
3362 	/*
3363 	 * Sleep on a slightly different location
3364 	 * for this interlock just for added safety.
3365 	 */
3366 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3367 }
3368 
3369 static void
3370 mpt_recovery_thread(void *arg)
3371 {
3372 	struct mpt_softc *mpt;
3373 
3374 #if __FreeBSD_version >= 500000
3375 	mtx_lock(&Giant);
3376 #endif
3377 	mpt = (struct mpt_softc *)arg;
3378 	MPT_LOCK(mpt);
3379 	for (;;) {
3380 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3381 			if (mpt->shutdwn_recovery == 0) {
3382 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3383 			}
3384 		}
3385 		if (mpt->shutdwn_recovery != 0) {
3386 			break;
3387 		}
3388 		mpt_recover_commands(mpt);
3389 	}
3390 	mpt->recovery_thread = NULL;
3391 	wakeup(&mpt->recovery_thread);
3392 	MPT_UNLOCK(mpt);
3393 #if __FreeBSD_version >= 500000
3394 	mtx_unlock(&Giant);
3395 #endif
3396 	kthread_exit(0);
3397 }
3398 
3399 static int
3400 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3401     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3402 {
3403 	MSG_SCSI_TASK_MGMT *tmf_req;
3404 	int		    error;
3405 
3406 	/*
3407 	 * Wait for any current TMF request to complete.
3408 	 * We're only allowed to issue one TMF at a time.
3409 	 */
3410 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3411 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3412 	if (error != 0) {
3413 		mpt_reset(mpt, TRUE);
3414 		return (ETIMEDOUT);
3415 	}
3416 
3417 	mpt_assign_serno(mpt, mpt->tmf_req);
3418 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3419 
3420 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3421 	memset(tmf_req, 0, sizeof(*tmf_req));
3422 	tmf_req->TargetID = target;
3423 	tmf_req->Bus = channel;
3424 	tmf_req->ChainOffset = 0;
3425 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3426 	tmf_req->Reserved = 0;
3427 	tmf_req->TaskType = type;
3428 	tmf_req->Reserved1 = 0;
3429 	tmf_req->MsgFlags = flags;
3430 	tmf_req->MsgContext =
3431 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3432 	memset(&tmf_req->LUN, 0,
3433 	    sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
3434 	if (lun > 256) {
3435 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3436 		tmf_req->LUN[1] = lun & 0xff;
3437 	} else {
3438 		tmf_req->LUN[1] = lun;
3439 	}
3440 	tmf_req->TaskMsgContext = abort_ctx;
3441 
3442 	mpt_lprt(mpt, MPT_PRT_INFO,
3443 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3444 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3445 	if (mpt->verbose > MPT_PRT_DEBUG) {
3446 		mpt_print_request(tmf_req);
3447 	}
3448 
3449 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3450 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3451 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3452 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3453 	if (error != MPT_OK) {
3454 		mpt_reset(mpt, TRUE);
3455 	}
3456 	return (error);
3457 }
3458 
3459 /*
3460  * When a command times out, it is placed on the requeust_timeout_list
3461  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3462  * only a single TMF operation at a time, so we serially abort/bdr, etc,
3463  * the timedout transactions.  The next TMF is issued either by the
3464  * completion handler of the current TMF waking our recovery thread,
3465  * or the TMF timeout handler causing a hard reset sequence.
3466  */
3467 static void
3468 mpt_recover_commands(struct mpt_softc *mpt)
3469 {
3470 	request_t	   *req;
3471 	union ccb	   *ccb;
3472 	int		    error;
3473 
3474 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3475 		/*
3476 		 * No work to do- leave.
3477 		 */
3478 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3479 		return;
3480 	}
3481 
3482 	/*
3483 	 * Flush any commands whose completion coincides with their timeout.
3484 	 */
3485 	mpt_intr(mpt);
3486 
3487 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3488 		/*
3489 		 * The timedout commands have already
3490 		 * completed.  This typically means
3491 		 * that either the timeout value was on
3492 		 * the hairy edge of what the device
3493 		 * requires or - more likely - interrupts
3494 		 * are not happening.
3495 		 */
3496 		mpt_prt(mpt, "Timedout requests already complete. "
3497 		    "Interrupts may not be functioning.\n");
3498 		mpt_enable_ints(mpt);
3499 		return;
3500 	}
3501 
3502 	/*
3503 	 * We have no visibility into the current state of the
3504 	 * controller, so attempt to abort the commands in the
3505 	 * order they timed-out. For initiator commands, we
3506 	 * depend on the reply handler pulling requests off
3507 	 * the timeout list.
3508 	 */
3509 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3510 		uint16_t status;
3511 		uint8_t response;
3512 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3513 
3514 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
3515 		    req, req->serno, hdrp->Function);
3516 		ccb = req->ccb;
3517 		if (ccb == NULL) {
3518 			mpt_prt(mpt, "null ccb in timed out request. "
3519 			    "Resetting Controller.\n");
3520 			mpt_reset(mpt, TRUE);
3521 			continue;
3522 		}
3523 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
3524 
3525 		/*
3526 		 * Check to see if this is not an initiator command and
3527 		 * deal with it differently if it is.
3528 		 */
3529 		switch (hdrp->Function) {
3530 		case MPI_FUNCTION_SCSI_IO_REQUEST:
3531 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
3532 			break;
3533 		default:
3534 			/*
3535 			 * XXX: FIX ME: need to abort target assists...
3536 			 */
3537 			mpt_prt(mpt, "just putting it back on the pend q\n");
3538 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
3539 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
3540 			    links);
3541 			continue;
3542 		}
3543 
3544 		error = mpt_scsi_send_tmf(mpt,
3545 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3546 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
3547 		    htole32(req->index | scsi_io_handler_id), TRUE);
3548 
3549 		if (error != 0) {
3550 			/*
3551 			 * mpt_scsi_send_tmf hard resets on failure, so no
3552 			 * need to do so here.  Our queue should be emptied
3553 			 * by the hard reset.
3554 			 */
3555 			continue;
3556 		}
3557 
3558 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
3559 		    REQ_STATE_DONE, TRUE, 500);
3560 
3561 		status = mpt->tmf_req->IOCStatus;
3562 		response = mpt->tmf_req->ResponseCode;
3563 		mpt->tmf_req->state = REQ_STATE_FREE;
3564 
3565 		if (error != 0) {
3566 			/*
3567 			 * If we've errored out,, reset the controller.
3568 			 */
3569 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
3570 			    "Resetting controller\n");
3571 			mpt_reset(mpt, TRUE);
3572 			continue;
3573 		}
3574 
3575 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
3576 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
3577 			    "Resetting controller.\n", status);
3578 			mpt_reset(mpt, TRUE);
3579 			continue;
3580 		}
3581 
3582 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
3583 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
3584 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
3585 			    "Resetting controller.\n", response);
3586 			mpt_reset(mpt, TRUE);
3587 			continue;
3588 		}
3589 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
3590 	}
3591 }
3592 
3593 /************************ Target Mode Support ****************************/
3594 static void
3595 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
3596 {
3597 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
3598 	PTR_SGE_TRANSACTION32 tep;
3599 	PTR_SGE_SIMPLE32 se;
3600 	bus_addr_t paddr;
3601 
3602 	paddr = req->req_pbuf;
3603 	paddr += MPT_RQSL(mpt);
3604 
3605 	fc = req->req_vbuf;
3606 	memset(fc, 0, MPT_REQUEST_AREA);
3607 	fc->BufferCount = 1;
3608 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
3609 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
3610 
3611 	/*
3612 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
3613 	 * consist of a TE SGL element (with details length of zero)
3614 	 * followe by a SIMPLE SGL element which holds the address
3615 	 * of the buffer.
3616 	 */
3617 
3618 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
3619 
3620 	tep->ContextSize = 4;
3621 	tep->Flags = 0;
3622 	tep->TransactionContext[0] = htole32(ioindex);
3623 
3624 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
3625 	se->FlagsLength =
3626 		MPI_SGE_FLAGS_HOST_TO_IOC	|
3627 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
3628 		MPI_SGE_FLAGS_LAST_ELEMENT	|
3629 		MPI_SGE_FLAGS_END_OF_LIST	|
3630 		MPI_SGE_FLAGS_END_OF_BUFFER;
3631 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
3632 	se->FlagsLength |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
3633 	se->Address = (uint32_t) paddr;
3634 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3635 	    "add ELS index %d ioindex %d for %p:%u\n",
3636 	    req->index, ioindex, req, req->serno);
3637 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
3638 	    ("mpt_fc_post_els: request not locked"));
3639 	mpt_send_cmd(mpt, req);
3640 }
3641 
3642 static void
3643 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
3644 {
3645 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
3646 	PTR_CMD_BUFFER_DESCRIPTOR cb;
3647 	bus_addr_t paddr;
3648 
3649 	paddr = req->req_pbuf;
3650 	paddr += MPT_RQSL(mpt);
3651 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
3652 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
3653 
3654 	fc = req->req_vbuf;
3655 	fc->BufferCount = 1;
3656 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
3657 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3658 
3659 	cb = &fc->Buffer[0];
3660 	cb->IoIndex = htole16(ioindex);
3661 	cb->u.PhysicalAddress32 = (U32) paddr;
3662 
3663 	mpt_check_doorbell(mpt);
3664 	mpt_send_cmd(mpt, req);
3665 }
3666 
3667 static int
3668 mpt_add_els_buffers(struct mpt_softc *mpt)
3669 {
3670 	int i;
3671 
3672 	if (mpt->is_fc == 0) {
3673 		return (TRUE);
3674 	}
3675 
3676 	if (mpt->els_cmds_allocated) {
3677 		return (TRUE);
3678 	}
3679 
3680 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
3681 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3682 
3683 	if (mpt->els_cmd_ptrs == NULL) {
3684 		return (FALSE);
3685 	}
3686 
3687 	/*
3688 	 * Feed the chip some ELS buffer resources
3689 	 */
3690 	for (i = 0; i < MPT_MAX_ELS; i++) {
3691 		request_t *req = mpt_get_request(mpt, FALSE);
3692 		if (req == NULL) {
3693 			break;
3694 		}
3695 		req->state |= REQ_STATE_LOCKED;
3696 		mpt->els_cmd_ptrs[i] = req;
3697 		mpt_fc_post_els(mpt, req, i);
3698 	}
3699 
3700 	if (i == 0) {
3701 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
3702 		free(mpt->els_cmd_ptrs, M_DEVBUF);
3703 		mpt->els_cmd_ptrs = NULL;
3704 		return (FALSE);
3705 	}
3706 	if (i != MPT_MAX_ELS) {
3707 		mpt_lprt(mpt, MPT_PRT_INFO,
3708 		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
3709 	}
3710 	mpt->els_cmds_allocated = i;
3711 	return(TRUE);
3712 }
3713 
3714 static int
3715 mpt_add_target_commands(struct mpt_softc *mpt)
3716 {
3717 	int i, max;
3718 
3719 	if (mpt->tgt_cmd_ptrs) {
3720 		return (TRUE);
3721 	}
3722 
3723 	max = MPT_MAX_REQUESTS(mpt) >> 1;
3724 	if (max > mpt->mpt_max_tgtcmds) {
3725 		max = mpt->mpt_max_tgtcmds;
3726 	}
3727 	mpt->tgt_cmd_ptrs =
3728 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
3729 	if (mpt->tgt_cmd_ptrs == NULL) {
3730 		mpt_prt(mpt,
3731 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
3732 		return (FALSE);
3733 	}
3734 
3735 	for (i = 0; i < max; i++) {
3736 		request_t *req;
3737 
3738 		req = mpt_get_request(mpt, FALSE);
3739 		if (req == NULL) {
3740 			break;
3741 		}
3742 		req->state |= REQ_STATE_LOCKED;
3743 		mpt->tgt_cmd_ptrs[i] = req;
3744 		mpt_post_target_command(mpt, req, i);
3745 	}
3746 
3747 
3748 	if (i == 0) {
3749 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
3750 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
3751 		mpt->tgt_cmd_ptrs = NULL;
3752 		return (FALSE);
3753 	}
3754 
3755 	mpt->tgt_cmds_allocated = i;
3756 
3757 	if (i < max) {
3758 		mpt_lprt(mpt, MPT_PRT_INFO,
3759 		    "added %d of %d target bufs\n", i, max);
3760 	}
3761 	return (i);
3762 }
3763 
3764 static void
3765 mpt_free_els_buffers(struct mpt_softc *mpt)
3766 {
3767 	mpt_prt(mpt, "fix me! need to implement mpt_free_els_buffers");
3768 }
3769 
3770 static void
3771 mpt_free_target_commands(struct mpt_softc *mpt)
3772 {
3773 	mpt_prt(mpt, "fix me! need to implement mpt_free_target_commands");
3774 }
3775 
3776 
3777 static int
3778 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3779 {
3780 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3781 		mpt->twildcard = 1;
3782 	} else if (lun >= MPT_MAX_LUNS) {
3783 		return (EINVAL);
3784 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3785 		return (EINVAL);
3786 	}
3787 	if (mpt->tenabled == 0) {
3788 		/*
3789 		 * Try to add some target command resources
3790 		 */
3791 		if (mpt_add_target_commands(mpt) == FALSE) {
3792 			mpt_free_els_buffers(mpt);
3793 			return (ENOMEM);
3794 		}
3795 		if (mpt->is_fc) {
3796 			(void) mpt_fc_reset_link(mpt, 0);
3797 		}
3798 		mpt->tenabled = 1;
3799 	}
3800 	if (lun == CAM_LUN_WILDCARD) {
3801 		mpt->trt_wildcard.enabled = 1;
3802 	} else {
3803 		mpt->trt[lun].enabled = 1;
3804 	}
3805 	return (0);
3806 }
3807 
3808 static int
3809 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3810 {
3811 	int i;
3812 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3813 		mpt->twildcard = 0;
3814 	} else if (lun >= MPT_MAX_LUNS) {
3815 		return (EINVAL);
3816 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3817 		return (EINVAL);
3818 	}
3819 	if (lun == CAM_LUN_WILDCARD) {
3820 		mpt->trt_wildcard.enabled = 0;
3821 	} else {
3822 		mpt->trt[lun].enabled = 0;
3823 	}
3824 	for (i = 0; i < MPT_MAX_LUNS; i++) {
3825 		if (mpt->trt[lun].enabled) {
3826 			break;
3827 		}
3828 	}
3829 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
3830 		mpt_free_els_buffers(mpt);
3831 		mpt_free_target_commands(mpt);
3832 		if (mpt->is_fc) {
3833 			(void) mpt_fc_reset_link(mpt, 0);
3834 		}
3835 		mpt->tenabled = 0;
3836 	}
3837 	return (0);
3838 }
3839 
3840 /*
3841  * Called with MPT lock held
3842  */
3843 static void
3844 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
3845 {
3846 	struct ccb_scsiio *csio = &ccb->csio;
3847 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
3848 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
3849 
3850 	switch (tgt->state) {
3851 	case TGT_STATE_IN_CAM:
3852 		break;
3853 	case TGT_STATE_MOVING_DATA:
3854 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3855 		xpt_freeze_simq(mpt->sim, 1);
3856 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3857 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3858 		MPTLOCK_2_CAMLOCK(mpt);
3859 		xpt_done(ccb);
3860 		CAMLOCK_2_MPTLOCK(mpt);
3861 		return;
3862 	default:
3863 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
3864 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
3865 		mpt_tgt_dump_req_state(mpt, cmd_req);
3866 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3867 		MPTLOCK_2_CAMLOCK(mpt);
3868 		xpt_done(ccb);
3869 		CAMLOCK_2_MPTLOCK(mpt);
3870 		return;
3871 	}
3872 
3873 	if (csio->dxfer_len) {
3874 		bus_dmamap_callback_t *cb;
3875 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
3876 		request_t *req;
3877 
3878 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
3879 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
3880 
3881 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
3882 			if (mpt->outofbeer == 0) {
3883 				mpt->outofbeer = 1;
3884 				xpt_freeze_simq(mpt->sim, 1);
3885 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
3886 			}
3887 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3888 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3889 			MPTLOCK_2_CAMLOCK(mpt);
3890 			xpt_done(ccb);
3891 			CAMLOCK_2_MPTLOCK(mpt);
3892 			return;
3893 		}
3894 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
3895 		if (sizeof (bus_addr_t) > 4) {
3896 			cb = mpt_execute_req_a64;
3897 		} else {
3898 			cb = mpt_execute_req;
3899 		}
3900 
3901 		req->ccb = ccb;
3902 		ccb->ccb_h.ccb_req_ptr = req;
3903 
3904 		/*
3905 		 * Record the currently active ccb and the
3906 		 * request for it in our target state area.
3907 		 */
3908 		tgt->ccb = ccb;
3909 		tgt->req = req;
3910 
3911 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
3912 		ta = req->req_vbuf;
3913 
3914 		if (mpt->is_sas == 0) {
3915 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
3916 			     cmd_req->req_vbuf;
3917 			ta->QueueTag = ssp->InitiatorTag;
3918 		} else if (mpt->is_spi) {
3919 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
3920 			     cmd_req->req_vbuf;
3921 			ta->QueueTag = sp->Tag;
3922 		}
3923 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
3924 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3925 		ta->ReplyWord = htole32(tgt->reply_desc);
3926 		if (csio->ccb_h.target_lun > 256) {
3927 			ta->LUN[0] =
3928 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
3929 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
3930 		} else {
3931 			ta->LUN[1] = csio->ccb_h.target_lun;
3932 		}
3933 
3934 		ta->RelativeOffset = tgt->bytes_xfered;
3935 		ta->DataLength = ccb->csio.dxfer_len;
3936 		if (ta->DataLength > tgt->resid) {
3937 			ta->DataLength = tgt->resid;
3938 		}
3939 
3940 		/*
3941 		 * XXX Should be done after data transfer completes?
3942 		 */
3943 		tgt->resid -= csio->dxfer_len;
3944 		tgt->bytes_xfered += csio->dxfer_len;
3945 
3946 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
3947 			ta->TargetAssistFlags |=
3948 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
3949 		}
3950 
3951 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
3952 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
3953 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
3954 			ta->TargetAssistFlags |=
3955 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
3956 		}
3957 #endif
3958 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
3959 
3960 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3961 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
3962 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
3963 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
3964 
3965 		MPTLOCK_2_CAMLOCK(mpt);
3966 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
3967 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
3968 				int error;
3969 				int s = splsoftvm();
3970 				error = bus_dmamap_load(mpt->buffer_dmat,
3971 				    req->dmap, csio->data_ptr, csio->dxfer_len,
3972 				    cb, req, 0);
3973 				splx(s);
3974 				if (error == EINPROGRESS) {
3975 					xpt_freeze_simq(mpt->sim, 1);
3976 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3977 				}
3978 			} else {
3979 				/*
3980 				 * We have been given a pointer to single
3981 				 * physical buffer.
3982 				 */
3983 				struct bus_dma_segment seg;
3984 				seg.ds_addr = (bus_addr_t)
3985 				    (vm_offset_t)csio->data_ptr;
3986 				seg.ds_len = csio->dxfer_len;
3987 				(*cb)(req, &seg, 1, 0);
3988 			}
3989 		} else {
3990 			/*
3991 			 * We have been given a list of addresses.
3992 			 * This case could be easily supported but they are not
3993 			 * currently generated by the CAM subsystem so there
3994 			 * is no point in wasting the time right now.
3995 			 */
3996 			struct bus_dma_segment *sgs;
3997 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
3998 				(*cb)(req, NULL, 0, EFAULT);
3999 			} else {
4000 				/* Just use the segments provided */
4001 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4002 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4003 			}
4004 		}
4005 		CAMLOCK_2_MPTLOCK(mpt);
4006 	} else {
4007 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4008 
4009 		/*
4010 		 * XXX: I don't know why this seems to happen, but
4011 		 * XXX: completing the CCB seems to make things happy.
4012 		 * XXX: This seems to happen if the initiator requests
4013 		 * XXX: enough data that we have to do multiple CTIOs.
4014 		 */
4015 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4016 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4017 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4018 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4019 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4020 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4021 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4022 			MPTLOCK_2_CAMLOCK(mpt);
4023 			xpt_done(ccb);
4024 			CAMLOCK_2_MPTLOCK(mpt);
4025 			return;
4026 		}
4027 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4028 			sp = sense;
4029 			memcpy(sp, &csio->sense_data,
4030 			   min(csio->sense_len, MPT_SENSE_SIZE));
4031 		}
4032 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4033 	}
4034 }
4035 
4036 /*
4037  * Abort queued up CCBs
4038  */
4039 static cam_status
4040 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4041 {
4042 	struct mpt_hdr_stailq *lp;
4043 	struct ccb_hdr *srch;
4044 	int found = 0;
4045 	union ccb *accb = ccb->cab.abort_ccb;
4046 	tgt_resource_t *trtp;
4047 
4048 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4049 
4050 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4051 		trtp = &mpt->trt_wildcard;
4052 	} else {
4053 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4054 	}
4055 
4056 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4057 		lp = &trtp->atios;
4058 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4059 		lp = &trtp->inots;
4060 	} else {
4061 		return (CAM_REQ_INVALID);
4062 	}
4063 
4064 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4065 		if (srch == &accb->ccb_h) {
4066 			found = 1;
4067 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4068 			break;
4069 		}
4070 	}
4071 	if (found) {
4072 		accb->ccb_h.status = CAM_REQ_ABORTED;
4073 		xpt_done(accb);
4074 		return (CAM_REQ_CMP);
4075 	}
4076 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
4077 	return (CAM_PATH_INVALID);
4078 }
4079 
4080 /*
4081  * Ask the MPT to abort the current target command
4082  */
4083 static int
4084 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4085 {
4086 	int error;
4087 	request_t *req;
4088 	PTR_MSG_TARGET_MODE_ABORT abtp;
4089 
4090 	req = mpt_get_request(mpt, FALSE);
4091 	if (req == NULL) {
4092 		return (-1);
4093 	}
4094 	abtp = req->req_vbuf;
4095 	memset(abtp, 0, sizeof (*abtp));
4096 
4097 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4098 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4099 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4100 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4101 	error = 0;
4102 	if (mpt->is_fc || mpt->is_sas) {
4103 		mpt_send_cmd(mpt, req);
4104 	} else {
4105 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4106 	}
4107 	return (error);
4108 }
4109 
4110 /*
4111  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4112  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4113  * FC929 to set bogus FC_RSP fields (nonzero residuals
4114  * but w/o RESID fields set). This causes QLogic initiators
4115  * to think maybe that a frame was lost.
4116  *
4117  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4118  * we use allocated requests to do TARGET_ASSIST and we
4119  * need to know when to release them.
4120  */
4121 
4122 static void
4123 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4124     uint8_t status, uint8_t const *sense_data)
4125 {
4126 	uint8_t *cmd_vbuf;
4127 	mpt_tgt_state_t *tgt;
4128 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4129 	request_t *req;
4130 	bus_addr_t paddr;
4131 	int resplen = 0;
4132 
4133 	cmd_vbuf = cmd_req->req_vbuf;
4134 	cmd_vbuf += MPT_RQSL(mpt);
4135 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4136 
4137 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4138 		if (mpt->outofbeer == 0) {
4139 			mpt->outofbeer = 1;
4140 			xpt_freeze_simq(mpt->sim, 1);
4141 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4142 		}
4143 		if (ccb) {
4144 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4145 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4146 			MPTLOCK_2_CAMLOCK(mpt);
4147 			xpt_done(ccb);
4148 			CAMLOCK_2_MPTLOCK(mpt);
4149 		} else {
4150 			mpt_prt(mpt,
4151 			    "XXXX could not allocate status req- dropping\n");
4152 		}
4153 		return;
4154 	}
4155 	req->ccb = ccb;
4156 	if (ccb) {
4157 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4158 		ccb->ccb_h.ccb_req_ptr = req;
4159 	}
4160 
4161 	/*
4162 	 * Record the currently active ccb, if any, and the
4163 	 * request for it in our target state area.
4164 	 */
4165 	tgt->ccb = ccb;
4166 	tgt->req = req;
4167 	tgt->state = TGT_STATE_SENDING_STATUS;
4168 
4169 	tp = req->req_vbuf;
4170 	paddr = req->req_pbuf;
4171 	paddr += MPT_RQSL(mpt);
4172 
4173 	memset(tp, 0, sizeof (*tp));
4174 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4175 	if (mpt->is_fc) {
4176 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4177 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4178 		uint8_t *sts_vbuf;
4179 		uint32_t *rsp;
4180 
4181 		sts_vbuf = req->req_vbuf;
4182 		sts_vbuf += MPT_RQSL(mpt);
4183 		rsp = (uint32_t *) sts_vbuf;
4184 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4185 
4186 		/*
4187 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4188 		 * It has to be big-endian in memory and is organized
4189 		 * in 32 bit words, which are much easier to deal with
4190 		 * as words which are swizzled as needed.
4191 		 *
4192 		 * All we're filling here is the FC_RSP payload.
4193 		 * We may just have the chip synthesize it if
4194 		 * we have no residual and an OK status.
4195 		 *
4196 		 */
4197 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4198 
4199 		rsp[2] = status;
4200 		if (tgt->resid) {
4201 			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
4202 			rsp[3] = htobe32(tgt->resid);
4203 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4204 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4205 #endif
4206 		}
4207 		if (status == SCSI_STATUS_CHECK_COND) {
4208 			int i;
4209 
4210 			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
4211 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4212 			if (sense_data) {
4213 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4214 			} else {
4215 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4216 				    "TION but no sense data?\n");
4217 				memset(&rsp, 0, MPT_SENSE_SIZE);
4218 			}
4219 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4220 				rsp[i] = htobe32(rsp[i]);
4221 			}
4222 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4223 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4224 #endif
4225 		}
4226 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4227 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4228 #endif
4229 		rsp[2] = htobe32(rsp[2]);
4230 	} else if (mpt->is_sas) {
4231 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4232 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4233 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4234 	} else {
4235 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4236 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4237 		tp->StatusCode = status;
4238 		tp->QueueTag = htole16(sp->Tag);
4239 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4240 	}
4241 
4242 	tp->ReplyWord = htole32(tgt->reply_desc);
4243 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4244 
4245 #ifdef	WE_CAN_USE_AUTO_REPOST
4246 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4247 #endif
4248 	if (status == SCSI_STATUS_OK && resplen == 0) {
4249 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4250 	} else {
4251 		tp->StatusDataSGE.u.Address32 = (uint32_t) paddr;
4252 		tp->StatusDataSGE.FlagsLength =
4253 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4254 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4255 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4256 			MPI_SGE_FLAGS_END_OF_LIST	|
4257 			MPI_SGE_FLAGS_END_OF_BUFFER;
4258 		tp->StatusDataSGE.FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
4259 		tp->StatusDataSGE.FlagsLength |= resplen;
4260 	}
4261 
4262 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4263 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4264 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4265 	    req->serno, tgt->resid);
4266 	if (ccb) {
4267 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4268 		ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz);
4269 	}
4270 	mpt_send_cmd(mpt, req);
4271 }
4272 
4273 static void
4274 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4275     tgt_resource_t *trtp, int init_id)
4276 {
4277 	struct ccb_immed_notify *inot;
4278 	mpt_tgt_state_t *tgt;
4279 
4280 	tgt = MPT_TGT_STATE(mpt, req);
4281 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4282 	if (inot == NULL) {
4283 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4284 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4285 		return;
4286 	}
4287 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4288 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4289 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4290 
4291 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4292 	inot->sense_len = 0;
4293 	memset(inot->message_args, 0, sizeof (inot->message_args));
4294 	inot->initiator_id = init_id;	/* XXX */
4295 
4296 	/*
4297 	 * This is a somewhat grotesque attempt to map from task management
4298 	 * to old style SCSI messages. God help us all.
4299 	 */
4300 	switch (fc) {
4301 	case MPT_ABORT_TASK_SET:
4302 		inot->message_args[0] = MSG_ABORT_TAG;
4303 		break;
4304 	case MPT_CLEAR_TASK_SET:
4305 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4306 		break;
4307 	case MPT_TARGET_RESET:
4308 		inot->message_args[0] = MSG_TARGET_RESET;
4309 		break;
4310 	case MPT_CLEAR_ACA:
4311 		inot->message_args[0] = MSG_CLEAR_ACA;
4312 		break;
4313 	case MPT_TERMINATE_TASK:
4314 		inot->message_args[0] = MSG_ABORT_TAG;
4315 		break;
4316 	default:
4317 		inot->message_args[0] = MSG_NOOP;
4318 		break;
4319 	}
4320 	tgt->ccb = (union ccb *) inot;
4321 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4322 	MPTLOCK_2_CAMLOCK(mpt);
4323 	xpt_done((union ccb *)inot);
4324 	CAMLOCK_2_MPTLOCK(mpt);
4325 }
4326 
4327 static void
4328 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4329 {
4330 	struct ccb_accept_tio *atiop;
4331 	lun_id_t lun;
4332 	int tag_action = 0;
4333 	mpt_tgt_state_t *tgt;
4334 	tgt_resource_t *trtp = NULL;
4335 	U8 *lunptr;
4336 	U8 *vbuf;
4337 	U16 itag;
4338 	U16 ioindex;
4339 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4340 	uint8_t *cdbp;
4341 
4342 	/*
4343 	 * First, DMA sync the received command- which is in the *request*
4344 	 * phys area.
4345 	 * XXX: We could optimize this for a range
4346 	 */
4347 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
4348 	    BUS_DMASYNC_POSTREAD);
4349 
4350 	/*
4351 	 * Stash info for the current command where we can get at it later.
4352 	 */
4353 	vbuf = req->req_vbuf;
4354 	vbuf += MPT_RQSL(mpt);
4355 
4356 	/*
4357 	 * Get our state pointer set up.
4358 	 */
4359 	tgt = MPT_TGT_STATE(mpt, req);
4360 	if (tgt->state != TGT_STATE_LOADED) {
4361 		mpt_tgt_dump_req_state(mpt, req);
4362 		panic("bad target state in mpt_scsi_tgt_atio");
4363 	}
4364 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4365 	tgt->state = TGT_STATE_IN_CAM;
4366 	tgt->reply_desc = reply_desc;
4367 	ioindex = GET_IO_INDEX(reply_desc);
4368 
4369 	if (mpt->is_fc) {
4370 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4371 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4372 		if (fc->FcpCntl[2]) {
4373 			/*
4374 			 * Task Management Request
4375 			 */
4376 			switch (fc->FcpCntl[2]) {
4377 			case 0x2:
4378 				fct = MPT_ABORT_TASK_SET;
4379 				break;
4380 			case 0x4:
4381 				fct = MPT_CLEAR_TASK_SET;
4382 				break;
4383 			case 0x20:
4384 				fct = MPT_TARGET_RESET;
4385 				break;
4386 			case 0x40:
4387 				fct = MPT_CLEAR_ACA;
4388 				break;
4389 			case 0x80:
4390 				fct = MPT_TERMINATE_TASK;
4391 				break;
4392 			default:
4393 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4394 				    fc->FcpCntl[2]);
4395 				mpt_scsi_tgt_status(mpt, 0, req,
4396 				    SCSI_STATUS_OK, 0);
4397 				return;
4398 			}
4399 		} else {
4400 			switch (fc->FcpCntl[1]) {
4401 			case 0:
4402 				tag_action = MSG_SIMPLE_Q_TAG;
4403 				break;
4404 			case 1:
4405 				tag_action = MSG_HEAD_OF_Q_TAG;
4406 				break;
4407 			case 2:
4408 				tag_action = MSG_ORDERED_Q_TAG;
4409 				break;
4410 			default:
4411 				/*
4412 				 * Bah. Ignore Untagged Queing and ACA
4413 				 */
4414 				tag_action = MSG_SIMPLE_Q_TAG;
4415 				break;
4416 			}
4417 		}
4418 		tgt->resid = be32toh(fc->FcpDl);
4419 		cdbp = fc->FcpCdb;
4420 		lunptr = fc->FcpLun;
4421 		itag = be16toh(fc->OptionalOxid);
4422 	} else if (mpt->is_sas) {
4423 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4424 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4425 		cdbp = ssp->CDB;
4426 		lunptr = ssp->LogicalUnitNumber;
4427 		itag = ssp->InitiatorTag;
4428 	} else {
4429 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4430 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4431 		cdbp = sp->CDB;
4432 		lunptr = sp->LogicalUnitNumber;
4433 		itag = sp->Tag;
4434 	}
4435 
4436 	/*
4437 	 * Generate a simple lun
4438 	 */
4439 	switch (lunptr[0] & 0xc0) {
4440 	case 0x40:
4441 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
4442 		break;
4443 	case 0:
4444 		lun = lunptr[1];
4445 		break;
4446 	default:
4447 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
4448 		lun = 0xffff;
4449 		break;
4450 	}
4451 
4452 	/*
4453 	 * Deal with non-enabled or bad luns here.
4454 	 */
4455 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4456 	    mpt->trt[lun].enabled == 0) {
4457 		if (mpt->twildcard) {
4458 			trtp = &mpt->trt_wildcard;
4459 		} else if (fct != MPT_NIL_TMT_VALUE) {
4460 			const uint8_t sp[MPT_SENSE_SIZE] = {
4461 				0xf0, 0, 0x5, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0x25
4462 			};
4463 			mpt_scsi_tgt_status(mpt, NULL, req,
4464 			    SCSI_STATUS_CHECK_COND, sp);
4465 			return;
4466 		}
4467 	} else {
4468 		trtp = &mpt->trt[lun];
4469 	}
4470 
4471 	/*
4472 	 * Deal with any task management
4473 	 */
4474 	if (fct != MPT_NIL_TMT_VALUE) {
4475 		if (trtp == NULL) {
4476 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
4477 			    fct);
4478 			mpt_scsi_tgt_status(mpt, 0, req,
4479 			    SCSI_STATUS_OK, 0);
4480 		} else {
4481 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
4482 			    GET_INITIATOR_INDEX(reply_desc));
4483 		}
4484 		return;
4485 	}
4486 
4487 
4488 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
4489 	if (atiop == NULL) {
4490 		mpt_lprt(mpt, MPT_PRT_WARN,
4491 		    "no ATIOs for lun %u- sending back %s\n", lun,
4492 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
4493 		mpt_scsi_tgt_status(mpt, NULL, req,
4494 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
4495 		    NULL);
4496 		return;
4497 	}
4498 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
4499 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4500 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
4501 	atiop->ccb_h.ccb_mpt_ptr = mpt;
4502 	atiop->ccb_h.status = CAM_CDB_RECVD;
4503 	atiop->ccb_h.target_lun = lun;
4504 	atiop->sense_len = 0;
4505 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
4506 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
4507 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
4508 
4509 	/*
4510 	 * The tag we construct here allows us to find the
4511 	 * original request that the command came in with.
4512 	 *
4513 	 * This way we don't have to depend on anything but the
4514 	 * tag to find things when CCBs show back up from CAM.
4515 	 */
4516 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4517 	tgt->tag_id = atiop->tag_id;
4518 	if (tag_action) {
4519 		atiop->tag_action = tag_action;
4520 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
4521 	}
4522 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4523 		int i;
4524 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
4525 		    atiop->ccb_h.target_lun);
4526 		for (i = 0; i < atiop->cdb_len; i++) {
4527 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
4528 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
4529 		}
4530 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
4531 	    	    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
4532 	}
4533 
4534 	MPTLOCK_2_CAMLOCK(mpt);
4535 	xpt_done((union ccb *)atiop);
4536 	CAMLOCK_2_MPTLOCK(mpt);
4537 }
4538 
4539 static void
4540 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
4541 {
4542 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4543 
4544 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
4545 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
4546 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
4547 	    tgt->tag_id, tgt->state);
4548 }
4549 
4550 static void
4551 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
4552 {
4553 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
4554 	    req->index, req->index, req->state);
4555 	mpt_tgt_dump_tgt_state(mpt, req);
4556 }
4557 
4558 static int
4559 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
4560     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
4561 {
4562 	int dbg;
4563 	union ccb *ccb;
4564 	U16 status;
4565 
4566 	if (reply_frame == NULL) {
4567 		/*
4568 		 * Figure out what the state of the command is.
4569 		 */
4570 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4571 
4572 #ifdef	INVARIANTS
4573 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
4574 		if (tgt->req) {
4575 			mpt_req_not_spcl(mpt, tgt->req,
4576 			    "turbo scsi_tgt_reply associated req", __LINE__);
4577 		}
4578 #endif
4579 		switch(tgt->state) {
4580 		case TGT_STATE_LOADED:
4581 			/*
4582 			 * This is a new command starting.
4583 			 */
4584 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
4585 			break;
4586 		case TGT_STATE_MOVING_DATA:
4587 		{
4588 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4589 
4590 			ccb = tgt->ccb;
4591 			if (tgt->req == NULL) {
4592 				panic("mpt: turbo target reply with null "
4593 				    "associated request moving data");
4594 				/* NOTREACHED */
4595 			}
4596 			if (ccb == NULL) {
4597 				panic("mpt: turbo target reply with null "
4598 				    "associated ccb moving data");
4599 				/* NOTREACHED */
4600 			}
4601 			tgt->ccb = NULL;
4602 			tgt->nxfers++;
4603 			untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
4604 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4605 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
4606 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
4607 			/*
4608 			 * Free the Target Assist Request
4609 			 */
4610 			KASSERT(tgt->req->ccb == ccb,
4611 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
4612 			    tgt->req->serno, tgt->req->ccb));
4613 			TAILQ_REMOVE(&mpt->request_pending_list,
4614 			    tgt->req, links);
4615 			mpt_free_request(mpt, tgt->req);
4616 			tgt->req = NULL;
4617 
4618 			/*
4619 			 * Do we need to send status now? That is, are
4620 			 * we done with all our data transfers?
4621 			 */
4622 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4623 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4624 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4625 				KASSERT(ccb->ccb_h.status,
4626 				    ("zero ccb sts at %d\n", __LINE__));
4627 				tgt->state = TGT_STATE_IN_CAM;
4628 				if (mpt->outofbeer) {
4629 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4630 					mpt->outofbeer = 0;
4631 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4632 				}
4633 				MPTLOCK_2_CAMLOCK(mpt);
4634 				xpt_done(ccb);
4635 				CAMLOCK_2_MPTLOCK(mpt);
4636 				break;
4637 			}
4638 			/*
4639 			 * Otherwise, send status (and sense)
4640 			 */
4641 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4642 				sp = sense;
4643 				memcpy(sp, &ccb->csio.sense_data,
4644 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
4645 			}
4646 			mpt_scsi_tgt_status(mpt, ccb, req,
4647 			    ccb->csio.scsi_status, sp);
4648 			break;
4649 		}
4650 		case TGT_STATE_SENDING_STATUS:
4651 		case TGT_STATE_MOVING_DATA_AND_STATUS:
4652 		{
4653 			int ioindex;
4654 			ccb = tgt->ccb;
4655 
4656 			if (tgt->req == NULL) {
4657 				panic("mpt: turbo target reply with null "
4658 				    "associated request sending status");
4659 				/* NOTREACHED */
4660 			}
4661 
4662 			if (ccb) {
4663 				tgt->ccb = NULL;
4664 				if (tgt->state ==
4665 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
4666 					tgt->nxfers++;
4667 				}
4668 				untimeout(mpt_timeout, ccb,
4669 				    ccb->ccb_h.timeout_ch);
4670 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4671 					ccb->ccb_h.status |= CAM_SENT_SENSE;
4672 				}
4673 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4674 				    "TARGET_STATUS tag %x sts %x flgs %x req "
4675 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
4676 				    ccb->ccb_h.flags, tgt->req);
4677 				/*
4678 				 * Free the Target Send Status Request
4679 				 */
4680 				KASSERT(tgt->req->ccb == ccb,
4681 				    ("tgt->req %p:%u tgt->req->ccb %p",
4682 				    tgt->req, tgt->req->serno, tgt->req->ccb));
4683 				/*
4684 				 * Notify CAM that we're done
4685 				 */
4686 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4687 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4688 				KASSERT(ccb->ccb_h.status,
4689 				    ("ZERO ccb sts at %d\n", __LINE__));
4690 				tgt->ccb = NULL;
4691 			} else {
4692 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4693 				    "TARGET_STATUS non-CAM for  req %p:%u\n",
4694 				    tgt->req, tgt->req->serno);
4695 			}
4696 			TAILQ_REMOVE(&mpt->request_pending_list,
4697 			    tgt->req, links);
4698 			mpt_free_request(mpt, tgt->req);
4699 			tgt->req = NULL;
4700 
4701 			/*
4702 			 * And re-post the Command Buffer.
4703 			 * This wil reset the state.
4704 			 */
4705 			ioindex = GET_IO_INDEX(reply_desc);
4706 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4707 			mpt_post_target_command(mpt, req, ioindex);
4708 
4709 			/*
4710 			 * And post a done for anyone who cares
4711 			 */
4712 			if (ccb) {
4713 				if (mpt->outofbeer) {
4714 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4715 					mpt->outofbeer = 0;
4716 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4717 				}
4718 				MPTLOCK_2_CAMLOCK(mpt);
4719 				xpt_done(ccb);
4720 				CAMLOCK_2_MPTLOCK(mpt);
4721 			}
4722 			break;
4723 		}
4724 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
4725 			tgt->state = TGT_STATE_LOADED;
4726 			break;
4727 		default:
4728 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
4729 			    "Reply Function\n", tgt->state);
4730 		}
4731 		return (TRUE);
4732 	}
4733 
4734 	status = le16toh(reply_frame->IOCStatus);
4735 	if (status != MPI_IOCSTATUS_SUCCESS) {
4736 		dbg = MPT_PRT_ERROR;
4737 	} else {
4738 		dbg = MPT_PRT_DEBUG1;
4739 	}
4740 
4741 	mpt_lprt(mpt, dbg,
4742 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
4743 	     req, req->serno, reply_frame, reply_frame->Function, status);
4744 
4745 	switch (reply_frame->Function) {
4746 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
4747 	{
4748 		mpt_tgt_state_t *tgt;
4749 #ifdef	INVARIANTS
4750 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
4751 #endif
4752 		if (status != MPI_IOCSTATUS_SUCCESS) {
4753 			/*
4754 			 * XXX What to do?
4755 			 */
4756 			break;
4757 		}
4758 		tgt = MPT_TGT_STATE(mpt, req);
4759 		KASSERT(tgt->state == TGT_STATE_LOADING,
4760 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
4761 		mpt_assign_serno(mpt, req);
4762 		tgt->state = TGT_STATE_LOADED;
4763 		break;
4764 	}
4765 	case MPI_FUNCTION_TARGET_ASSIST:
4766 #ifdef	INVARIANTS
4767 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
4768 #endif
4769 		mpt_prt(mpt, "target assist completion\n");
4770 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4771 		mpt_free_request(mpt, req);
4772 		break;
4773 	case MPI_FUNCTION_TARGET_STATUS_SEND:
4774 #ifdef	INVARIANTS
4775 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
4776 #endif
4777 		mpt_prt(mpt, "status send completion\n");
4778 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4779 		mpt_free_request(mpt, req);
4780 		break;
4781 	case MPI_FUNCTION_TARGET_MODE_ABORT:
4782 	{
4783 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
4784 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
4785 		PTR_MSG_TARGET_MODE_ABORT abtp =
4786 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
4787 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
4788 #ifdef	INVARIANTS
4789 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
4790 #endif
4791 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
4792 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
4793 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4794 		mpt_free_request(mpt, req);
4795 		break;
4796 	}
4797 	default:
4798 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
4799 		    "0x%x\n", reply_frame->Function);
4800 		break;
4801 	}
4802 	return (TRUE);
4803 }
4804