xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision d056fa046c6a91b90cd98165face0e42a33a5173)
/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great way toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"

#include <sys/callout.h>
#include <sys/kthread.h>

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
					MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static void mpt_free_els_buffers(struct mpt_softc *mpt);
static void mpt_free_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t	mpt_cam_probe;
static mpt_attach_handler_t	mpt_cam_attach;
static mpt_enable_handler_t	mpt_cam_enable;
static mpt_event_handler_t	mpt_cam_event;
static mpt_reset_handler_t	mpt_cam_ioc_reset;
static mpt_detach_handler_t	mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name		= "mpt_cam",
	.probe		= mpt_cam_probe,
	.attach		= mpt_cam_attach,
	.enable		= mpt_cam_enable,
	.event		= mpt_cam_event,
	.reset		= mpt_cam_ioc_reset,
	.detach		= mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);

int
mpt_cam_probe(struct mpt_softc *mpt)
{
	/*
	 * Only attach to nodes that support the initiator or target
	 * role or have RAID physical devices that need CAM pass-thru support.
	 */
	if ((mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_INITIATOR) != 0
	 || (mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_TARGET) != 0
	 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t	 handler;
	int		 maxq;
	int		 error;

	TAILQ_INIT(&mpt->request_timeout_list);
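	/*
	 * Clamp the number of requests we expose to CAM to what the IOC
	 * can actually accept (its global credit count); reservations for
	 * ELS buffers and the dedicated TMF request are deducted below.
	 */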
	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_io_handler_id);
	if (error != 0) {
		goto cleanup0;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_tmf_handler_id);
	if (error != 0) {
		goto cleanup0;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			goto cleanup0;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			goto cleanup0;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add resources until we actually enable target mode.
	 */
	if ((mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			goto cleanup0;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		goto cleanup0;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup0;
	}

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock.
	 */
	MPTLOCK_2_CAMLOCK(mpt);

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
	    mpt->unit, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		CAMLOCK_2_MPTLOCK(mpt);
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
	    mpt->unit, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		goto cleanup;
	}
	CAMLOCK_2_MPTLOCK(mpt);
	return (0);

cleanup:
	CAMLOCK_2_MPTLOCK(mpt);
cleanup0:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information.
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
		 mpt->mpt_fcport_page0.Header.PageVersion,
		 mpt->mpt_fcport_page0.Header.PageLength,
		 mpt->mpt_fcport_page0.Header.PageNumber,
		 mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);

	return (0);
}

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = le32toh(fc.Flags);

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}
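
	/*
	 * Strip the FCP protocol bits; the role reconfiguration code below
	 * rebuilds the ones the configured role requires.
	 */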
	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
				/* Restore the bit we stripped above. */
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
				/* Restore the bit we stripped above. */
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = htole32(fl);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

/*
 * Read SAS configuration information. Nothing to do yet.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	return (0);
}

/*
 * Set SAS configuration information. Nothing to do yet.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	return (0);
}

/*
 * Read SCSI configuration information.
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
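	/*
	 * SPI Port Page 1 Configuration carries our initiator SCSI ID in
	 * its low bits and, in the upper 16 bits, the bitmask of IDs this
	 * port responds to; pp1val is the value we therefore expect.
	 */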
	int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
	int error;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x); should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets and does not require us to
	 * do Domain Validation.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	j = mpt->mpt_port_page2.PortFlags &
	    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* &&
	    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

int
mpt_cam_enable(struct mpt_softc *mpt)
{
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			return (EIO);
		}
		if (mpt_set_initial_config_fc(mpt)) {
			return (EIO);
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			return (EIO);
		}
		if (mpt_set_initial_config_sas(mpt)) {
			return (EIO);
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			return (EIO);
		}
		if (mpt_set_initial_config_spi(mpt)) {
			return (EIO);
		}
	}
	return (0);
}

void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
			       mpt->scsi_tgt_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}

	if (mpt->sim != NULL) {
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim, TRUE);
		mpt->sim = NULL;
		CAMLOCK_2_MPTLOCK(mpt);
	}

	if (mpt->phydisk_sim != NULL) {
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim, TRUE);
		mpt->phydisk_sim = NULL;
		CAMLOCK_2_MPTLOCK(mpt);
	}
}

/*
 * This routine is used after a system crash to dump core onto the swap
 * device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	MPT_LOCK(mpt);
	mpt_intr(mpt);
	MPT_UNLOCK(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb	 *ccb;
	struct mpt_softc *mpt;
	request_t	 *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	MPT_LOCK(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
/* XXX: WHAT ARE WE TRYING TO DO HERE? */
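	/*
	 * Move the request from the pending list to the timeout list and
	 * let the recovery thread take care of the actual error recovery.
	 */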
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
	MPT_UNLOCK(mpt);
}

/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
 *
 * Takes a list of physical segments and builds the SGL for the SCSI IO
 * command, then forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address.Low = dm_segs->ds_addr;
		if (sizeof(bus_addr_t) > 4) {
			se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
		}
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
				MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
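	/* ChainOffset is expressed in 32-bit words, hence the shift by 2. */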
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    (uint32_t) ((uint64_t)chain_list_addr >> 32);
		}
		ce->Address.Low = (uint32_t) chain_list_addr;
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length, in bytes,
			 * of the segments it holds plus the trailing chain
			 * element.
			 *
			 * The next chain descriptor offset is the length,
			 * in 32-bit words, of those segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
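			/*
			 * For example, four remaining SIMPLE64 elements at
			 * 12 bytes each give Length = 48, NextChainOffset =
			 * 48 >> 2 = 12 words, and a final Length of 60 once
			 * the trailing chain element is added.
			 */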
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address.Low = dm_segs->ds_addr;
			if (sizeof (bus_addr_t) > 4) {
				se->Address.High =
				    ((uint64_t)dm_segs->ds_addr) >> 32;
			}
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
					MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se++;
			seg++;
			dm_segs++;
		}

    next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			CAMLOCK_2_MPTLOCK(mpt);
			nrq = mpt_get_request(mpt, FALSE);
			MPTLOCK_2_CAMLOCK(mpt);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
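	/*
	 * Register the watchdog: the CCB timeout is in milliseconds, while
	 * timeout(9) takes ticks, hence the scaling by hz over 1000.
	 */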
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		ccb->ccb_h.timeout_ch =
			timeout(mpt_timeout, (caddr_t)ccb,
				(ccb->ccb_h.timeout * hz) / 1000);
	} else {
		callout_handle_init(&ccb->ccb_h.timeout_ch);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			printf("  Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    ccb->csio.scsi_status == SCSI_STATUS_OK &&
		    tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	CAMLOCK_2_MPTLOCK(mpt);
	mpt_send_cmd(mpt, req);
	MPTLOCK_2_CAMLOCK(mpt);
}

static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE32 *se;
	SGE_CHAIN32 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE32 pointers and start doing CHAIN32 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE32 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address = dm_segs->ds_addr;
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
				MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE32 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN32 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		ce->Address = chain_list_addr;
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length, in bytes,
			 * of the segments it holds plus the trailing chain
			 * element.
			 *
			 * The next chain descriptor offset is the length,
			 * in 32-bit words, of those segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN32);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
		}

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address = dm_segs->ds_addr;
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
					MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se++;
			seg++;
			dm_segs++;
		}

    next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			CAMLOCK_2_MPTLOCK(mpt);
			nrq = mpt_get_request(mpt, FALSE);
			MPTLOCK_2_CAMLOCK(mpt);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		ccb->ccb_h.timeout_ch =
			timeout(mpt_timeout, (caddr_t)ccb,
				(ccb->ccb_h.timeout * hz) / 1000);
	} else {
		callout_handle_init(&ccb->ccb_h.timeout_ch);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			printf("  Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    ccb->csio.scsi_status == SCSI_STATUS_OK &&
		    tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
1671 	}
1672 	CAMLOCK_2_MPTLOCK(mpt);
1673 	mpt_send_cmd(mpt, req);
1674 	MPTLOCK_2_CAMLOCK(mpt);
1675 }
1676 
1677 static void
1678 mpt_start(struct cam_sim *sim, union ccb *ccb)
1679 {
1680 	request_t *req;
1681 	struct mpt_softc *mpt;
1682 	MSG_SCSI_IO_REQUEST *mpt_req;
1683 	struct ccb_scsiio *csio = &ccb->csio;
1684 	struct ccb_hdr *ccbh = &ccb->ccb_h;
1685 	bus_dmamap_callback_t *cb;
1686 	target_id_t tgt;
1687 	int raid_passthru;
1688 
1689 	/* Get the pointer for the physical addapter */
1690 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1691 	raid_passthru = (sim == mpt->phydisk_sim);
1692 
1693 	CAMLOCK_2_MPTLOCK(mpt);
1694 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
1695 		if (mpt->outofbeer == 0) {
1696 			mpt->outofbeer = 1;
1697 			xpt_freeze_simq(mpt->sim, 1);
1698 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
1699 		}
1700 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1701 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
1702 		MPTLOCK_2_CAMLOCK(mpt);
1703 		xpt_done(ccb);
1704 		return;
1705 	}
1706 #ifdef	INVARIANTS
1707 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
1708 #endif
1709 	MPTLOCK_2_CAMLOCK(mpt);
1710 
1711 	if (sizeof (bus_addr_t) > 4) {
1712 		cb = mpt_execute_req_a64;
1713 	} else {
1714 		cb = mpt_execute_req;
1715 	}
1716 
1717 	/*
1718 	 * Link the ccb and the request structure so we can find
1719 	 * the other knowing either the request or the ccb
1720 	 */
1721 	req->ccb = ccb;
1722 	ccb->ccb_h.ccb_req_ptr = req;
1723 
1724 	/* Now we build the command for the IOC */
1725 	mpt_req = req->req_vbuf;
1726 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
1727 
1728 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1729 	if (raid_passthru) {
1730 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
1731 		CAMLOCK_2_MPTLOCK(mpt);
1732 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
1733 			MPTLOCK_2_CAMLOCK(mpt);
1734 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1735 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
1736 			xpt_done(ccb);
1737 			return;
1738 		}
1739 		MPTLOCK_2_CAMLOCK(mpt);
1740 		mpt_req->Bus = 0;	/* we never set bus here */
1741 	} else {
1742 		tgt = ccb->ccb_h.target_id;
1743 		mpt_req->Bus = 0;	/* XXX */
1744 
1745 	}
1746 	mpt_req->SenseBufferLength =
1747 		(csio->sense_len < MPT_SENSE_SIZE) ?
1748 		 csio->sense_len : MPT_SENSE_SIZE;
1749 
1750 	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
1753 	 */
1754 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
1755 
1756 	/* Which physical device to do the I/O on */
1757 	mpt_req->TargetID = tgt;
1758 
1759 	/* We assume a single level LUN type */
1760 	if (ccb->ccb_h.target_lun >= 256) {
1761 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
1762 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
1763 	} else {
1764 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
1765 	}
1766 
1767 	/* Set the direction of the transfer */
1768 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1769 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
1770 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1771 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
1772 	} else {
1773 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
1774 	}
1775 
1776 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1777 		switch(ccb->csio.tag_action) {
1778 		case MSG_HEAD_OF_Q_TAG:
1779 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
1780 			break;
1781 		case MSG_ACA_TASK:
1782 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
1783 			break;
1784 		case MSG_ORDERED_Q_TAG:
1785 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
1786 			break;
1787 		case MSG_SIMPLE_Q_TAG:
1788 		default:
1789 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1790 			break;
1791 		}
1792 	} else {
1793 		if (mpt->is_fc || mpt->is_sas) {
1794 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1795 		} else {
1796 			/* XXX No such thing for a target doing packetized. */
1797 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
1798 		}
1799 	}
1800 
1801 	if (mpt->is_spi) {
1802 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1803 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
1804 		}
1805 	}
1806 
1807 	/* Copy the scsi command block into place */
1808 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1809 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
1810 	} else {
1811 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
1812 	}
1813 
1814 	mpt_req->CDBLength = csio->cdb_len;
1815 	mpt_req->DataLength = csio->dxfer_len;
1816 	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
1817 
1818 	/*
1819 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
1820 	 */
1821 	if (mpt->verbose == MPT_PRT_DEBUG) {
1822 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
1823 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
1824 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
1825 		if (mpt_req->Control != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
1826 			mpt_prtc(mpt, "(%s %u byte%s ",
1827 			    (mpt_req->Control == MPI_SCSIIO_CONTROL_READ)?
1828 			    "read" : "write",  csio->dxfer_len,
1829 			    (csio->dxfer_len == 1)? ")" : "s)");
1830 		}
1831 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
1832 		    ccb->ccb_h.target_lun, req, req->serno);
1833 	}
1834 
1835 	/*
1836 	 * If we have any data to send with this command map it into bus space.
1837 	 */
1838 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1839 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
1840 			/*
1841 			 * We've been given a pointer to a single buffer.
1842 			 */
1843 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
1844 				/*
				 * Virtual address that needs to be translated
				 * into one or more physical address ranges.
1847 				 */
1848 				int error;
1849 				int s = splsoftvm();
1850 				error = bus_dmamap_load(mpt->buffer_dmat,
1851 				    req->dmap, csio->data_ptr, csio->dxfer_len,
1852 				    cb, req, 0);
1853 				splx(s);
1854 				if (error == EINPROGRESS) {
1855 					/*
1856 					 * So as to maintain ordering,
1857 					 * freeze the controller queue
1858 					 * until our mapping is
1859 					 * returned.
1860 					 */
1861 					xpt_freeze_simq(mpt->sim, 1);
1862 					ccbh->status |= CAM_RELEASE_SIMQ;
1863 				}
1864 			} else {
1865 				/*
				 * We have been given a pointer to a single
				 * physical buffer.
1868 				 */
1869 				struct bus_dma_segment seg;
1870 				seg.ds_addr =
1871 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1872 				seg.ds_len = csio->dxfer_len;
1873 				(*cb)(req, &seg, 1, 0);
1874 			}
1875 		} else {
1876 			/*
1877 			 * We have been given a list of addresses.
1878 			 * This case could be easily supported but they are not
1879 			 * currently generated by the CAM subsystem so there
1880 			 * is no point in wasting the time right now.
1881 			 */
1882 			struct bus_dma_segment *segs;
1883 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
1884 				(*cb)(req, NULL, 0, EFAULT);
1885 			} else {
1886 				/* Just use the segments provided */
1887 				segs = (struct bus_dma_segment *)csio->data_ptr;
1888 				(*cb)(req, segs, csio->sglist_cnt, 0);
1889 			}
1890 		}
1891 	} else {
1892 		(*cb)(req, NULL, 0, 0);
1893 	}
1894 }
1895 
1896 static int
1897 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
1898     int sleep_ok)
1899 {
1900 	int   error;
1901 	uint16_t status;
1902 	uint8_t response;
1903 
1904 	error = mpt_scsi_send_tmf(mpt,
1905 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
1906 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
1907 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1908 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
1909 	    0,	/* XXX How do I get the channel ID? */
1910 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
1911 	    lun != CAM_LUN_WILDCARD ? lun : 0,
1912 	    0, sleep_ok);
1913 
1914 	if (error != 0) {
1915 		/*
1916 		 * mpt_scsi_send_tmf hard resets on failure, so no
1917 		 * need to do so here.
1918 		 */
1919 		mpt_prt(mpt,
1920 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
1921 		return (EIO);
1922 	}
1923 
1924 	/* Wait for bus reset to be processed by the IOC. */
1925 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
1926 	    REQ_STATE_DONE, sleep_ok, 5000);
1927 
1928 	status = mpt->tmf_req->IOCStatus;
1929 	response = mpt->tmf_req->ResponseCode;
1930 	mpt->tmf_req->state = REQ_STATE_FREE;
1931 
1932 	if (error) {
1933 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
1934 		    "Resetting controller.\n");
1935 		mpt_reset(mpt, TRUE);
1936 		return (ETIMEDOUT);
1937 	}
1938 
1939 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1940 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
1941 		    "Resetting controller.\n", status);
1942 		mpt_reset(mpt, TRUE);
1943 		return (EIO);
1944 	}
1945 
1946 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
1947 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
1948 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
1949 		    "Resetting controller.\n", response);
1950 		mpt_reset(mpt, TRUE);
1951 		return (EIO);
1952 	}
1953 	return (0);
1954 }
1955 
1956 static int
1957 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
1958 {
1959 	int r = 0;
1960 	request_t *req;
1961 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
1962 
	req = mpt_get_request(mpt, FALSE);
1964 	if (req == NULL) {
1965 		return (ENOMEM);
1966 	}
1967 	fc = req->req_vbuf;
1968 	memset(fc, 0, sizeof(*fc));
1969 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
1970 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
1971 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
1972 	mpt_send_cmd(mpt, req);
1973 	if (dowait) {
1974 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
1975 		    REQ_STATE_DONE, FALSE, 60 * 1000);
1976 		if (r == 0) {
1977 			mpt_free_request(mpt, req);
1978 		}
1979 	}
1980 	return (r);
1981 }
1982 
1983 static int
1984 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
1985 	      MSG_EVENT_NOTIFY_REPLY *msg)
1986 {
1987 	switch(msg->Event & 0xFF) {
1988 	case MPI_EVENT_UNIT_ATTENTION:
1989 		mpt_prt(mpt, "Bus: 0x%02x TargetID: 0x%02x\n",
1990 		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1991 		break;
1992 
1993 	case MPI_EVENT_IOC_BUS_RESET:
1994 		/* We generated a bus reset */
1995 		mpt_prt(mpt, "IOC Bus Reset Port: %d\n",
1996 		    (msg->Data[0] >> 8) & 0xff);
1997 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
1998 		break;
1999 
2000 	case MPI_EVENT_EXT_BUS_RESET:
2001 		/* Someone else generated a bus reset */
2002 		mpt_prt(mpt, "External Bus Reset Detected\n");
2003 		/*
2004 		 * These replies don't return EventData like the MPI
2005 		 * spec says they do
2006 		 */
2007 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2008 		break;
2009 
2010 	case MPI_EVENT_RESCAN:
2011 		/*
2012 		 * In general this means a device has been added to the loop.
2013 		 */
2014 		mpt_prt(mpt, "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
2015 /*		xpt_async(AC_FOUND_DEVICE, path, NULL);  */
2016 		break;
2017 
2018 	case MPI_EVENT_LINK_STATUS_CHANGE:
2019 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2020 		    (msg->Data[1] >> 8) & 0xff,
2021 		    ((msg->Data[0] & 0xff) == 0)?  "Failed" : "Active");
2022 		break;
2023 
2024 	case MPI_EVENT_LOOP_STATE_CHANGE:
2025 		switch ((msg->Data[0] >> 16) & 0xff) {
2026 		case 0x01:
2027 			mpt_prt(mpt,
2028 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2029 			    "(Loop Initialization)\n",
2030 			    (msg->Data[1] >> 8) & 0xff,
2031 			    (msg->Data[0] >> 8) & 0xff,
2032 			    (msg->Data[0]     ) & 0xff);
2033 			switch ((msg->Data[0] >> 8) & 0xff) {
2034 			case 0xF7:
2035 				if ((msg->Data[0] & 0xff) == 0xF7) {
2036 					mpt_prt(mpt, "Device needs AL_PA\n");
2037 				} else {
2038 					mpt_prt(mpt, "Device %02x doesn't like "
2039 					    "FC performance\n",
2040 					    msg->Data[0] & 0xFF);
2041 				}
2042 				break;
2043 			case 0xF8:
2044 				if ((msg->Data[0] & 0xff) == 0xF7) {
2045 					mpt_prt(mpt, "Device had loop failure "
2046 					    "at its receiver prior to acquiring"
2047 					    " AL_PA\n");
2048 				} else {
2049 					mpt_prt(mpt, "Device %02x detected loop"
2050 					    " failure at its receiver\n",
2051 					    msg->Data[0] & 0xFF);
2052 				}
2053 				break;
2054 			default:
2055 				mpt_prt(mpt, "Device %02x requests that device "
2056 				    "%02x reset itself\n",
2057 				    msg->Data[0] & 0xFF,
2058 				    (msg->Data[0] >> 8) & 0xFF);
2059 				break;
2060 			}
2061 			break;
2062 		case 0x02:
2063 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2064 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2065 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2066 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2067 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2068 			break;
2069 		case 0x03:
2070 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2071 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2072 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2073 			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
2074 			    (msg->Data[0]     ) & 0xff  /* Character 4 */);
2075 			break;
2076 		default:
2077 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2078 			    "FC event (%02x %02x %02x)\n",
2079 			    (msg->Data[1] >> 8) & 0xff, /* Port */
2080 			    (msg->Data[0] >> 16) & 0xff, /* Event */
2081 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
2082 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
2083 		}
2084 		break;
2085 
2086 	case MPI_EVENT_LOGOUT:
2087 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2088 		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
2089 		break;
2090 	case MPI_EVENT_EVENT_CHANGE:
2091 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2092 		    "mpt_cam_event: MPI_EVENT_EVENT_CHANGE\n");
2093 		break;
2094 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2095 		/*
2096 		 * Devices are attachin'.....
2097 		 */
2098 		mpt_prt(mpt,
2099 		    "mpt_cam_event: MPI_EVENT_SAS_DEVICE_STATUS_CHANGE\n");
2100 		break;
2101 	default:
2102 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2103 		    msg->Event & 0xFF);
2104 		return (0);
2105 	}
2106 	return (1);
2107 }
2108 
2109 /*
2110  * Reply path for all SCSI I/O requests, called from our
2111  * interrupt handler by extracting our handler index from
2112  * the MsgContext field of the reply from the IOC.
2113  *
2114  * This routine is optimized for the common case of a
2115  * completion without error.  All exception handling is
2116  * offloaded to non-inlined helper routines to minimize
2117  * cache footprint.
2118  */
2119 static int
2120 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2121     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2122 {
2123 	MSG_SCSI_IO_REQUEST *scsi_req;
2124 	union ccb *ccb;
2125 	target_id_t tgt;
2126 
2127 	if (req->state == REQ_STATE_FREE) {
2128 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2129 		return (TRUE);
2130 	}
2131 
2132 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2133 	ccb = req->ccb;
2134 	if (ccb == NULL) {
2135 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2136 		    req, req->serno);
2137 		return (TRUE);
2138 	}
2139 
2140 	tgt = scsi_req->TargetID;
2141 	untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
2142 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2143 
2144 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2145 		bus_dmasync_op_t op;
2146 
2147 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2148 			op = BUS_DMASYNC_POSTREAD;
2149 		else
2150 			op = BUS_DMASYNC_POSTWRITE;
2151 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2152 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2153 	}
2154 
2155 	if (reply_frame == NULL) {
2156 		/*
2157 		 * Context only reply, completion without error status.
2158 		 */
2159 		ccb->csio.resid = 0;
2160 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2161 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2162 	} else {
2163 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2164 	}
2165 
2166 	if (mpt->outofbeer) {
2167 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2168 		mpt->outofbeer = 0;
2169 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2170 	}
2171 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2172 		struct scsi_inquiry_data *iq =
2173 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2174 		if (scsi_req->Function ==
2175 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2176 			/*
2177 			 * Fake out the device type so that only the
2178 			 * pass-thru device will attach.
2179 			 */
2180 			iq->device &= ~0x1F;
2181 			iq->device |= T_NODEVICE;
2182 		}
2183 	}
2184 	if (mpt->verbose == MPT_PRT_DEBUG) {
2185 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2186 		    req, req->serno);
2187 	}
2188 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2189 	MPTLOCK_2_CAMLOCK(mpt);
2190 	xpt_done(ccb);
2191 	CAMLOCK_2_MPTLOCK(mpt);
2192 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2193 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2194 	} else {
2195 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2196 		    req, req->serno);
2197 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2198 	}
2199 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2200 	    ("CCB req needed wakeup"));
2201 #ifdef	INVARIANTS
2202 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2203 #endif
2204 	mpt_free_request(mpt, req);
2205 	return (TRUE);
2206 }
2207 
2208 static int
2209 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2210     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2211 {
2212 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2213 
2214 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2215 #ifdef	INVARIANTS
2216 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2217 #endif
2218 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2219 	/* Record IOC Status and Response Code of TMF for any waiters. */
2220 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2221 	req->ResponseCode = tmf_reply->ResponseCode;
2222 
2223 	mpt_lprt(mpt, MPT_PRT_INFO, "TMF complete: req %p:%u status 0x%x\n",
2224 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2225 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2226 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2227 		req->state |= REQ_STATE_DONE;
2228 		wakeup(req);
2229 	} else {
2230 		mpt->tmf_req->state = REQ_STATE_FREE;
2231 	}
2232 	return (TRUE);
2233 }
2234 
2235 /*
2236  * XXX: Move to definitions file
2237  */
2238 #define	ELS	0x22
2239 #define	FC4LS	0x32
2240 #define	ABTS	0x81
2241 #define	BA_ACC	0x84
2242 
2243 #define	LS_RJT	0x01
2244 #define	LS_ACC	0x02
2245 #define	PLOGI	0x03
2246 #define	LOGO	0x05
2247 #define SRR	0x14
2248 #define PRLI	0x20
2249 #define PRLO	0x21
2250 #define ADISC	0x52
2251 #define RSCN	0x61
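/*
 * As used by the reply handler below: ELS frames arrive with R_CTL 0x22
 * and TYPE 1 (extended link service), with the LS command code in the
 * top byte of the first payload word; ABTS is a basic link service and
 * arrives with R_CTL 0x81 and TYPE 0.
 */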
2252 
2253 static void
2254 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2255     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2256 {
2257 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2258 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2259 
2260 	/*
2261 	 * We are going to reuse the ELS request to send this response back.
2262 	 */
2263 	rsp = &tmp;
2264 	memset(rsp, 0, sizeof(*rsp));
2265 
2266 #ifdef	USE_IMMEDIATE_LINK_DATA
2267 	/*
	 * Apparently the IMMEDIATE stuff doesn't work.
2269 	 */
2270 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2271 #endif
2272 	rsp->RspLength = length;
2273 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2274 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2275 
2276 	/*
2277 	 * Copy over information from the original reply frame to
	 * its correct place in the response.
2279 	 */
2280 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2281 
2282 	/*
2283 	 * And now copy back the temporary area to the original frame.
2284 	 */
2285 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2286 	rsp = req->req_vbuf;
2287 
2288 #ifdef	USE_IMMEDIATE_LINK_DATA
2289 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2290 #else
2291 {
2292 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2293 	bus_addr_t paddr = req->req_pbuf;
2294 	paddr += MPT_RQSL(mpt);
2295 
2296 	se->FlagsLength =
2297 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2298 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2299 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2300 		MPI_SGE_FLAGS_END_OF_LIST	|
2301 		MPI_SGE_FLAGS_END_OF_BUFFER;
2302 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
2303 	se->FlagsLength |= (length);
2304 	se->Address = (uint32_t) paddr;
2305 }
2306 #endif
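	/*
	 * In a simple SGE the flags occupy the top byte of FlagsLength
	 * (MPI_SGE_FLAGS_SHIFT moves them into bits 31:24) while the
	 * low 24 bits carry the byte count, which is why the flags and
	 * length can simply be OR'd together above.
	 */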
2307 
2308 	/*
2309 	 * Send it on...
2310 	 */
2311 	mpt_send_cmd(mpt, req);
2312 }
2313 
2314 static int
2315 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2316     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2317 {
2318 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2319 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2320 	U8 rctl;
2321 	U8 type;
2322 	U8 cmd;
2323 	U16 status = le16toh(reply_frame->IOCStatus);
2324 	U32 *elsbuf;
2325 	int ioindex;
2326 	int do_refresh = TRUE;
2327 
2328 #ifdef	INVARIANTS
2329 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2330 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2331 	    req, req->serno, rp->Function));
2332 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2333 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2334 	} else {
2335 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2336 	}
2337 #endif
2338 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2339 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2340 	    req, req->serno, reply_frame, reply_frame->Function);
2341 
2342 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2343 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2344 		    status, reply_frame->Function);
2345 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2346 			/*
2347 			 * XXX: to get around shutdown issue
2348 			 */
2349 			mpt->disabled = 1;
2350 			return (TRUE);
2351 		}
2352 		return (TRUE);
2353 	}
2354 
2355 	/*
	 * If this is the completion of a link service response, we
	 * recycle the request as a refresh for a new link service
	 * request.
2358 	 *
2359 	 * The request pointer is bogus in this case and we have to fetch
2360 	 * it based upon the TransactionContext.
2361 	 */
2362 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2363 		/* Freddie Uncle Charlie Katie */
2364 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2365 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2366 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2367 				break;
2368 			}
2369 
2370 		KASSERT(ioindex < mpt->els_cmds_allocated,
2371 		    ("can't find my mommie!"));
2372 
2373 		/* remove from active list as we're going to re-post it */
2374 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2375 		req->state &= ~REQ_STATE_QUEUED;
2376 		req->state |= REQ_STATE_DONE;
2377 		mpt_fc_post_els(mpt, req, ioindex);
2378 		return (TRUE);
2379 	}
2380 
2381 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2382 		/* remove from active list as we're done */
2383 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2384 		req->state &= ~REQ_STATE_QUEUED;
2385 		req->state |= REQ_STATE_DONE;
2386 		if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2387 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2388 			    "Async Primitive Send Complete\n");
2390 			mpt_free_request(mpt, req);
2391 		} else {
2392 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2393 			    "Sync Primitive Send Complete\n");
2394 			wakeup(req);
2395 		}
2396 		return (TRUE);
2397 	}
2398 
2399 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2400 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2401 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2402 		    rp->MsgLength, rp->MsgFlags);
2403 		return (TRUE);
2404 	}
2405 
2406 	if (rp->MsgLength <= 5) {
2407 		/*
		 * This is just an ack of an original ELS buffer post.
2409 		 */
2410 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2411 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2412 		return (TRUE);
2413 	}
2414 
2416 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2417 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2418 
2419 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2420 	cmd = be32toh(elsbuf[0]) >> 24;
2421 
2422 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2423 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2424 		return (TRUE);
2425 	}
2426 
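	/*
	 * The TransactionContext of a buffer-post reply carries the
	 * IOINDEX of the originally posted buffer; use it to recover
	 * the request that owns this ELS payload.
	 */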
2427 	ioindex = le32toh(rp->TransactionContext);
2428 	req = mpt->els_cmd_ptrs[ioindex];
2429 
2430 	if (rctl == ELS && type == 1) {
2431 		switch (cmd) {
2432 		case PRLI:
2433 			/*
2434 			 * Send back a PRLI ACC
2435 			 */
2436 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2437 			    le32toh(rp->Wwn.PortNameHigh),
2438 			    le32toh(rp->Wwn.PortNameLow));
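			/*
			 * 0x02100014 appears to be LS_ACC (0x02) with a
			 * page length of 0x10 and a payload length of
			 * 0x14; word 4 holds the PRLI service parameter
			 * flags, where 0x10 and 0x20 advertise target
			 * and initiator function support respectively.
			 */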
2439 			elsbuf[0] = htobe32(0x02100014);
2440 			elsbuf[1] |= htobe32(0x00000100);
2441 			elsbuf[4] = htobe32(0x00000002);
2442 			if (mpt->role & MPT_ROLE_TARGET)
2443 				elsbuf[4] |= htobe32(0x00000010);
2444 			if (mpt->role & MPT_ROLE_INITIATOR)
2445 				elsbuf[4] |= htobe32(0x00000020);
2446 			/* remove from active list as we're done */
2447 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2448 			req->state &= ~REQ_STATE_QUEUED;
2449 			req->state |= REQ_STATE_DONE;
2450 			mpt_fc_els_send_response(mpt, req, rp, 20);
2451 			do_refresh = FALSE;
2452 			break;
2453 		case PRLO:
2454 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2455 			elsbuf[0] = htobe32(0x02100014);
2456 			elsbuf[1] = htobe32(0x08000100);
2457 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2458 			    le32toh(rp->Wwn.PortNameHigh),
2459 			    le32toh(rp->Wwn.PortNameLow));
2460 			/* remove from active list as we're done */
2461 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2462 			req->state &= ~REQ_STATE_QUEUED;
2463 			req->state |= REQ_STATE_DONE;
2464 			mpt_fc_els_send_response(mpt, req, rp, 20);
2465 			do_refresh = FALSE;
2466 			break;
2467 		default:
2468 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2469 			break;
2470 		}
2471 	} else if (rctl == ABTS && type == 0) {
2472 		uint16_t rx_id = le16toh(rp->Rxid);
2473 		uint16_t ox_id = le16toh(rp->Oxid);
2474 		request_t *tgt_req = NULL;
2475 
2476 		mpt_prt(mpt,
2477 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2478 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2479 		    le32toh(rp->Wwn.PortNameLow));
2480 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2481 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2482 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2483 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2484 		} else {
2485 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2486 		}
2487 		if (tgt_req) {
2488 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2489 			uint8_t *vbuf;
2490 			union ccb *ccb = tgt->ccb;
2491 			uint32_t ct_id;
2492 
2493 			vbuf = tgt_req->req_vbuf;
2494 			vbuf += MPT_RQSL(mpt);
2495 
2496 			/*
2497 			 * Check to make sure we have the correct command
2498 			 * The reply descriptor in the target state should
2499 			 * should contain an IoIndex that should match the
2500 			 * RX_ID.
2501 			 *
2502 			 * It'd be nice to have OX_ID to crosscheck with
2503 			 * as well.
2504 			 */
2505 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2506 
2507 			if (ct_id != rx_id) {
2508 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2509 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2510 				    rx_id, ct_id);
2511 				goto skip;
2512 			}
2513 
2514 			ccb = tgt->ccb;
2515 			if (ccb) {
2516 				mpt_prt(mpt,
2517 				    "CCB (%p): lun %u flags %x status %x\n",
2518 				    ccb, ccb->ccb_h.target_lun,
2519 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2520 			}
2521 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2522 			    "%x nxfers %x\n", tgt->state,
2523 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2524 			    tgt->nxfers);
2525   skip:
2526 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2527 				mpt_prt(mpt, "unable to start TargetAbort\n");
2528 			}
2529 		} else {
2530 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2531 		}
2532 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2533 		elsbuf[0] = htobe32(0);
2534 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
		elsbuf[2] = htobe32(0x0000ffff);
2536 		/*
2537 		 * Dork with the reply frame so that the reponse to it
2538 		 * will be correct.
2539 		 */
2540 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2541 		/* remove from active list as we're done */
2542 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2543 		req->state &= ~REQ_STATE_QUEUED;
2544 		req->state |= REQ_STATE_DONE;
2545 		mpt_fc_els_send_response(mpt, req, rp, 12);
2546 		do_refresh = FALSE;
2547 	} else {
2548 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2549 	}
2550 	if (do_refresh == TRUE) {
2551 		/* remove from active list as we're done */
2552 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2553 		req->state &= ~REQ_STATE_QUEUED;
2554 		req->state |= REQ_STATE_DONE;
2555 		mpt_fc_post_els(mpt, req, ioindex);
2556 	}
2557 	return (TRUE);
2558 }
2559 
2560 /*
2561  * Clean up all SCSI Initiator personality state in response
2562  * to a controller reset.
2563  */
2564 static void
2565 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2566 {
2567 	/*
2568 	 * The pending list is already run down by
2569 	 * the generic handler.  Perform the same
2570 	 * operation on the timed out request list.
2571 	 */
2572 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
2573 				   MPI_IOCSTATUS_INVALID_STATE);
2574 
2575 	/*
2576 	 * XXX: We need to repost ELS and Target Command Buffers?
2577 	 */
2578 
2579 	/*
2580 	 * Inform the XPT that a bus reset has occurred.
2581 	 */
2582 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
2583 }
2584 
2585 /*
2586  * Parse additional completion information in the reply
2587  * frame for SCSI I/O requests.
2588  */
2589 static int
2590 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
2591 			     MSG_DEFAULT_REPLY *reply_frame)
2592 {
2593 	union ccb *ccb;
2594 	MSG_SCSI_IO_REPLY *scsi_io_reply;
2595 	u_int ioc_status;
2596 	u_int sstate;
2597 	u_int loginfo;
2598 
2599 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
2600 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
2601 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
2602 		("MPT SCSI I/O Handler called with incorrect reply type"));
2603 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
2604 		("MPT SCSI I/O Handler called with continuation reply"));
2605 
2606 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
2607 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
2608 	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
2609 	ioc_status &= MPI_IOCSTATUS_MASK;
2610 	sstate = scsi_io_reply->SCSIState;
2611 
2612 	ccb = req->ccb;
2613 	ccb->csio.resid =
2614 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
2615 
2616 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
2617 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
2618 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2619 		ccb->csio.sense_resid =
2620 		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
2621 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
2622 		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
2623 	}
2624 
2625 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
2626 		/*
2627 		 * Tag messages rejected, but non-tagged retry
2628 		 * was successful.
2629 XXXX
2630 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
2631 		 */
2632 	}
2633 
2634 	switch(ioc_status) {
2635 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2636 		/*
2637 		 * XXX
2638 		 * Linux driver indicates that a zero
2639 		 * transfer length with this error code
2640 		 * indicates a CRC error.
2641 		 *
2642 		 * No need to swap the bytes for checking
2643 		 * against zero.
2644 		 */
2645 		if (scsi_io_reply->TransferCount == 0) {
2646 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2647 			break;
2648 		}
2649 		/* FALLTHROUGH */
2650 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
2651 	case MPI_IOCSTATUS_SUCCESS:
2652 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
2653 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
2654 			/*
2655 			 * Status was never returned for this transaction.
2656 			 */
2657 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
2658 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
2659 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
2660 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
2661 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
2662 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
			/* XXX Handle SPI-Packet and FCP-2 response info. */
2666 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2667 		} else
2668 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2669 		break;
2670 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
2671 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
2672 		break;
2673 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
2674 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2675 		break;
2676 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2677 		/*
2678 		 * Since selection timeouts and "device really not
2679 		 * there" are grouped into this error code, report
2680 		 * selection timeout.  Selection timeouts are
2681 		 * typically retried before giving up on the device
2682 		 * whereas "device not there" errors are considered
2683 		 * unretryable.
2684 		 */
2685 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
2686 		break;
2687 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2688 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
2689 		break;
2690 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
2691 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
2692 		break;
2693 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
2694 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
2695 		break;
2696 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2697 		ccb->ccb_h.status = CAM_UA_TERMIO;
2698 		break;
2699 	case MPI_IOCSTATUS_INVALID_STATE:
2700 		/*
2701 		 * The IOC has been reset.  Emulate a bus reset.
2702 		 */
2703 		/* FALLTHROUGH */
2704 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
2705 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2706 		break;
2707 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
2708 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
2709 		/*
2710 		 * Don't clobber any timeout status that has
2711 		 * already been set for this transaction.  We
2712 		 * want the SCSI layer to be able to differentiate
2713 		 * between the command we aborted due to timeout
2714 		 * and any innocent bystanders.
2715 		 */
2716 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
2717 			break;
2718 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
2719 		break;
2720 
2721 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2722 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
2723 		break;
2724 	case MPI_IOCSTATUS_BUSY:
2725 		mpt_set_ccb_status(ccb, CAM_BUSY);
2726 		break;
2727 	case MPI_IOCSTATUS_INVALID_FUNCTION:
2728 	case MPI_IOCSTATUS_INVALID_SGL:
2729 	case MPI_IOCSTATUS_INTERNAL_ERROR:
2730 	case MPI_IOCSTATUS_INVALID_FIELD:
2731 	default:
		/* XXX
		 * Some of the above may need to kick
		 * off a recovery action!
		 */
2736 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2737 		break;
2738 	}
2739 
2740 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2741 		mpt_freeze_ccb(ccb);
2742 	}
2743 
2744 	return (TRUE);
2745 }
2746 
2747 static void
2748 mpt_action(struct cam_sim *sim, union ccb *ccb)
2749 {
2750 	struct mpt_softc *mpt;
2751 	struct ccb_trans_settings *cts;
2752 	target_id_t tgt;
2753 	lun_id_t lun;
2754 	int raid_passthru;
2755 
2756 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
2757 
2758 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
2759 	KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action"));
2760 	raid_passthru = (sim == mpt->phydisk_sim);
2761 
2762 	tgt = ccb->ccb_h.target_id;
2763 	lun = ccb->ccb_h.target_lun;
2764 	if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ &&
2765 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
2766 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
2767 		CAMLOCK_2_MPTLOCK(mpt);
2768 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2769 			MPTLOCK_2_CAMLOCK(mpt);
2770 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2771 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2772 			xpt_done(ccb);
2773 			return;
2774 		}
2775 		MPTLOCK_2_CAMLOCK(mpt);
2776 	}
2777 	ccb->ccb_h.ccb_mpt_ptr = mpt;
2778 
2779 	switch (ccb->ccb_h.func_code) {
2780 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2781 		/*
2782 		 * Do a couple of preliminary checks...
2783 		 */
2784 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2785 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2786 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2787 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2788 				break;
2789 			}
2790 		}
2791 		/* Max supported CDB length is 16 bytes */
		/* XXX Unless we implement the new 32-byte message type */
2793 		if (ccb->csio.cdb_len >
2794 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
2795 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2796 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2797 			break;
2798 		}
2799 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2800 		mpt_start(sim, ccb);
2801 		return;
2802 
2803 	case XPT_RESET_BUS:
2804 	case XPT_RESET_DEV:
2805 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2806 			ccb->ccb_h.func_code == XPT_RESET_BUS ?
2807 			"XPT_RESET_BUS\n" : "XPT_RESET_DEV\n");
2808 
2809 		CAMLOCK_2_MPTLOCK(mpt);
2810 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
2811 		MPTLOCK_2_CAMLOCK(mpt);
2812 
2813 		/*
2814 		 * mpt_bus_reset is always successful in that it
2815 		 * will fall back to a hard reset should a bus
2816 		 * reset attempt fail.
2817 		 */
2818 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2819 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2820 		break;
2821 
2822 	case XPT_ABORT:
2823 	{
2824 		union ccb *accb = ccb->cab.abort_ccb;
2825 		CAMLOCK_2_MPTLOCK(mpt);
2826 		switch (accb->ccb_h.func_code) {
2827 		case XPT_ACCEPT_TARGET_IO:
2828 		case XPT_IMMED_NOTIFY:
2829 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
2830 			break;
2831 		case XPT_CONT_TARGET_IO:
2832 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
2833 			ccb->ccb_h.status = CAM_UA_ABORT;
2834 			break;
2835 		case XPT_SCSI_IO:
2836 			ccb->ccb_h.status = CAM_UA_ABORT;
2837 			break;
2838 		default:
2839 			ccb->ccb_h.status = CAM_REQ_INVALID;
2840 			break;
2841 		}
2842 		MPTLOCK_2_CAMLOCK(mpt);
2843 		break;
2844 	}
2845 
2846 #ifdef	CAM_NEW_TRAN_CODE
2847 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
2848 #else
2849 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
2850 #endif
2851 #define	DP_DISC_ENABLE	0x1
2852 #define	DP_DISC_DISABL	0x2
2853 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
2854 
2855 #define	DP_TQING_ENABLE	0x4
2856 #define	DP_TQING_DISABL	0x8
2857 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
2858 
2859 #define	DP_WIDE		0x10
2860 #define	DP_NARROW	0x20
2861 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
2862 
2863 #define	DP_SYNC		0x40
2864 
2865 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2866 	{
2867 #ifdef	CAM_NEW_TRAN_CODE
2868 		struct ccb_trans_settings_scsi *scsi;
2869 		struct ccb_trans_settings_spi *spi;
2870 #endif
2871 		uint8_t dval;
2872 		u_int period;
2873 		u_int offset;
2874 		int i, j;
2875 
2876 		cts = &ccb->cts;
2877 
2878 		if (mpt->is_fc || mpt->is_sas) {
2879 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2880 			break;
2881 		}
2882 
2883 		/*
2884 		 * Skip attempting settings on RAID volume disks.
2885 		 * Other devices on the bus get the normal treatment.
2886 		 */
2887 		if (mpt->phydisk_sim && raid_passthru == 0 &&
2888 		    mpt_is_raid_volume(mpt, tgt) != 0) {
2889 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
2890 			    "skipping transfer settings for RAID volumes\n");
2891 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2892 			break;
2893 		}
2894 
2895 		i = mpt->mpt_port_page2.PortSettings &
2896 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
2897 		j = mpt->mpt_port_page2.PortFlags &
2898 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
2899 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
2900 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
2901 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
2902 			    "honoring BIOS transfer negotiations\n");
2903 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2904 			break;
2905 		}
2906 
2907 		dval = 0;
2908 		period = 0;
2909 		offset = 0;
2910 
2911 #ifndef	CAM_NEW_TRAN_CODE
2912 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
2913 			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
2914 			    DP_DISC_ENABLE : DP_DISC_DISABL;
2915 		}
2916 
2917 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
2918 			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
2919 			    DP_TQING_ENABLE : DP_TQING_DISABL;
2920 		}
2921 
2922 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
2923 			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
2924 		}
2925 
2926 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2927 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
2928 			dval |= DP_SYNC;
2929 			period = cts->sync_period;
2930 			offset = cts->sync_offset;
2931 		}
2932 #else
2933 		scsi = &cts->proto_specific.scsi;
2934 		spi = &cts->xport_specific.spi;
2935 
2936 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
2938 			    DP_DISC_ENABLE : DP_DISC_DISABL;
2939 		}
2940 
2941 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
2943 			    DP_TQING_ENABLE : DP_TQING_DISABL;
2944 		}
2945 
2946 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2947 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
2948 			    DP_WIDE : DP_NARROW;
2949 		}
2950 
2951 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2952 		    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2953 		    (spi->sync_period && spi->sync_offset)) {
2954 			dval |= DP_SYNC;
2955 			period = spi->sync_period;
2956 			offset = spi->sync_offset;
2957 		}
2958 #endif
2959 		CAMLOCK_2_MPTLOCK(mpt);
2960 		if (dval & DP_DISC_ENABLE) {
2961 			mpt->mpt_disc_enable |= (1 << tgt);
2962 		} else if (dval & DP_DISC_DISABL) {
2963 			mpt->mpt_disc_enable &= ~(1 << tgt);
2964 		}
2965 		if (dval & DP_TQING_ENABLE) {
2966 			mpt->mpt_tag_enable |= (1 << tgt);
2967 		} else if (dval & DP_TQING_DISABL) {
2968 			mpt->mpt_tag_enable &= ~(1 << tgt);
2969 		}
2970 		if (dval & DP_WIDTH) {
2971 			mpt_setwidth(mpt, tgt, 1);
2972 		}
2973 		if (dval & DP_SYNC) {
2974 			mpt_setsync(mpt, tgt, period, offset);
2975 		}
2976 
2977 		if (mpt_update_spi_config(mpt, tgt)) {
2978 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2979 		} else {
2980 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2981 		}
2982 		MPTLOCK_2_CAMLOCK(mpt);
2983 		break;
2984 	}
2985 	case XPT_GET_TRAN_SETTINGS:
2986 		cts = &ccb->cts;
2987 		if (mpt->is_fc) {
2988 #ifndef	CAM_NEW_TRAN_CODE
2989 			/*
2990 			 * a lot of normal SCSI things don't make sense.
2991 			 */
2992 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2993 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2994 			/*
2995 			 * How do you measure the width of a high
2996 			 * speed serial bus? Well, in bytes.
2997 			 *
2998 			 * Offset and period make no sense, though, so we set
2999 			 * (above) a 'base' transfer speed to be gigabit.
3000 			 */
3001 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3002 #else
3003 			struct ccb_trans_settings_fc *fc =
3004 			    &cts->xport_specific.fc;
3005 
3006 			cts->protocol = PROTO_SCSI;
3007 			cts->protocol_version = SCSI_REV_2;
3008 			cts->transport = XPORT_FC;
3009 			cts->transport_version = 0;
3010 
3011 			fc->valid = CTS_FC_VALID_SPEED;
			fc->bitrate = 100000;	/* XXX: need to account for 2Gb/s */
3013 			/* XXX: need a port database for each target */
3014 #endif
3015 		} else if (mpt->is_sas) {
3016 #ifndef	CAM_NEW_TRAN_CODE
3017 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3018 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3019 			/*
3020 			 * How do you measure the width of a high
3021 			 * speed serial bus? Well, in bytes.
3022 			 *
3023 			 * Offset and period make no sense, though, so we set
3024 			 * (above) a 'base' transfer speed to be gigabit.
3025 			 */
3026 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3027 #else
3028 			struct ccb_trans_settings_sas *sas =
3029 			    &cts->xport_specific.sas;
3030 
3031 			cts->protocol = PROTO_SCSI;
3032 			cts->protocol_version = SCSI_REV_3;
3033 			cts->transport = XPORT_SAS;
3034 			cts->transport_version = 0;
3035 
3036 			sas->valid = CTS_SAS_VALID_SPEED;
3037 			sas->bitrate = 300000;	/* XXX: Default 3Gbps */
3038 #endif
3039 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
3040 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3041 			break;
3042 		}
3043 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3044 		break;
3045 
3046 	case XPT_CALC_GEOMETRY:
3047 	{
3048 		struct ccb_calc_geometry *ccg;
3049 
3050 		ccg = &ccb->ccg;
3051 		if (ccg->block_size == 0) {
3052 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3053 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3054 			break;
3055 		}
3056 		mpt_calc_geometry(ccg, /*extended*/1);
3057 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3058 		break;
3059 	}
3060 	case XPT_PATH_INQ:		/* Path routing inquiry */
3061 	{
3062 		struct ccb_pathinq *cpi = &ccb->cpi;
3063 
3064 		cpi->version_num = 1;
3065 		cpi->target_sprt = 0;
3066 		cpi->hba_eng_cnt = 0;
3067 		cpi->max_target = mpt->mpt_max_devices - 1;
3068 		/*
		 * XXX: FC cards report MAX_DEVICES of 512, but we
3070 		 * XXX: seem to hang when going higher than 255.
3071 		 */
3072 		if (cpi->max_target > 255)
3073 			cpi->max_target = 255;
3074 		/*
3075 		 * XXX: VMware ESX reports > 16 devices and then dies
3076 		 * XXX: when we probe.
3077 		 */
3078 		if (mpt->is_spi && cpi->max_target > 15)
3079 			cpi->max_target = 15;
3080 		cpi->max_lun = 7;
3081 		cpi->initiator_id = mpt->mpt_ini_id;
3082 
3083 		cpi->bus_id = cam_sim_bus(sim);
3084 		/*
3085 		 * Actual speed for each device varies.
3086 		 *
3087 		 * The base speed is the speed of the underlying connection.
3088 		 * This is strictly determined for SPI (async, narrow). If
3089 		 * link is up for Fibre Channel, then speed can be gotten
3090 		 * from that.
3091 		 */
3092 		if (mpt->is_fc) {
3093 			cpi->hba_misc = PIM_NOBUSRESET;
3094 			cpi->base_transfer_speed =
3095 			    mpt->mpt_fcport_speed * 100000;
3096 			cpi->hba_inquiry = PI_TAG_ABLE;
3097 		} else if (mpt->is_sas) {
3098 			cpi->hba_misc = PIM_NOBUSRESET;
3099 			cpi->base_transfer_speed = 300000;
3100 			cpi->hba_inquiry = PI_TAG_ABLE;
3101 		} else {
3102 			cpi->hba_misc = PIM_SEQSCAN;
3103 			cpi->base_transfer_speed = 3300;
3104 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3105 		}
3106 
3107 		/*
		 * We give our fake RAID passthru bus a width that is MaxVolumes
3109 		 * wide, restrict it to one lun and have it *not* be a bus
3110 		 * that can have a SCSI bus reset.
3111 		 */
3112 		if (raid_passthru) {
3113 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3114 			cpi->initiator_id = cpi->max_target + 1;
3115 			cpi->max_lun = 0;
3116 			cpi->hba_misc |= PIM_NOBUSRESET;
3117 		}
3118 
3119 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3120 			cpi->hba_misc |= PIM_NOINITIATOR;
3121 		}
3122 		if ((mpt->role & MPT_ROLE_TARGET) != 0) {
3123 			cpi->target_sprt =
3124 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3125 		} else {
3126 			cpi->target_sprt = 0;
3127 		}
3128 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3129 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3130 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3131 		cpi->unit_number = cam_sim_unit(sim);
3132 		cpi->ccb_h.status = CAM_REQ_CMP;
3133 		break;
3134 	}
3135 	case XPT_EN_LUN:		/* Enable LUN as a target */
3136 	{
3137 		int result;
3138 
3139 		CAMLOCK_2_MPTLOCK(mpt);
3140 		if (ccb->cel.enable)
3141 			result = mpt_enable_lun(mpt,
3142 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3143 		else
3144 			result = mpt_disable_lun(mpt,
3145 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3146 		MPTLOCK_2_CAMLOCK(mpt);
3147 		if (result == 0) {
3148 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3149 		} else {
3150 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3151 		}
3152 		break;
3153 	}
3154 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3155 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3156 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3157 	{
3158 		tgt_resource_t *trtp;
3159 		lun_id_t lun = ccb->ccb_h.target_lun;
3160 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3161 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3162 		ccb->ccb_h.flags = 0;
3163 
3164 		if (lun == CAM_LUN_WILDCARD) {
3165 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3166 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3167 				break;
3168 			}
3169 			trtp = &mpt->trt_wildcard;
3170 		} else if (lun >= MPT_MAX_LUNS) {
3171 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3172 			break;
3173 		} else {
3174 			trtp = &mpt->trt[lun];
3175 		}
3176 		CAMLOCK_2_MPTLOCK(mpt);
3177 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3178 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3179 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3180 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3181 			    sim_links.stqe);
3182 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3183 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3184 			    "Put FREE INOT lun %d\n", lun);
3185 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3186 			    sim_links.stqe);
3187 		} else {
3188 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3189 		}
3190 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3191 		MPTLOCK_2_CAMLOCK(mpt);
3192 		return;
3193 	}
3194 	case XPT_CONT_TARGET_IO:
3195 		CAMLOCK_2_MPTLOCK(mpt);
3196 		mpt_target_start_io(mpt, ccb);
3197 		MPTLOCK_2_CAMLOCK(mpt);
3198 		return;
3199 
3200 	default:
3201 		ccb->ccb_h.status = CAM_REQ_INVALID;
3202 		break;
3203 	}
3204 	xpt_done(ccb);
3205 }
3206 
3207 static int
3208 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3209 {
3210 #ifdef	CAM_NEW_TRAN_CODE
3211 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3212 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3213 #endif
3214 	target_id_t tgt;
3215 	uint8_t dval, pval, oval;
3216 	int rv;
3217 
3218 	if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3219 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3220 			return (-1);
3221 		}
3222 	} else {
3223 		tgt = cts->ccb_h.target_id;
3224 	}
3225 
3226 	/*
	 * XXX: We aren't looking at Port Page 2 BIOS settings here.
	 * XXX: For goal settings, we pick the max from port page 0.
3229 	 *
3230 	 * For current settings we read the current settings out from
3231 	 * device page 0 for that target.
3232 	 */
3233 	if (IS_CURRENT_SETTINGS(cts)) {
3234 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3235 		dval = 0;
3236 
3237 		CAMLOCK_2_MPTLOCK(mpt);
3238 		tmp = mpt->mpt_dev_page0[tgt];
3239 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3240 		    sizeof(tmp), FALSE, 5000);
3241 		if (rv) {
3242 			MPTLOCK_2_CAMLOCK(mpt);
3243 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3244 			return (rv);
3245 		}
3246 		MPTLOCK_2_CAMLOCK(mpt);
3247 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3248 		    DP_WIDE : DP_NARROW;
3249 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3250 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3251 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3252 		    DP_TQING_ENABLE : DP_TQING_DISABL;
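		/*
		 * NegotiatedParameters packs the sync offset into bits
		 * 23:16 and the sync period factor into bits 15:8,
		 * hence the shifts below.
		 */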
3253 		oval = (tmp.NegotiatedParameters >> 16) & 0xff;
3254 		pval = (tmp.NegotiatedParameters >>  8) & 0xff;
3255 		mpt->mpt_dev_page0[tgt] = tmp;
3256 	} else {
3257 		/*
		 * XXX: Just report the theoretical maximum.
3259 		 */
3260 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE;
3261 		oval = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
3262 		pval = (mpt->mpt_port_page0.Capabilities >>  8) & 0xff;
3263 	}
3264 #ifndef	CAM_NEW_TRAN_CODE
3265 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3266 	if (dval & DP_DISC_ENABLE) {
3267 		cts->flags |= CCB_TRANS_DISC_ENB;
3268 	}
3269 	if (dval & DP_TQING_ENABLE) {
3270 		cts->flags |= CCB_TRANS_TAG_ENB;
3271 	}
3272 	if (dval & DP_WIDE) {
3273 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3274 	} else {
3275 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3276 	}
3277 	cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
3278 	    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3279 	if (oval) {
3280 		cts->sync_period = pval;
3281 		cts->sync_offset = oval;
3282 		cts->valid |=
3283 		    CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID;
3284 	}
3285 #else
3286 	cts->protocol = PROTO_SCSI;
3287 	cts->protocol_version = SCSI_REV_2;
3288 	cts->transport = XPORT_SPI;
3289 	cts->transport_version = 2;
3290 
3291 	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
3292 	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
3293 	if (dval & DP_DISC_ENABLE) {
3294 		spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3295 	}
3296 	if (dval & DP_TQING_ENABLE) {
3297 		scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3298 	}
3299 	if (oval && pval) {
3300 		spi->sync_offset = oval;
3301 		spi->sync_period = pval;
3302 		spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3303 		spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3304 	}
3305 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3306 	if (dval & DP_WIDE) {
3307 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3308 	} else {
3309 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3310 	}
3311 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3312 		scsi->valid = CTS_SCSI_VALID_TQ;
3313 		spi->valid |= CTS_SPI_VALID_DISC;
3314 	} else {
3315 		scsi->valid = 0;
3316 	}
3317 #endif
3318 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3319 	    "mpt_get_spi_settings[%d]: %s 0x%x period 0x%x offset %d\n", tgt,
3320 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3321 	return (0);
3322 }
3323 
3324 static void
3325 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3326 {
3327 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3328 
3329 	ptr = &mpt->mpt_dev_page1[tgt];
3330 	if (onoff) {
3331 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3332 	} else {
3333 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3334 	}
3335 }
3336 
3337 static void
3338 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3339 {
3340 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3341 
3342 	ptr = &mpt->mpt_dev_page1[tgt];
3343 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3344 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3345 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3346 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3347 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3348 	ptr->RequestedParameters |= (period << 8) | (offset << 16);
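	/*
	 * Sync period factors below 0xa (Ultra160 and faster) appear to
	 * require DT, and factors below 0x9 (Ultra320) additionally
	 * require QAS and IU; that is what the checks below select.
	 */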
3349 	if (period < 0xa) {
3350 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3351 	}
3352 	if (period < 0x9) {
3353 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3354 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3355 	}
3356 }
3357 
3358 static int
3359 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3360 {
3361 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3362 	int rv;
3363 
3364 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3365 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3366 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3367 	tmp = mpt->mpt_dev_page1[tgt];
3368 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3369 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3370 	if (rv) {
3371 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3372 		return (-1);
3373 	}
3374 	return (0);
3375 }
3376 
3377 static void
3378 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3379 {
3380 #if __FreeBSD_version >= 500000
3381 	cam_calc_geometry(ccg, extended);
3382 #else
3383 	uint32_t size_mb;
3384 	uint32_t secs_per_cylinder;
3385 
3386 	if (ccg->block_size == 0) {
3387 		ccg->ccb_h.status = CAM_REQ_INVALID;
3388 		return;
3389 	}
3390 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3391 	if (size_mb > 1024 && extended) {
3392 		ccg->heads = 255;
3393 		ccg->secs_per_track = 63;
3394 	} else {
3395 		ccg->heads = 64;
3396 		ccg->secs_per_track = 32;
3397 	}
3398 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3399 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3400 	ccg->ccb_h.status = CAM_REQ_CMP;
3401 #endif
3402 }
3403 
3404 /****************************** Timeout Recovery ******************************/
3405 static int
3406 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3407 {
3408 	int error;
3409 
3410 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3411 	    &mpt->recovery_thread, /*flags*/0,
3412 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3413 	return (error);
3414 }
3415 
3416 static void
3417 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3418 {
3419 	if (mpt->recovery_thread == NULL) {
3420 		return;
3421 	}
3422 	mpt->shutdwn_recovery = 1;
3423 	wakeup(mpt);
3424 	/*
3425 	 * Sleep on a slightly different location
3426 	 * for this interlock just for added safety.
3427 	 */
3428 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3429 }
3430 
3431 static void
3432 mpt_recovery_thread(void *arg)
3433 {
3434 	struct mpt_softc *mpt;
3435 
3436 #if __FreeBSD_version >= 500000
3437 	mtx_lock(&Giant);
3438 #endif
3439 	mpt = (struct mpt_softc *)arg;
3440 	MPT_LOCK(mpt);
3441 	for (;;) {
3442 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3443 			if (mpt->shutdwn_recovery == 0) {
3444 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3445 			}
3446 		}
3447 		if (mpt->shutdwn_recovery != 0) {
3448 			break;
3449 		}
3450 		mpt_recover_commands(mpt);
3451 	}
3452 	mpt->recovery_thread = NULL;
3453 	wakeup(&mpt->recovery_thread);
3454 	MPT_UNLOCK(mpt);
3455 #if __FreeBSD_version >= 500000
3456 	mtx_unlock(&Giant);
3457 #endif
3458 	kthread_exit(0);
3459 }
3460 
3461 static int
3462 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3463     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3464 {
3465 	MSG_SCSI_TASK_MGMT *tmf_req;
3466 	int		    error;
3467 
3468 	/*
3469 	 * Wait for any current TMF request to complete.
3470 	 * We're only allowed to issue one TMF at a time.
3471 	 */
3472 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3473 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3474 	if (error != 0) {
3475 		mpt_reset(mpt, TRUE);
3476 		return (ETIMEDOUT);
3477 	}
3478 
3479 	mpt_assign_serno(mpt, mpt->tmf_req);
3480 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3481 
3482 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3483 	memset(tmf_req, 0, sizeof(*tmf_req));
3484 	tmf_req->TargetID = target;
3485 	tmf_req->Bus = channel;
3486 	tmf_req->ChainOffset = 0;
3487 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3488 	tmf_req->Reserved = 0;
3489 	tmf_req->TaskType = type;
3490 	tmf_req->Reserved1 = 0;
3491 	tmf_req->MsgFlags = flags;
3492 	tmf_req->MsgContext =
3493 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3494 	memset(&tmf_req->LUN, 0,
3495 	    sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
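	/*
	 * Encode the LUN: LUNs below 256 use peripheral addressing
	 * (LUN in byte 1); larger LUNs use flat space addressing
	 * (0x40 plus the upper six bits of the LUN in byte 0).
	 */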
3496 	if (lun >= 256) {
3497 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3498 		tmf_req->LUN[1] = lun & 0xff;
3499 	} else {
3500 		tmf_req->LUN[1] = lun;
3501 	}
3502 	tmf_req->TaskMsgContext = abort_ctx;
3503 
3504 	mpt_lprt(mpt, MPT_PRT_INFO,
3505 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3506 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3507 	if (mpt->verbose > MPT_PRT_DEBUG) {
3508 		mpt_print_request(tmf_req);
3509 	}
3510 
3511 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3512 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3513 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3514 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3515 	if (error != MPT_OK) {
3516 		mpt_reset(mpt, TRUE);
3517 	}
3518 	return (error);
3519 }
3520 
3521 /*
3522  * When a command times out, it is placed on the request_timeout_list
3523  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3524  * only a single TMF operation at a time, so we serially abort/BDR, etc.,
3525  * the timed-out transactions.  The next TMF is issued either by the
3526  * completion handler of the current TMF waking our recovery thread,
3527  * or the TMF timeout handler causing a hard reset sequence.
3528  */
3529 static void
3530 mpt_recover_commands(struct mpt_softc *mpt)
3531 {
3532 	request_t	   *req;
3533 	union ccb	   *ccb;
3534 	int		    error;
3535 
3536 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3537 		/*
3538 		 * No work to do- leave.
3539 		 */
3540 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3541 		return;
3542 	}
3543 
3544 	/*
3545 	 * Flush any commands whose completion coincides with their timeout.
3546 	 */
3547 	mpt_intr(mpt);
3548 
3549 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3550 		/*
3551 		 * The timedout commands have already
3552 		 * completed.  This typically means
3553 		 * that either the timeout value was on
3554 		 * the hairy edge of what the device
3555 		 * requires or - more likely - interrupts
3556 		 * are not happening.
3557 		 */
3558 		mpt_prt(mpt, "Timedout requests already complete. "
3559 		    "Interrupts may not be functioning.\n");
3560 		mpt_enable_ints(mpt);
3561 		return;
3562 	}
3563 
3564 	/*
3565 	 * We have no visibility into the current state of the
3566 	 * controller, so attempt to abort the commands in the
3567 	 * order they timed-out. For initiator commands, we
3568 	 * depend on the reply handler pulling requests off
3569 	 * the timeout list.
3570 	 */
3571 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3572 		uint16_t status;
3573 		uint8_t response;
3574 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3575 
3576 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
3577 		    req, req->serno, hdrp->Function);
3578 		ccb = req->ccb;
3579 		if (ccb == NULL) {
3580 			mpt_prt(mpt, "null ccb in timed out request. "
3581 			    "Resetting Controller.\n");
3582 			mpt_reset(mpt, TRUE);
3583 			continue;
3584 		}
3585 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
3586 
3587 		/*
3588 		 * Check to see if this is not an initiator command and
3589 		 * deal with it differently if it is.
3590 		 */
3591 		switch (hdrp->Function) {
3592 		case MPI_FUNCTION_SCSI_IO_REQUEST:
3593 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
3594 			break;
3595 		default:
3596 			/*
3597 			 * XXX: FIX ME: need to abort target assists...
3598 			 */
3599 			mpt_prt(mpt, "just putting it back on the pend q\n");
3600 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
3601 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
3602 			    links);
3603 			continue;
3604 		}
3605 
3606 		error = mpt_scsi_send_tmf(mpt,
3607 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3608 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
3609 		    htole32(req->index | scsi_io_handler_id), TRUE);
3610 
3611 		if (error != 0) {
3612 			/*
3613 			 * mpt_scsi_send_tmf hard resets on failure, so no
3614 			 * need to do so here.  Our queue should be emptied
3615 			 * by the hard reset.
3616 			 */
3617 			continue;
3618 		}
3619 
3620 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
3621 		    REQ_STATE_DONE, TRUE, 500);
3622 
3623 		status = mpt->tmf_req->IOCStatus;
3624 		response = mpt->tmf_req->ResponseCode;
3625 		mpt->tmf_req->state = REQ_STATE_FREE;
3626 
3627 		if (error != 0) {
3628 			/*
3629 			 * If we've errored out, reset the controller.
3630 			 */
3631 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
3632 			    "Resetting controller\n");
3633 			mpt_reset(mpt, TRUE);
3634 			continue;
3635 		}
3636 
3637 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
3638 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
3639 			    "Resetting controller.\n", status);
3640 			mpt_reset(mpt, TRUE);
3641 			continue;
3642 		}
3643 
3644 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
3645 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
3646 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
3647 			    "Resetting controller.\n", response);
3648 			mpt_reset(mpt, TRUE);
3649 			continue;
3650 		}
3651 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
3652 	}
3653 }
3654 
3655 /************************ Target Mode Support ****************************/
3656 static void
3657 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
3658 {
3659 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
3660 	PTR_SGE_TRANSACTION32 tep;
3661 	PTR_SGE_SIMPLE32 se;
3662 	bus_addr_t paddr;
3663 
3664 	paddr = req->req_pbuf;
3665 	paddr += MPT_RQSL(mpt);
3666 
3667 	fc = req->req_vbuf;
3668 	memset(fc, 0, MPT_REQUEST_AREA);
3669 	fc->BufferCount = 1;
3670 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
3671 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
3672 
3673 	/*
3674 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
3675 	 * consist of a TE SGL element (with details length of zero)
3676 	 * followed by a SIMPLE SGL element which holds the address
3677 	 * of the buffer.
3678 	 */
3679 
3680 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
3681 
3682 	tep->ContextSize = 4;
3683 	tep->Flags = 0;
3684 	tep->TransactionContext[0] = htole32(ioindex);
3685 
3686 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
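	/*
	 * A 32-bit simple SGE packs its flag bits into the top byte
	 * of FlagsLength, i.e. (flags << MPI_SGE_FLAGS_SHIFT) | len,
	 * leaving the buffer length in the low 24 bits.
	 */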
3687 	se->FlagsLength =
3688 		MPI_SGE_FLAGS_HOST_TO_IOC	|
3689 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
3690 		MPI_SGE_FLAGS_LAST_ELEMENT	|
3691 		MPI_SGE_FLAGS_END_OF_LIST	|
3692 		MPI_SGE_FLAGS_END_OF_BUFFER;
3693 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
3694 	se->FlagsLength |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
3695 	se->Address = (uint32_t) paddr;
3696 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3697 	    "add ELS index %d ioindex %d for %p:%u\n",
3698 	    req->index, ioindex, req, req->serno);
3699 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
3700 	    ("mpt_fc_post_els: request not locked"));
3701 	mpt_send_cmd(mpt, req);
3702 }
3703 
3704 static void
3705 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
3706 {
3707 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
3708 	PTR_CMD_BUFFER_DESCRIPTOR cb;
3709 	bus_addr_t paddr;
3710 
3711 	paddr = req->req_pbuf;
3712 	paddr += MPT_RQSL(mpt);
3713 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
3714 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
3715 
3716 	fc = req->req_vbuf;
3717 	fc->BufferCount = 1;
3718 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
3719 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3720 
3721 	cb = &fc->Buffer[0];
3722 	cb->IoIndex = htole16(ioindex);
3723 	cb->u.PhysicalAddress32 = (U32) paddr;
3724 
3725 	mpt_check_doorbell(mpt);
3726 	mpt_send_cmd(mpt, req);
3727 }
3728 
3729 static int
3730 mpt_add_els_buffers(struct mpt_softc *mpt)
3731 {
3732 	int i;
3733 
3734 	if (mpt->is_fc == 0) {
3735 		return (TRUE);
3736 	}
3737 
3738 	if (mpt->els_cmds_allocated) {
3739 		return (TRUE);
3740 	}
3741 
3742 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
3743 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3744 
3745 	if (mpt->els_cmd_ptrs == NULL) {
3746 		return (FALSE);
3747 	}
3748 
3749 	/*
3750 	 * Feed the chip some ELS buffer resources
3751 	 */
3752 	for (i = 0; i < MPT_MAX_ELS; i++) {
3753 		request_t *req = mpt_get_request(mpt, FALSE);
3754 		if (req == NULL) {
3755 			break;
3756 		}
3757 		req->state |= REQ_STATE_LOCKED;
3758 		mpt->els_cmd_ptrs[i] = req;
3759 		mpt_fc_post_els(mpt, req, i);
3760 	}
3761 
3762 	if (i == 0) {
3763 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
3764 		free(mpt->els_cmd_ptrs, M_DEVBUF);
3765 		mpt->els_cmd_ptrs = NULL;
3766 		return (FALSE);
3767 	}
3768 	if (i != MPT_MAX_ELS) {
3769 		mpt_lprt(mpt, MPT_PRT_INFO,
3770 		    "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
3771 	}
3772 	mpt->els_cmds_allocated = i;
3773 	return(TRUE);
3774 }
3775 
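/*
 * Post buffers for incoming target commands. At most half of the
 * request pool (capped at mpt_max_tgtcmds) is devoted to command
 * buffers, leaving the rest of the pool free for TARGET_ASSIST,
 * status and abort requests.
 */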
3776 static int
3777 mpt_add_target_commands(struct mpt_softc *mpt)
3778 {
3779 	int i, max;
3780 
3781 	if (mpt->tgt_cmd_ptrs) {
3782 		return (TRUE);
3783 	}
3784 
3785 	max = MPT_MAX_REQUESTS(mpt) >> 1;
3786 	if (max > mpt->mpt_max_tgtcmds) {
3787 		max = mpt->mpt_max_tgtcmds;
3788 	}
3789 	mpt->tgt_cmd_ptrs =
3790 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
3791 	if (mpt->tgt_cmd_ptrs == NULL) {
3792 		mpt_prt(mpt,
3793 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
3794 		return (FALSE);
3795 	}
3796 
3797 	for (i = 0; i < max; i++) {
3798 		request_t *req;
3799 
3800 		req = mpt_get_request(mpt, FALSE);
3801 		if (req == NULL) {
3802 			break;
3803 		}
3804 		req->state |= REQ_STATE_LOCKED;
3805 		mpt->tgt_cmd_ptrs[i] = req;
3806 		mpt_post_target_command(mpt, req, i);
3807 	}
3808 
3809 
3810 	if (i == 0) {
3811 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
3812 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
3813 		mpt->tgt_cmd_ptrs = NULL;
3814 		return (FALSE);
3815 	}
3816 
3817 	mpt->tgt_cmds_allocated = i;
3818 
3819 	if (i < max) {
3820 		mpt_lprt(mpt, MPT_PRT_INFO,
3821 		    "added %d of %d target bufs\n", i, max);
3822 	}
3823 	return (i);
3824 }
3825 
3826 static void
3827 mpt_free_els_buffers(struct mpt_softc *mpt)
3828 {
3829 	mpt_prt(mpt, "fix me! need to implement mpt_free_els_buffers\n");
3830 }
3831 
3832 static void
3833 mpt_free_target_commands(struct mpt_softc *mpt)
3834 {
3835 	mpt_prt(mpt, "fix me! need to implement mpt_free_target_commands\n");
3836 }
3837 
3838 
3839 static int
3840 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3841 {
3842 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3843 		mpt->twildcard = 1;
3844 	} else if (lun >= MPT_MAX_LUNS) {
3845 		return (EINVAL);
3846 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3847 		return (EINVAL);
3848 	}
3849 	if (mpt->tenabled == 0) {
3850 		/*
3851 		 * Try to add some target command resources
3852 		 */
3853 		if (mpt_add_target_commands(mpt) == FALSE) {
3854 			mpt_free_els_buffers(mpt);
3855 			return (ENOMEM);
3856 		}
3857 		if (mpt->is_fc) {
3858 			(void) mpt_fc_reset_link(mpt, 0);
3859 		}
3860 		mpt->tenabled = 1;
3861 	}
3862 	if (lun == CAM_LUN_WILDCARD) {
3863 		mpt->trt_wildcard.enabled = 1;
3864 	} else {
3865 		mpt->trt[lun].enabled = 1;
3866 	}
3867 	return (0);
3868 }
3869 
3870 static int
3871 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3872 {
3873 	int i;
3874 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3875 		mpt->twildcard = 0;
3876 	} else if (lun >= MPT_MAX_LUNS) {
3877 		return (EINVAL);
3878 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3879 		return (EINVAL);
3880 	}
3881 	if (lun == CAM_LUN_WILDCARD) {
3882 		mpt->trt_wildcard.enabled = 0;
3883 	} else {
3884 		mpt->trt[lun].enabled = 0;
3885 	}
3886 	for (i = 0; i < MPT_MAX_LUNS; i++) {
3887 		if (mpt->trt[i].enabled) {
3888 			break;
3889 		}
3890 	}
3891 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
3892 		mpt_free_els_buffers(mpt);
3893 		mpt_free_target_commands(mpt);
3894 		if (mpt->is_fc) {
3895 			(void) mpt_fc_reset_link(mpt, 0);
3896 		}
3897 		mpt->tenabled = 0;
3898 	}
3899 	return (0);
3900 }
3901 
3902 /*
3903  * Called with MPT lock held
3904  */
3905 static void
3906 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
3907 {
3908 	struct ccb_scsiio *csio = &ccb->csio;
3909 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
3910 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
3911 
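	/*
	 * The tag in the CCB encodes the index of the original
	 * command buffer request, which in turn carries the
	 * per-command target state.
	 */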
3912 	switch (tgt->state) {
3913 	case TGT_STATE_IN_CAM:
3914 		break;
3915 	case TGT_STATE_MOVING_DATA:
3916 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3917 		xpt_freeze_simq(mpt->sim, 1);
3918 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3919 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3920 		MPTLOCK_2_CAMLOCK(mpt);
3921 		xpt_done(ccb);
3922 		CAMLOCK_2_MPTLOCK(mpt);
3923 		return;
3924 	default:
3925 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
3926 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
3927 		mpt_tgt_dump_req_state(mpt, cmd_req);
3928 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3929 		MPTLOCK_2_CAMLOCK(mpt);
3930 		xpt_done(ccb);
3931 		CAMLOCK_2_MPTLOCK(mpt);
3932 		return;
3933 	}
3934 
3935 	if (csio->dxfer_len) {
3936 		bus_dmamap_callback_t *cb;
3937 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
3938 		request_t *req;
3939 
3940 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
3941 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
3942 
3943 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
3944 			if (mpt->outofbeer == 0) {
3945 				mpt->outofbeer = 1;
3946 				xpt_freeze_simq(mpt->sim, 1);
3947 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
3948 			}
3949 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3950 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3951 			MPTLOCK_2_CAMLOCK(mpt);
3952 			xpt_done(ccb);
3953 			CAMLOCK_2_MPTLOCK(mpt);
3954 			return;
3955 		}
3956 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
3957 		if (sizeof (bus_addr_t) > 4) {
3958 			cb = mpt_execute_req_a64;
3959 		} else {
3960 			cb = mpt_execute_req;
3961 		}
3962 
3963 		req->ccb = ccb;
3964 		ccb->ccb_h.ccb_req_ptr = req;
3965 
3966 		/*
3967 		 * Record the currently active ccb and the
3968 		 * request for it in our target state area.
3969 		 */
3970 		tgt->ccb = ccb;
3971 		tgt->req = req;
3972 
3973 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
3974 		ta = req->req_vbuf;
3975 
3976 		if (mpt->is_sas) {
3977 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
3978 			     cmd_req->req_vbuf;
3979 			ta->QueueTag = ssp->InitiatorTag;
3980 		} else if (mpt->is_spi) {
3981 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
3982 			     cmd_req->req_vbuf;
3983 			ta->QueueTag = sp->Tag;
3984 		}
3985 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
3986 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3987 		ta->ReplyWord = htole32(tgt->reply_desc);
3988 		if (csio->ccb_h.target_lun >= 256) {
3989 			ta->LUN[0] =
3990 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
3991 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
3992 		} else {
3993 			ta->LUN[1] = csio->ccb_h.target_lun;
3994 		}
3995 
3996 		ta->RelativeOffset = tgt->bytes_xfered;
3997 		ta->DataLength = ccb->csio.dxfer_len;
3998 		if (ta->DataLength > tgt->resid) {
3999 			ta->DataLength = tgt->resid;
4000 		}
4001 
4002 		/*
4003 		 * XXX Should be done after data transfer completes?
4004 		 */
4005 		tgt->resid -= csio->dxfer_len;
4006 		tgt->bytes_xfered += csio->dxfer_len;
4007 
4008 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4009 			ta->TargetAssistFlags |=
4010 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4011 		}
4012 
4013 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4014 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4015 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4016 			ta->TargetAssistFlags |=
4017 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4018 		}
4019 #endif
4020 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4021 
4022 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4023 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4024 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4025 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4026 
4027 		MPTLOCK_2_CAMLOCK(mpt);
4028 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4029 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4030 				int error;
4031 				int s = splsoftvm();
4032 				error = bus_dmamap_load(mpt->buffer_dmat,
4033 				    req->dmap, csio->data_ptr, csio->dxfer_len,
4034 				    cb, req, 0);
4035 				splx(s);
4036 				if (error == EINPROGRESS) {
4037 					xpt_freeze_simq(mpt->sim, 1);
4038 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4039 				}
4040 			} else {
4041 				/*
4042 				 * We have been given a pointer to single
4043 				 * physical buffer.
4044 				 */
4045 				struct bus_dma_segment seg;
4046 				seg.ds_addr = (bus_addr_t)
4047 				    (vm_offset_t)csio->data_ptr;
4048 				seg.ds_len = csio->dxfer_len;
4049 				(*cb)(req, &seg, 1, 0);
4050 			}
4051 		} else {
4052 			/*
4053 			 * We have been given a list of addresses.
4054 			 * This case could be easily supported but they are not
4055 			 * currently generated by the CAM subsystem so there
4056 			 * is no point in wasting the time right now.
4057 			 */
4058 			struct bus_dma_segment *sgs;
4059 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4060 				(*cb)(req, NULL, 0, EFAULT);
4061 			} else {
4062 				/* Just use the segments provided */
4063 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4064 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4065 			}
4066 		}
4067 		CAMLOCK_2_MPTLOCK(mpt);
4068 	} else {
4069 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4070 
4071 		/*
4072 		 * XXX: I don't know why this seems to happen, but
4073 		 * XXX: completing the CCB seems to make things happy.
4074 		 * XXX: This seems to happen if the initiator requests
4075 		 * XXX: enough data that we have to do multiple CTIOs.
4076 		 */
4077 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4078 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4079 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4080 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4081 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4082 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4083 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4084 			MPTLOCK_2_CAMLOCK(mpt);
4085 			xpt_done(ccb);
4086 			CAMLOCK_2_MPTLOCK(mpt);
4087 			return;
4088 		}
4089 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4090 			sp = sense;
4091 			memcpy(sp, &csio->sense_data,
4092 			   min(csio->sense_len, MPT_SENSE_SIZE));
4093 		}
4094 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4095 	}
4096 }
4097 
4098 /*
4099  * Abort queued up CCBs
4100  */
4101 static cam_status
4102 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4103 {
4104 	struct mpt_hdr_stailq *lp;
4105 	struct ccb_hdr *srch;
4106 	int found = 0;
4107 	union ccb *accb = ccb->cab.abort_ccb;
4108 	tgt_resource_t *trtp;
4109 
4110 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4111 
4112 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4113 		trtp = &mpt->trt_wildcard;
4114 	} else {
4115 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4116 	}
4117 
4118 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4119 		lp = &trtp->atios;
4120 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4121 		lp = &trtp->inots;
4122 	} else {
4123 		return (CAM_REQ_INVALID);
4124 	}
4125 
4126 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4127 		if (srch == &accb->ccb_h) {
4128 			found = 1;
4129 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4130 			break;
4131 		}
4132 	}
4133 	if (found) {
4134 		accb->ccb_h.status = CAM_REQ_ABORTED;
4135 		xpt_done(accb);
4136 		return (CAM_REQ_CMP);
4137 	}
4138 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", accb);
4139 	return (CAM_PATH_INVALID);
4140 }
4141 
4142 /*
4143  * Ask the MPT to abort the current target command
4144  */
4145 static int
4146 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4147 {
4148 	int error;
4149 	request_t *req;
4150 	PTR_MSG_TARGET_MODE_ABORT abtp;
4151 
4152 	req = mpt_get_request(mpt, FALSE);
4153 	if (req == NULL) {
4154 		return (-1);
4155 	}
4156 	abtp = req->req_vbuf;
4157 	memset(abtp, 0, sizeof (*abtp));
4158 
4159 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4160 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4161 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4162 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4163 	error = 0;
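	/*
	 * FC and SAS parts take the abort through the normal request
	 * queue; SPI parts require the doorbell handshake.
	 */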
4164 	if (mpt->is_fc || mpt->is_sas) {
4165 		mpt_send_cmd(mpt, req);
4166 	} else {
4167 		error = mpt_send_handshake_cmd(mpt, sizeof (*abtp), abtp);
4168 	}
4169 	return (error);
4170 }
4171 
4172 /*
4173  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4174  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4175  * FC929 to set bogus FC_RSP fields (nonzero residuals
4176  * but w/o RESID fields set). This causes QLogic initiators
4177  * to think maybe that a frame was lost.
4178  *
4179  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4180  * we use allocated requests to do TARGET_ASSIST and we
4181  * need to know when to release them.
4182  */
4183 
4184 static void
4185 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4186     uint8_t status, uint8_t const *sense_data)
4187 {
4188 	uint8_t *cmd_vbuf;
4189 	mpt_tgt_state_t *tgt;
4190 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4191 	request_t *req;
4192 	bus_addr_t paddr;
4193 	int resplen = 0;
4194 
4195 	cmd_vbuf = cmd_req->req_vbuf;
4196 	cmd_vbuf += MPT_RQSL(mpt);
4197 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4198 
4199 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4200 		if (mpt->outofbeer == 0) {
4201 			mpt->outofbeer = 1;
4202 			xpt_freeze_simq(mpt->sim, 1);
4203 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4204 		}
4205 		if (ccb) {
4206 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4207 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4208 			MPTLOCK_2_CAMLOCK(mpt);
4209 			xpt_done(ccb);
4210 			CAMLOCK_2_MPTLOCK(mpt);
4211 		} else {
4212 			mpt_prt(mpt,
4213 			    "XXXX could not allocate status req- dropping\n");
4214 		}
4215 		return;
4216 	}
4217 	req->ccb = ccb;
4218 	if (ccb) {
4219 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4220 		ccb->ccb_h.ccb_req_ptr = req;
4221 	}
4222 
4223 	/*
4224 	 * Record the currently active ccb, if any, and the
4225 	 * request for it in our target state area.
4226 	 */
4227 	tgt->ccb = ccb;
4228 	tgt->req = req;
4229 	tgt->state = TGT_STATE_SENDING_STATUS;
4230 
4231 	tp = req->req_vbuf;
4232 	paddr = req->req_pbuf;
4233 	paddr += MPT_RQSL(mpt);
4234 
4235 	memset(tp, 0, sizeof (*tp));
4236 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4237 	if (mpt->is_fc) {
4238 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4239 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4240 		uint8_t *sts_vbuf;
4241 		uint32_t *rsp;
4242 
4243 		sts_vbuf = req->req_vbuf;
4244 		sts_vbuf += MPT_RQSL(mpt);
4245 		rsp = (uint32_t *) sts_vbuf;
4246 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4247 
4248 		/*
4249 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4250 		 * It has to be big-endian in memory and is organized
4251 		 * in 32 bit words, which are much easier to deal with
4252 		 * as words which are swizzled as needed.
4253 		 *
4254 		 * All we're filling here is the FC_RSP payload.
4255 		 * We may just have the chip synthesize it if
4256 		 * we have no residual and an OK status.
4257 		 *
4258 		 */
4259 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4260 
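		/*
		 * As 32 bit words: word 2 carries the response flags
		 * and SCSI status, word 3 the residual, word 4 the
		 * sense length, and the sense data itself starts at
		 * word 8, past the response-info area.
		 */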
4261 		rsp[2] = status;
4262 		if (tgt->resid) {
4263 			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
4264 			rsp[3] = htobe32(tgt->resid);
4265 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4266 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4267 #endif
4268 		}
4269 		if (status == SCSI_STATUS_CHECK_COND) {
4270 			int i;
4271 
4272 			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
4273 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4274 			if (sense_data) {
4275 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4276 			} else {
4277 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4278 				    "TION but no sense data?\n");
4279 				memset(&rsp[8], 0, MPT_SENSE_SIZE);
4280 			}
4281 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4282 				rsp[i] = htobe32(rsp[i]);
4283 			}
4284 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4285 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4286 #endif
4287 		}
4288 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4289 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4290 #endif
4291 		rsp[2] = htobe32(rsp[2]);
4292 	} else if (mpt->is_sas) {
4293 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4294 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4295 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4296 	} else {
4297 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4298 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4299 		tp->StatusCode = status;
4300 		tp->QueueTag = htole16(sp->Tag);
4301 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4302 	}
4303 
4304 	tp->ReplyWord = htole32(tgt->reply_desc);
4305 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4306 
4307 #ifdef	WE_CAN_USE_AUTO_REPOST
4308 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4309 #endif
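	/*
	 * If we have clean status and no response payload to send,
	 * let the chip synthesize good status on its own; otherwise
	 * point a simple SGE at the response buffer built above.
	 */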
4310 	if (status == SCSI_STATUS_OK && resplen == 0) {
4311 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4312 	} else {
4313 		tp->StatusDataSGE.u.Address32 = (uint32_t) paddr;
4314 		tp->StatusDataSGE.FlagsLength =
4315 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4316 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4317 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4318 			MPI_SGE_FLAGS_END_OF_LIST	|
4319 			MPI_SGE_FLAGS_END_OF_BUFFER;
4320 		tp->StatusDataSGE.FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
4321 		tp->StatusDataSGE.FlagsLength |= resplen;
4322 	}
4323 
4324 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4325 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4326 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4327 	    req->serno, tgt->resid);
4328 	if (ccb) {
4329 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4330 		ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz);
4331 	}
4332 	mpt_send_cmd(mpt, req);
4333 }
4334 
4335 static void
4336 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4337     tgt_resource_t *trtp, int init_id)
4338 {
4339 	struct ccb_immed_notify *inot;
4340 	mpt_tgt_state_t *tgt;
4341 
4342 	tgt = MPT_TGT_STATE(mpt, req);
4343 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4344 	if (inot == NULL) {
4345 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
4346 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4347 		return;
4348 	}
4349 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4350 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4351 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4352 
4353 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4354 	inot->sense_len = 0;
4355 	memset(inot->message_args, 0, sizeof (inot->message_args));
4356 	inot->initiator_id = init_id;	/* XXX */
4357 
4358 	/*
4359 	 * This is a somewhat grotesque attempt to map from task management
4360 	 * to old style SCSI messages. God help us all.
4361 	 */
4362 	switch (fc) {
4363 	case MPT_ABORT_TASK_SET:
4364 		inot->message_args[0] = MSG_ABORT_TAG;
4365 		break;
4366 	case MPT_CLEAR_TASK_SET:
4367 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4368 		break;
4369 	case MPT_TARGET_RESET:
4370 		inot->message_args[0] = MSG_TARGET_RESET;
4371 		break;
4372 	case MPT_CLEAR_ACA:
4373 		inot->message_args[0] = MSG_CLEAR_ACA;
4374 		break;
4375 	case MPT_TERMINATE_TASK:
4376 		inot->message_args[0] = MSG_ABORT_TAG;
4377 		break;
4378 	default:
4379 		inot->message_args[0] = MSG_NOOP;
4380 		break;
4381 	}
4382 	tgt->ccb = (union ccb *) inot;
4383 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4384 	MPTLOCK_2_CAMLOCK(mpt);
4385 	xpt_done((union ccb *)inot);
4386 	CAMLOCK_2_MPTLOCK(mpt);
4387 }
4388 
4389 static void
4390 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4391 {
4392 	struct ccb_accept_tio *atiop;
4393 	lun_id_t lun;
4394 	int tag_action = 0;
4395 	mpt_tgt_state_t *tgt;
4396 	tgt_resource_t *trtp = NULL;
4397 	U8 *lunptr;
4398 	U8 *vbuf;
4399 	U16 itag;
4400 	U16 ioindex;
4401 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4402 	uint8_t *cdbp;
4403 
4404 	/*
4405 	 * First, DMA sync the received command- which is in the *request*
4406 	 * phys area.
4407 	 * XXX: We could optimize this for a range
4408 	 */
4409 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
4410 	    BUS_DMASYNC_POSTREAD);
4411 
4412 	/*
4413 	 * Stash info for the current command where we can get at it later.
4414 	 */
4415 	vbuf = req->req_vbuf;
4416 	vbuf += MPT_RQSL(mpt);
4417 
4418 	/*
4419 	 * Get our state pointer set up.
4420 	 */
4421 	tgt = MPT_TGT_STATE(mpt, req);
4422 	if (tgt->state != TGT_STATE_LOADED) {
4423 		mpt_tgt_dump_req_state(mpt, req);
4424 		panic("bad target state in mpt_scsi_tgt_atio");
4425 	}
4426 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4427 	tgt->state = TGT_STATE_IN_CAM;
4428 	tgt->reply_desc = reply_desc;
4429 	ioindex = GET_IO_INDEX(reply_desc);
4430 
4431 	if (mpt->is_fc) {
4432 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4433 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4434 		if (fc->FcpCntl[2]) {
4435 			/*
4436 			 * Task Management Request
4437 			 */
4438 			switch (fc->FcpCntl[2]) {
4439 			case 0x2:
4440 				fct = MPT_ABORT_TASK_SET;
4441 				break;
4442 			case 0x4:
4443 				fct = MPT_CLEAR_TASK_SET;
4444 				break;
4445 			case 0x20:
4446 				fct = MPT_TARGET_RESET;
4447 				break;
4448 			case 0x40:
4449 				fct = MPT_CLEAR_ACA;
4450 				break;
4451 			case 0x80:
4452 				fct = MPT_TERMINATE_TASK;
4453 				break;
4454 			default:
4455 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4456 				    fc->FcpCntl[2]);
4457 				mpt_scsi_tgt_status(mpt, 0, req,
4458 				    SCSI_STATUS_OK, 0);
4459 				return;
4460 			}
4461 		} else {
4462 			switch (fc->FcpCntl[1]) {
4463 			case 0:
4464 				tag_action = MSG_SIMPLE_Q_TAG;
4465 				break;
4466 			case 1:
4467 				tag_action = MSG_HEAD_OF_Q_TAG;
4468 				break;
4469 			case 2:
4470 				tag_action = MSG_ORDERED_Q_TAG;
4471 				break;
4472 			default:
4473 				/*
4474 				 * Bah. Ignore Untagged Queueing and ACA
4475 				 */
4476 				tag_action = MSG_SIMPLE_Q_TAG;
4477 				break;
4478 			}
4479 		}
4480 		tgt->resid = be32toh(fc->FcpDl);
4481 		cdbp = fc->FcpCdb;
4482 		lunptr = fc->FcpLun;
4483 		itag = be16toh(fc->OptionalOxid);
4484 	} else if (mpt->is_sas) {
4485 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4486 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4487 		cdbp = ssp->CDB;
4488 		lunptr = ssp->LogicalUnitNumber;
4489 		itag = ssp->InitiatorTag;
4490 	} else {
4491 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4492 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4493 		cdbp = sp->CDB;
4494 		lunptr = sp->LogicalUnitNumber;
4495 		itag = sp->Tag;
4496 	}
4497 
4498 	/*
4499 	 * Generate a simple lun
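	 * (selector 0x40 in byte 0 is flat space addressing, zero is
	 * peripheral addressing; other formats are not handled)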
4500 	 */
4501 	switch (lunptr[0] & 0xc0) {
4502 	case 0x40:
4503 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
4504 		break;
4505 	case 0:
4506 		lun = lunptr[1];
4507 		break;
4508 	default:
4509 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
4510 		lun = 0xffff;
4511 		break;
4512 	}
4513 
4514 	/*
4515 	 * Deal with non-enabled or bad luns here.
4516 	 */
4517 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4518 	    mpt->trt[lun].enabled == 0) {
4519 		if (mpt->twildcard) {
4520 			trtp = &mpt->trt_wildcard;
4521 		} else if (fct != MPT_NIL_TMT_VALUE) {
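			/*
			 * Fixed format sense data: ILLEGAL REQUEST,
			 * ASC 0x25 (LOGICAL UNIT NOT SUPPORTED).
			 */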
4522 			const uint8_t sp[MPT_SENSE_SIZE] = {
4523 				0xf0, 0, 0x5, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0x25
4524 			};
4525 			mpt_scsi_tgt_status(mpt, NULL, req,
4526 			    SCSI_STATUS_CHECK_COND, sp);
4527 			return;
4528 		}
4529 	} else {
4530 		trtp = &mpt->trt[lun];
4531 	}
4532 
4533 	/*
4534 	 * Deal with any task management
4535 	 */
4536 	if (fct != MPT_NIL_TMT_VALUE) {
4537 		if (trtp == NULL) {
4538 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
4539 			    fct);
4540 			mpt_scsi_tgt_status(mpt, 0, req,
4541 			    SCSI_STATUS_OK, 0);
4542 		} else {
4543 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
4544 			    GET_INITIATOR_INDEX(reply_desc));
4545 		}
4546 		return;
4547 	}
4548 
4549 
4550 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
4551 	if (atiop == NULL) {
4552 		mpt_lprt(mpt, MPT_PRT_WARN,
4553 		    "no ATIOs for lun %u- sending back %s\n", lun,
4554 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
4555 		mpt_scsi_tgt_status(mpt, NULL, req,
4556 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
4557 		    NULL);
4558 		return;
4559 	}
4560 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
4561 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4562 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
4563 	atiop->ccb_h.ccb_mpt_ptr = mpt;
4564 	atiop->ccb_h.status = CAM_CDB_RECVD;
4565 	atiop->ccb_h.target_lun = lun;
4566 	atiop->sense_len = 0;
4567 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
4568 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
4569 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
4570 
4571 	/*
4572 	 * The tag we construct here allows us to find the
4573 	 * original request that the command came in with.
4574 	 *
4575 	 * This way we don't have to depend on anything but the
4576 	 * tag to find things when CCBs show back up from CAM.
4577 	 */
4578 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4579 	tgt->tag_id = atiop->tag_id;
4580 	if (tag_action) {
4581 		atiop->tag_action = tag_action;
4582 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
4583 	}
4584 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4585 		int i;
4586 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
4587 		    atiop->ccb_h.target_lun);
4588 		for (i = 0; i < atiop->cdb_len; i++) {
4589 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
4590 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
4591 		}
4592 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
4593 		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
4594 	}
4595 
4596 	MPTLOCK_2_CAMLOCK(mpt);
4597 	xpt_done((union ccb *)atiop);
4598 	CAMLOCK_2_MPTLOCK(mpt);
4599 }
4600 
4601 static void
4602 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
4603 {
4604 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4605 
4606 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
4607 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
4608 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
4609 	    tgt->tag_id, tgt->state);
4610 }
4611 
4612 static void
4613 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
4614 {
4615 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
4616 	    req->index, req->index, req->state);
4617 	mpt_tgt_dump_tgt_state(mpt, req);
4618 }
4619 
4620 static int
4621 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
4622     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
4623 {
4624 	int dbg;
4625 	union ccb *ccb;
4626 	U16 status;
4627 
4628 	if (reply_frame == NULL) {
4629 		/*
4630 		 * A turbo (context-only) reply; figure out the command's state.
4631 		 */
4632 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4633 
4634 #ifdef	INVARIANTS
4635 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
4636 		if (tgt->req) {
4637 			mpt_req_not_spcl(mpt, tgt->req,
4638 			    "turbo scsi_tgt_reply associated req", __LINE__);
4639 		}
4640 #endif
4641 		switch(tgt->state) {
4642 		case TGT_STATE_LOADED:
4643 			/*
4644 			 * This is a new command starting.
4645 			 */
4646 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
4647 			break;
4648 		case TGT_STATE_MOVING_DATA:
4649 		{
4650 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4651 
4652 			ccb = tgt->ccb;
4653 			if (tgt->req == NULL) {
4654 				panic("mpt: turbo target reply with null "
4655 				    "associated request moving data");
4656 				/* NOTREACHED */
4657 			}
4658 			if (ccb == NULL) {
4659 				panic("mpt: turbo target reply with null "
4660 				    "associated ccb moving data");
4661 				/* NOTREACHED */
4662 			}
4663 			tgt->ccb = NULL;
4664 			tgt->nxfers++;
4665 			untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
4666 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4667 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
4668 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
4669 			/*
4670 			 * Free the Target Assist Request
4671 			 */
4672 			KASSERT(tgt->req->ccb == ccb,
4673 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
4674 			    tgt->req->serno, tgt->req->ccb));
4675 			TAILQ_REMOVE(&mpt->request_pending_list,
4676 			    tgt->req, links);
4677 			mpt_free_request(mpt, tgt->req);
4678 			tgt->req = NULL;
4679 
4680 			/*
4681 			 * Do we need to send status now? That is, are
4682 			 * we done with all our data transfers?
4683 			 */
4684 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4685 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4686 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4687 				KASSERT(ccb->ccb_h.status,
4688 				    ("zero ccb sts at %d\n", __LINE__));
4689 				tgt->state = TGT_STATE_IN_CAM;
4690 				if (mpt->outofbeer) {
4691 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4692 					mpt->outofbeer = 0;
4693 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4694 				}
4695 				MPTLOCK_2_CAMLOCK(mpt);
4696 				xpt_done(ccb);
4697 				CAMLOCK_2_MPTLOCK(mpt);
4698 				break;
4699 			}
4700 			/*
4701 			 * Otherwise, send status (and sense)
4702 			 */
4703 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4704 				sp = sense;
4705 				memcpy(sp, &ccb->csio.sense_data,
4706 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
4707 			}
4708 			mpt_scsi_tgt_status(mpt, ccb, req,
4709 			    ccb->csio.scsi_status, sp);
4710 			break;
4711 		}
4712 		case TGT_STATE_SENDING_STATUS:
4713 		case TGT_STATE_MOVING_DATA_AND_STATUS:
4714 		{
4715 			int ioindex;
4716 			ccb = tgt->ccb;
4717 
4718 			if (tgt->req == NULL) {
4719 				panic("mpt: turbo target reply with null "
4720 				    "associated request sending status");
4721 				/* NOTREACHED */
4722 			}
4723 
4724 			if (ccb) {
4725 				tgt->ccb = NULL;
4726 				if (tgt->state ==
4727 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
4728 					tgt->nxfers++;
4729 				}
4730 				untimeout(mpt_timeout, ccb,
4731 				    ccb->ccb_h.timeout_ch);
4732 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4733 					ccb->ccb_h.status |= CAM_SENT_SENSE;
4734 				}
4735 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4736 				    "TARGET_STATUS tag %x sts %x flgs %x req "
4737 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
4738 				    ccb->ccb_h.flags, tgt->req);
4739 				/*
4740 				 * Free the Target Send Status Request
4741 				 */
4742 				KASSERT(tgt->req->ccb == ccb,
4743 				    ("tgt->req %p:%u tgt->req->ccb %p",
4744 				    tgt->req, tgt->req->serno, tgt->req->ccb));
4745 				/*
4746 				 * Notify CAM that we're done
4747 				 */
4748 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4749 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4750 				KASSERT(ccb->ccb_h.status,
4751 				    ("ZERO ccb sts at %d\n", __LINE__));
4752 				tgt->ccb = NULL;
4753 			} else {
4754 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4755 				    "TARGET_STATUS non-CAM for req %p:%u\n",
4756 				    tgt->req, tgt->req->serno);
4757 			}
4758 			TAILQ_REMOVE(&mpt->request_pending_list,
4759 			    tgt->req, links);
4760 			mpt_free_request(mpt, tgt->req);
4761 			tgt->req = NULL;
4762 
4763 			/*
4764 			 * And re-post the Command Buffer.
4765 			 * This will reset the state.
4766 			 */
4767 			ioindex = GET_IO_INDEX(reply_desc);
4768 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4769 			mpt_post_target_command(mpt, req, ioindex);
4770 
4771 			/*
4772 			 * And post a done for anyone who cares
4773 			 */
4774 			if (ccb) {
4775 				if (mpt->outofbeer) {
4776 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4777 					mpt->outofbeer = 0;
4778 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4779 				}
4780 				MPTLOCK_2_CAMLOCK(mpt);
4781 				xpt_done(ccb);
4782 				CAMLOCK_2_MPTLOCK(mpt);
4783 			}
4784 			break;
4785 		}
4786 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
4787 			tgt->state = TGT_STATE_LOADED;
4788 			break;
4789 		default:
4790 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
4791 			    "Reply Function\n", tgt->state);
4792 		}
4793 		return (TRUE);
4794 	}
4795 
4796 	status = le16toh(reply_frame->IOCStatus);
4797 	if (status != MPI_IOCSTATUS_SUCCESS) {
4798 		dbg = MPT_PRT_ERROR;
4799 	} else {
4800 		dbg = MPT_PRT_DEBUG1;
4801 	}
4802 
4803 	mpt_lprt(mpt, dbg,
4804 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
4805 	     req, req->serno, reply_frame, reply_frame->Function, status);
4806 
4807 	switch (reply_frame->Function) {
4808 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
4809 	{
4810 		mpt_tgt_state_t *tgt;
4811 #ifdef	INVARIANTS
4812 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
4813 #endif
4814 		if (status != MPI_IOCSTATUS_SUCCESS) {
4815 			/*
4816 			 * XXX What to do?
4817 			 */
4818 			break;
4819 		}
4820 		tgt = MPT_TGT_STATE(mpt, req);
4821 		KASSERT(tgt->state == TGT_STATE_LOADING,
4822 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
4823 		mpt_assign_serno(mpt, req);
4824 		tgt->state = TGT_STATE_LOADED;
4825 		break;
4826 	}
4827 	case MPI_FUNCTION_TARGET_ASSIST:
4828 #ifdef	INVARIANTS
4829 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
4830 #endif
4831 		mpt_prt(mpt, "target assist completion\n");
4832 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4833 		mpt_free_request(mpt, req);
4834 		break;
4835 	case MPI_FUNCTION_TARGET_STATUS_SEND:
4836 #ifdef	INVARIANTS
4837 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
4838 #endif
4839 		mpt_prt(mpt, "status send completion\n");
4840 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4841 		mpt_free_request(mpt, req);
4842 		break;
4843 	case MPI_FUNCTION_TARGET_MODE_ABORT:
4844 	{
4845 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
4846 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
4847 		PTR_MSG_TARGET_MODE_ABORT abtp =
4848 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
4849 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
4850 #ifdef	INVARIANTS
4851 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
4852 #endif
4853 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
4854 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
4855 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4856 		mpt_free_request(mpt, req);
4857 		break;
4858 	}
4859 	default:
4860 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
4861 		    "0x%x\n", reply_frame->Function);
4862 		break;
4863 	}
4864 	return (TRUE);
4865 }
4866