xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision 4f29da19bd44f0e99f021510460a81bf754c21d2)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC  adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  */
61 /*-
62  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
63  * Copyright (c) 2005, WHEEL Sp. z o.o.
64  * Copyright (c) 2004, 2005 Justin T. Gibbs
65  * All rights reserved.
66  *
67  * Redistribution and use in source and binary forms, with or without
68  * modification, are permitted provided that the following conditions are
69  * met:
70  * 1. Redistributions of source code must retain the above copyright
71  *    notice, this list of conditions and the following disclaimer.
72  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
73  *    substantially similar to the "NO WARRANTY" disclaimer below
74  *    ("Disclaimer") and any redistribution must be conditioned upon including
75  *    a substantially similar Disclaimer requirement for further binary
76  *    redistribution.
77  * 3. Neither the names of the above listed copyright holders nor the names
78  *    of any contributors may be used to endorse or promote products derived
79  *    from this software without specific prior written permission.
80  *
81  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
82  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
83  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
84  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
85  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
86  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
87  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
88  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
89  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
90  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
91  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
92  */
93 #include <sys/cdefs.h>
94 __FBSDID("$FreeBSD$");
95 
96 #include <dev/mpt/mpt.h>
97 #include <dev/mpt/mpt_cam.h>
98 #include <dev/mpt/mpt_raid.h>
99 
100 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
101 #include "dev/mpt/mpilib/mpi_init.h"
102 #include "dev/mpt/mpilib/mpi_targ.h"
103 #include "dev/mpt/mpilib/mpi_fc.h"
104 
105 #include <sys/callout.h>
106 #include <sys/kthread.h>
107 
108 static void mpt_poll(struct cam_sim *);
109 static timeout_t mpt_timeout;
110 static void mpt_action(struct cam_sim *, union ccb *);
111 static int mpt_setwidth(struct mpt_softc *, int, int);
112 static int mpt_setsync(struct mpt_softc *, int, int, int);
113 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
114 static mpt_reply_handler_t mpt_scsi_reply_handler;
115 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
116 static mpt_reply_handler_t mpt_fc_els_reply_handler;
117 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
118 					MSG_DEFAULT_REPLY *);
119 static int mpt_bus_reset(struct mpt_softc *, int);
120 static int mpt_fc_reset_link(struct mpt_softc *, int);
121 
122 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
123 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
124 static void mpt_recovery_thread(void *arg);
125 static void mpt_recover_commands(struct mpt_softc *mpt);
126 
127 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
128     u_int, u_int, u_int, int);
129 
130 static void mpt_fc_add_els(struct mpt_softc *mpt, request_t *);
131 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
132 static void mpt_add_target_commands(struct mpt_softc *mpt);
133 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
134 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
135 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
136 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
137 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
138 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
139     uint8_t, uint8_t const *);
140 static void
141 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
142     tgt_resource_t *, int);
143 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
144 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
145 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
146 
147 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
148 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
149 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
150 
151 static mpt_probe_handler_t	mpt_cam_probe;
152 static mpt_attach_handler_t	mpt_cam_attach;
153 static mpt_enable_handler_t	mpt_cam_enable;
154 static mpt_event_handler_t	mpt_cam_event;
155 static mpt_reset_handler_t	mpt_cam_ioc_reset;
156 static mpt_detach_handler_t	mpt_cam_detach;
157 
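/*
 * CAM personality registered with the MPT core: these hooks are invoked
 * to probe, attach, enable ports, handle async events, recover from IOC
 * resets and detach each mpt(4) instance.
 */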
158 static struct mpt_personality mpt_cam_personality =
159 {
160 	.name		= "mpt_cam",
161 	.probe		= mpt_cam_probe,
162 	.attach		= mpt_cam_attach,
163 	.enable		= mpt_cam_enable,
164 	.event		= mpt_cam_event,
165 	.reset		= mpt_cam_ioc_reset,
166 	.detach		= mpt_cam_detach,
167 };
168 
169 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
170 
171 int
172 mpt_cam_probe(struct mpt_softc *mpt)
173 {
174 	/*
175 	 * Only attach to nodes that support the initiator or target
176 	 * role or have RAID physical devices that need CAM pass-thru support.
177 	 */
178 	if ((mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_INITIATOR) != 0
179 	 || (mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_TARGET) != 0
180 	 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
181 		return (0);
182 	}
183 	return (ENODEV);
184 }
185 
186 int
187 mpt_cam_attach(struct mpt_softc *mpt)
188 {
189 	struct cam_devq *devq;
190 	mpt_handler_t	 handler;
191 	int		 maxq;
192 	int		 error;
193 
194 	TAILQ_INIT(&mpt->request_timeout_list);
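	/*
	 * The CAM queue depth is the lesser of the IOC's global credit count
	 * and the size of our request pool; requests reserved below for ELS
	 * buffers and the dedicated TMF request are subtracted from it.
	 */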
195 	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
196 	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
197 
198 	handler.reply_handler = mpt_scsi_reply_handler;
199 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
200 				     &scsi_io_handler_id);
201 	if (error != 0) {
202 		goto cleanup;
203 	}
204 
205 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
206 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
207 				     &scsi_tmf_handler_id);
208 	if (error != 0) {
209 		goto cleanup;
210 	}
211 
212 	/*
213 	 * We keep two requests reserved for ELS replies/responses
214 	 * if we're fibre channel and target mode.
215 	 */
216 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
217 		request_t *req;
218 		int i;
219 
220 		handler.reply_handler = mpt_fc_els_reply_handler;
221 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
222 		    &fc_els_handler_id);
223 		if (error != 0) {
224 			goto cleanup;
225 		}
226 
227 		/*
228 		 * Feed the chip some ELS buffer resources
229 		 */
230 		for (i = 0; i < MPT_MAX_ELS; i++) {
231 			req = mpt_get_request(mpt, FALSE);
232 			if (req == NULL) {
233 				break;
234 			}
235 			mpt_fc_add_els(mpt, req);
236 		}
237 		if (i == 0) {
238 			mpt_prt(mpt, "Unable to add ELS buffer resources\n");
			error = ENOMEM;
239 			goto cleanup;
240 		}
241 		maxq -= i;
242 	}
243 
244 	/*
245 	 * If we're in target mode, register a reply
246 	 * handler for it and add some commands.
247 	 */
248 	if ((mpt->role & MPT_ROLE_TARGET) != 0) {
249 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
250 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
251 		    &mpt->scsi_tgt_handler_id);
252 		if (error != 0) {
253 			goto cleanup;
254 		}
255 
256 		/*
257 		 * Add some target command resources
258 		 */
259 		mpt_add_target_commands(mpt);
260 	}
261 
262 	/*
263 	 * We keep one request reserved for timeout TMF requests.
264 	 */
265 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
266 	if (mpt->tmf_req == NULL) {
267 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
268 		error = ENOMEM;
269 		goto cleanup;
270 	}
271 
272 	/*
273 	 * Mark the request as free even though not on the free list.
274 	 * There is only one TMF request allowed to be outstanding at
275 	 * a time and the TMF routines perform their own allocation
276 	 * tracking using the standard state flags.
277 	 */
278 	mpt->tmf_req->state = REQ_STATE_FREE;
279 	maxq--;
280 
281 	if (mpt_spawn_recovery_thread(mpt) != 0) {
282 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
283 		error = ENOMEM;
284 		goto cleanup;
285 	}
286 
287 	/*
288 	 * Create the device queue for our SIM(s).
289 	 */
290 	devq = cam_simq_alloc(maxq);
291 	if (devq == NULL) {
292 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
293 		error = ENOMEM;
294 		goto cleanup;
295 	}
296 
297 	/*
298 	 * Construct our SIM entry.
299 	 */
300 	mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
301 	    mpt->unit, 1, maxq, devq);
302 	if (mpt->sim == NULL) {
303 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
304 		cam_simq_free(devq);
305 		error = ENOMEM;
306 		goto cleanup;
307 	}
308 
309 	/*
310 	 * Register exactly this bus.
311 	 */
312 	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
313 		mpt_prt(mpt, "Bus registration Failed!\n");
314 		error = ENOMEM;
315 		goto cleanup;
316 	}
317 
318 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
319 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
320 		mpt_prt(mpt, "Unable to allocate Path!\n");
321 		error = ENOMEM;
322 		goto cleanup;
323 	}
324 
325 	/*
326 	 * Only register a second bus for RAID physical
327 	 * devices if the controller supports RAID.
328 	 */
329 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
330 		return (0);
331 	}
332 
333 	/*
334 	 * Create a "bus" to export all hidden disks to CAM.
335 	 */
336 	mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
337 	    mpt->unit, 1, maxq, devq);
338 	if (mpt->phydisk_sim == NULL) {
339 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
340 		error = ENOMEM;
341 		goto cleanup;
342 	}
343 
344 	/*
345 	 * Register exactly this bus.
346 	 */
347 	if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
348 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
349 		error = ENOMEM;
350 		goto cleanup;
351 	}
352 
353 	if (xpt_create_path(&mpt->phydisk_path, NULL,
354 	    cam_sim_path(mpt->phydisk_sim),
355 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
356 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
357 		error = ENOMEM;
358 		goto cleanup;
359 	}
360 	return (0);
361 cleanup:
362 	mpt_cam_detach(mpt);
363 	return (error);
364 }
365 
366 /*
367  * Read FC configuration information
368  */
369 static int
370 mpt_read_config_info_fc(struct mpt_softc *mpt)
371 {
372 	char *topology = NULL;
373 	int rv, speed = 0;
374 
375 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
376 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
377 	if (rv) {
378 		return (-1);
379 	}
380 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
381 		 mpt->mpt_fcport_page0.Header.PageVersion,
382 		 mpt->mpt_fcport_page0.Header.PageLength,
383 		 mpt->mpt_fcport_page0.Header.PageNumber,
384 		 mpt->mpt_fcport_page0.Header.PageType);
385 
386 
387 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
388 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
389 	if (rv) {
390 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
391 		return (-1);
392 	}
393 
394 	speed = mpt->mpt_fcport_page0.CurrentSpeed;
395 
396 	switch (mpt->mpt_fcport_page0.Flags &
397 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
398 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
399 		speed = 0;
400 		topology = "<NO LOOP>";
401 		break;
402 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
403 		topology = "N-Port";
404 		break;
405 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
406 		topology = "NL-Port";
407 		break;
408 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
409 		topology = "F-Port";
410 		break;
411 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
412 		topology = "FL-Port";
413 		break;
414 	default:
415 		speed = 0;
416 		topology = "?";
417 		break;
418 	}
419 
420 	mpt_lprt(mpt, MPT_PRT_INFO,
421 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
422 	    "Speed %u-Gbit\n", topology,
423 	    mpt->mpt_fcport_page0.WWNN.High,
424 	    mpt->mpt_fcport_page0.WWNN.Low,
425 	    mpt->mpt_fcport_page0.WWPN.High,
426 	    mpt->mpt_fcport_page0.WWPN.Low,
427 	    speed);
428 
429 	return (0);
430 }
431 
432 /*
433  * Set FC configuration information.
434  */
435 static int
436 mpt_set_initial_config_fc(struct mpt_softc *mpt)
437 {
438 #if	0
439 	CONFIG_PAGE_FC_PORT_1 fc;
440 	U32 fl;
441 	int r, doit = 0;
442 
443 	if ((mpt->role & MPT_ROLE_TARGET) == 0) {
444 		return (0);
445 	}
446 
447 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
448 	    &fc.Header, FALSE, 5000);
449 	if (r) {
450 		return (mpt_fc_reset_link(mpt, 1));
451 	}
452 
453 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 0,
454 	    &fc.Header, sizeof (fc), FALSE, 5000);
455 	if (r) {
456 		return (mpt_fc_reset_link(mpt, 1));
457 	}
458 
459 	fl = le32toh(fc.Flags);
460 	if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
461 		fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
462 		doit = 1;
463 	}
464 	if ((fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) &&
465 	    (mpt->role & MPT_ROLE_INITIATOR) == 0) {
466 		fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
467 		doit = 1;
468 	}
469 	if ((fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) &&
470 	    (mpt->role & MPT_ROLE_TARGET) == 0) {
471 		fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
472 		doit = 1;
473 	}
474 	if (doit) {
475 		const char *cc;
476 
477 		mpt_lprt(mpt, MPT_PRT_INFO,
478 		    "FC Port Page 1: New Flags %x \n", fl);
479 		fc.Flags = htole32(fl);
480 		r = mpt_write_cfg_page(mpt,
481 		    MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT, 0, &fc.Header,
482 		    sizeof(fc), FALSE, 5000);
483 		if (r != 0) {
484 			cc = "FC PORT PAGE1 UPDATE: FAILED\n";
485 		} else {
486 			cc = "FC PORT PAGE1 UPDATED: SYSTEM NEEDS RESET\n";
487 		}
488 		mpt_prt(mpt, cc);
489 	}
490 #else
491 	if ((mpt->role & MPT_ROLE_TARGET) == 0) {
492 		return (0);
493 	}
494 #endif
495 	return (mpt_fc_reset_link(mpt, 1));
496 }
497 
498 /*
499  * Read SAS configuration information. Nothing to do yet.
500  */
501 static int
502 mpt_read_config_info_sas(struct mpt_softc *mpt)
503 {
504 	return (0);
505 }
506 
507 /*
508  * Set SAS configuration information. Nothing to do yet.
509  */
510 static int
511 mpt_set_initial_config_sas(struct mpt_softc *mpt)
512 {
513 	return (0);
514 }
515 
516 /*
517  * Read SCSI configuration information
518  */
519 static int
520 mpt_read_config_info_spi(struct mpt_softc *mpt)
521 {
522 	int rv, i;
523 
524 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0,
525 				 0, &mpt->mpt_port_page0.Header,
526 				 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
527 	if (rv)
528 		return (-1);
529 	mpt_lprt(mpt, MPT_PRT_DEBUG,
530 		 "SPI Port Page 0 Header: %x %x %x %x\n",
531 		 mpt->mpt_port_page0.Header.PageVersion,
532 		 mpt->mpt_port_page0.Header.PageLength,
533 		 mpt->mpt_port_page0.Header.PageNumber,
534 		 mpt->mpt_port_page0.Header.PageType);
535 
536 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1,
537 				 0, &mpt->mpt_port_page1.Header,
538 				 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
539 	if (rv)
540 		return (-1);
541 
542 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
543 		 mpt->mpt_port_page1.Header.PageVersion,
544 		 mpt->mpt_port_page1.Header.PageLength,
545 		 mpt->mpt_port_page1.Header.PageNumber,
546 		 mpt->mpt_port_page1.Header.PageType);
547 
548 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2,
549 				 /*PageAddress*/0, &mpt->mpt_port_page2.Header,
550 				 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
551 	if (rv)
552 		return (-1);
553 
554 	mpt_lprt(mpt, MPT_PRT_DEBUG,
555 		 "SPI Port Page 2 Header: %x %x %x %x\n",
556 		 mpt->mpt_port_page2.Header.PageVersion,
557 		 mpt->mpt_port_page2.Header.PageLength,
558 		 mpt->mpt_port_page2.Header.PageNumber,
559 		 mpt->mpt_port_page2.Header.PageType);
560 
561 	for (i = 0; i < 16; i++) {
562 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
563 					 0, i, &mpt->mpt_dev_page0[i].Header,
564 					 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
565 		if (rv)
566 			return (-1);
567 
568 		mpt_lprt(mpt, MPT_PRT_DEBUG,
569 			 "SPI Target %d Device Page 0 Header: %x %x %x %x\n",
570 			 i, mpt->mpt_dev_page0[i].Header.PageVersion,
571 			 mpt->mpt_dev_page0[i].Header.PageLength,
572 			 mpt->mpt_dev_page0[i].Header.PageNumber,
573 			 mpt->mpt_dev_page0[i].Header.PageType);
574 
575 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
576 					 1, i, &mpt->mpt_dev_page1[i].Header,
577 					 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
578 		if (rv)
579 			return (-1);
580 
581 		mpt_lprt(mpt, MPT_PRT_DEBUG,
582 			 "SPI Target %d Device Page 1 Header: %x %x %x %x\n",
583 			 i, mpt->mpt_dev_page1[i].Header.PageVersion,
584 			 mpt->mpt_dev_page1[i].Header.PageLength,
585 			 mpt->mpt_dev_page1[i].Header.PageNumber,
586 			 mpt->mpt_dev_page1[i].Header.PageType);
587 	}
588 
589 	/*
590 	 * At this point, we don't *have* to fail. As long as we have
591 	 * valid config header information, we can (barely) lurch
592 	 * along.
593 	 */
594 
595 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
596 				   &mpt->mpt_port_page0.Header,
597 				   sizeof(mpt->mpt_port_page0),
598 				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
599 	if (rv) {
600 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
601 	} else {
602 		mpt_lprt(mpt, MPT_PRT_DEBUG,
603 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
604 		    mpt->mpt_port_page0.Capabilities,
605 		    mpt->mpt_port_page0.PhysicalInterface);
606 	}
607 
608 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
609 				   &mpt->mpt_port_page1.Header,
610 				   sizeof(mpt->mpt_port_page1),
611 				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
612 	if (rv) {
613 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
614 	} else {
615 		mpt_lprt(mpt, MPT_PRT_DEBUG,
616 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
617 		    mpt->mpt_port_page1.Configuration,
618 		    mpt->mpt_port_page1.OnBusTimerValue);
619 	}
620 
621 	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
622 				   &mpt->mpt_port_page2.Header,
623 				   sizeof(mpt->mpt_port_page2),
624 				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
625 	if (rv) {
626 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
627 	} else {
628 		mpt_lprt(mpt, MPT_PRT_DEBUG,
629 		    "SPI Port Page 2: Flags %x Settings %x\n",
630 		    mpt->mpt_port_page2.PortFlags,
631 		    mpt->mpt_port_page2.PortSettings);
632 		for (i = 0; i < 16; i++) {
633 			mpt_lprt(mpt, MPT_PRT_DEBUG,
634 		  	    "SPI Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
635 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
636 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
637 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
638 		}
639 	}
640 
641 	for (i = 0; i < 16; i++) {
642 		rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
643 					   &mpt->mpt_dev_page0[i].Header,
644 					   sizeof(*mpt->mpt_dev_page0),
645 					   /*sleep_ok*/FALSE,
646 					   /*timeout_ms*/5000);
647 		if (rv) {
648 			mpt_prt(mpt,
649 			    "cannot read SPI Tgt %d Device Page 0\n", i);
650 			continue;
651 		}
652 		mpt_lprt(mpt, MPT_PRT_DEBUG,
653 			 "SPI Tgt %d Page 0: NParms %x Information %x\n",
654 			 i, mpt->mpt_dev_page0[i].NegotiatedParameters,
655 			 mpt->mpt_dev_page0[i].Information);
656 
657 		rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
658 					   &mpt->mpt_dev_page1[i].Header,
659 					   sizeof(*mpt->mpt_dev_page1),
660 					   /*sleep_ok*/FALSE,
661 					   /*timeout_ms*/5000);
662 		if (rv) {
663 			mpt_prt(mpt,
664 			    "cannot read SPI Tgt %d Device Page 1\n", i);
665 			continue;
666 		}
667 		mpt_lprt(mpt, MPT_PRT_DEBUG,
668 			 "SPI Tgt %d Page 1: RParms %x Configuration %x\n",
669 			 i, mpt->mpt_dev_page1[i].RequestedParameters,
670 			 mpt->mpt_dev_page1[i].Configuration);
671 	}
672 	return (0);
673 }
674 
675 /*
676  * Validate SPI configuration information.
677  *
678  * In particular, validate SPI Port Page 1.
679  */
680 static int
681 mpt_set_initial_config_spi(struct mpt_softc *mpt)
682 {
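	/*
	 * The expected SPI Port Page 1 Configuration value: our initiator ID
	 * in the low 16 bits and the corresponding one-hot ID bit in the
	 * upper 16 bits.
	 */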
683 	int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
684 	int error;
685 
686 	mpt->mpt_disc_enable = 0xff;
687 	mpt->mpt_tag_enable = 0;
688 
689 	if (mpt->mpt_port_page1.Configuration != pp1val) {
690 		CONFIG_PAGE_SCSI_PORT_1 tmp;
691 
692 		mpt_prt(mpt,
693 		    "SPI Port Page 1 Config value bad (%x) - should be %x\n",
694 		    mpt->mpt_port_page1.Configuration, pp1val);
695 		tmp = mpt->mpt_port_page1;
696 		tmp.Configuration = pp1val;
697 		error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/0,
698 					       &tmp.Header, sizeof(tmp),
699 					       /*sleep_ok*/FALSE,
700 					       /*timeout_ms*/5000);
701 		if (error)
702 			return (-1);
703 		error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
704 					      &tmp.Header, sizeof(tmp),
705 					      /*sleep_ok*/FALSE,
706 					      /*timeout_ms*/5000);
707 		if (error)
708 			return (-1);
709 		if (tmp.Configuration != pp1val) {
710 			mpt_prt(mpt,
711 			    "failed to reset SPI Port Page 1 Config value\n");
712 			return (-1);
713 		}
714 		mpt->mpt_port_page1 = tmp;
715 	}
716 
717 	for (i = 0; i < 16; i++) {
718 		CONFIG_PAGE_SCSI_DEVICE_1 tmp;
719 		tmp = mpt->mpt_dev_page1[i];
720 		tmp.RequestedParameters = 0;
721 		tmp.Configuration = 0;
722 		mpt_lprt(mpt, MPT_PRT_DEBUG,
723 			 "Set Tgt %d SPI DevicePage 1 values to %x 0 %x\n",
724 			 i, tmp.RequestedParameters, tmp.Configuration);
725 		error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/i,
726 					       &tmp.Header, sizeof(tmp),
727 					       /*sleep_ok*/FALSE,
728 					       /*timeout_ms*/5000);
729 		if (error)
730 			return (-1);
731 		error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
732 					      &tmp.Header, sizeof(tmp),
733 					      /*sleep_ok*/FALSE,
734 					      /*timeout_ms*/5000);
735 		if (error)
736 			return (-1);
737 		mpt->mpt_dev_page1[i] = tmp;
738 		mpt_lprt(mpt, MPT_PRT_DEBUG,
739 			 "SPI Tgt %d Page 1: RParm %x Configuration %x\n", i,
740 			 mpt->mpt_dev_page1[i].RequestedParameters,
741 			 mpt->mpt_dev_page1[i].Configuration);
742 	}
743 	return (0);
744 }
745 
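/*
 * Read and establish the initial transport-specific configuration
 * (FC, SAS or SPI) when the port is enabled.
 */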
746 int
747 mpt_cam_enable(struct mpt_softc *mpt)
748 {
749 	if (mpt->is_fc) {
750 		if (mpt_read_config_info_fc(mpt)) {
751 			return (EIO);
752 		}
753 		if (mpt_set_initial_config_fc(mpt)) {
754 			return (EIO);
755 		}
756 	} else if (mpt->is_sas) {
757 		if (mpt_read_config_info_sas(mpt)) {
758 			return (EIO);
759 		}
760 		if (mpt_set_initial_config_sas(mpt)) {
761 			return (EIO);
762 		}
763 	} else {
764 		if (mpt_read_config_info_spi(mpt)) {
765 			return (EIO);
766 		}
767 		if (mpt_set_initial_config_spi(mpt)) {
768 			return (EIO);
769 		}
770 	}
771 	return (0);
772 }
773 
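/*
 * Undo everything mpt_cam_attach() set up: stop the recovery thread,
 * deregister the reply handlers, release the reserved TMF request and
 * free the CAM paths and SIMs.
 */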
774 void
775 mpt_cam_detach(struct mpt_softc *mpt)
776 {
777 	mpt_handler_t handler;
778 
779 	mpt_terminate_recovery_thread(mpt);
780 
781 	handler.reply_handler = mpt_scsi_reply_handler;
782 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
783 			       scsi_io_handler_id);
784 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
785 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
786 			       scsi_tmf_handler_id);
787 	handler.reply_handler = mpt_fc_els_reply_handler;
788 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
789 			       fc_els_handler_id);
790 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
791 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
792 			       mpt->scsi_tgt_handler_id);
793 
794 	if (mpt->tmf_req != NULL) {
795 		mpt_free_request(mpt, mpt->tmf_req);
796 		mpt->tmf_req = NULL;
797 	}
798 
799 	if (mpt->sim != NULL) {
800 		xpt_free_path(mpt->path);
801 		xpt_bus_deregister(cam_sim_path(mpt->sim));
802 		cam_sim_free(mpt->sim, TRUE);
803 		mpt->sim = NULL;
804 	}
805 
806 	if (mpt->phydisk_sim != NULL) {
807 		xpt_free_path(mpt->phydisk_path);
808 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
809 		cam_sim_free(mpt->phydisk_sim, TRUE);
810 		mpt->phydisk_sim = NULL;
811 	}
812 }
813 
814 /* This routine is used to poll for completed requests when interrupts are
815  * unavailable, e.g. after a system crash while dumping core to the swap device. */
816 static void
817 mpt_poll(struct cam_sim *sim)
818 {
819 	struct mpt_softc *mpt;
820 
821 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
822 	MPT_LOCK(mpt);
823 	mpt_intr(mpt);
824 	MPT_UNLOCK(mpt);
825 }
826 
827 /*
828  * Watchdog timeout routine for SCSI requests.
829  */
830 static void
831 mpt_timeout(void *arg)
832 {
833 	union ccb	 *ccb;
834 	struct mpt_softc *mpt;
835 	request_t	 *req;
836 
837 	ccb = (union ccb *)arg;
838 	mpt = ccb->ccb_h.ccb_mpt_ptr;
839 
840 	MPT_LOCK(mpt);
841 	req = ccb->ccb_h.ccb_req_ptr;
842 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
843 	    req->serno, ccb, req->ccb);
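	/*
	 * If the request is still queued, move it from the pending list to
	 * the timeout list and wake the recovery thread to deal with it.
	 */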
844 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
845 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
846 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
847 		req->state |= REQ_STATE_TIMEDOUT;
848 		mpt_wakeup_recovery_thread(mpt);
849 	}
850 	MPT_UNLOCK(mpt);
851 }
852 
853 /*
854  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
855  *
856  * Takes a list of physical segments and builds the SGL for the SCSI I/O command
857  * and forwards the command to the IOC after one last check that CAM has not
858  * aborted the transaction.
859  */
860 static void
861 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
862 {
863 	request_t *req, *trq;
864 	char *mpt_off;
865 	union ccb *ccb;
866 	struct mpt_softc *mpt;
867 	int seg, first_lim;
868 	uint32_t flags, nxt_off;
869 	void *sglp;
870 	MSG_REQUEST_HEADER *hdrp;
871 	SGE_SIMPLE64 *se;
872 	SGE_CHAIN64 *ce;
873 
874 	req = (request_t *)arg;
875 	ccb = req->ccb;
876 
877 	mpt = ccb->ccb_h.ccb_mpt_ptr;
878 	req = ccb->ccb_h.ccb_req_ptr;
879 
880 	hdrp = req->req_vbuf;
881 	mpt_off = req->req_vbuf;
882 
883 	if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
884 		sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
885 	} else /* if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) */ {
886 		sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
887 	}
888 
889 
890 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
891 		error = EFBIG;
892 	}
893 
894 bad:
895 	if (error != 0) {
896 		if (error != EFBIG && error != ENOMEM) {
897 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
898 		}
899 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
900 			cam_status status;
901 			mpt_freeze_ccb(ccb);
902 			if (error == EFBIG) {
903 				status = CAM_REQ_TOO_BIG;
904 			} else if (error == ENOMEM) {
905 				if (mpt->outofbeer == 0) {
906 					mpt->outofbeer = 1;
907 					xpt_freeze_simq(mpt->sim, 1);
908 					mpt_lprt(mpt, MPT_PRT_DEBUG,
909 					    "FREEZEQ\n");
910 				}
911 				status = CAM_REQUEUE_REQ;
912 			} else {
913 				status = CAM_REQ_CMP_ERR;
914 			}
915 			mpt_set_ccb_status(ccb, status);
916 		}
917 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
918 			request_t *cmd_req =
919 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
920 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
921 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
922 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
923 		}
924 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
925 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
926 		xpt_done(ccb);
927 		CAMLOCK_2_MPTLOCK(mpt);
928 		mpt_free_request(mpt, req);
929 		MPTLOCK_2_CAMLOCK(mpt);
930 		return;
931 	}
932 
933 	/*
934 	 * No data to transfer?
935 	 * Just make a single simple SGL with zero length.
936 	 */
937 
938 	if (mpt->verbose >= MPT_PRT_DEBUG) {
939 		int tidx = ((char *)sglp) - mpt_off;
940 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
941 	}
942 
943 	if (nseg == 0) {
944 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
945 		MPI_pSGE_SET_FLAGS(se1,
946 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
947 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
948 		goto out;
949 	}
950 
951 
952 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
953 	if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
954 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
955 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
956 		}
957 	} else {
958 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
959 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
960 		}
961 	}
962 
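	/*
	 * Sync the data buffer for DMA.  Note that the sense of CAM_DIR_IN
	 * and CAM_DIR_OUT is reversed for target assist requests, hence the
	 * swapped sync operations in the non-SCSI-I/O case below.
	 */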
963 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
964 		bus_dmasync_op_t op;
965 		if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
966 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
967 				op = BUS_DMASYNC_PREREAD;
968 			} else {
969 				op = BUS_DMASYNC_PREWRITE;
970 			}
971 		} else {
972 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
973 				op = BUS_DMASYNC_PREWRITE;
974 			} else {
975 				op = BUS_DMASYNC_PREREAD;
976 			}
977 		}
978 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
979 	}
980 
981 	/*
982 	 * Okay, fill in what we can at the end of the command frame.
983 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
984 	 * the command frame.
985 	 *
986 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
987 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
988 	 * that.
989 	 */
990 
991 	if (nseg < MPT_NSGL_FIRST(mpt)) {
992 		first_lim = nseg;
993 	} else {
994 		/*
995 		 * Leave room for CHAIN element
996 		 */
997 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
998 	}
999 
1000 	se = (SGE_SIMPLE64 *) sglp;
1001 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1002 		uint32_t tf;
1003 
1004 		memset(se, 0, sizeof (*se));
1005 		se->Address.Low = dm_segs->ds_addr;
1006 		if (sizeof(bus_addr_t) > 4) {
1007 			se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
1008 		}
1009 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1010 		tf = flags;
1011 		if (seg == first_lim - 1) {
1012 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1013 		}
1014 		if (seg == nseg - 1) {
1015 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1016 				MPI_SGE_FLAGS_END_OF_BUFFER;
1017 		}
1018 		MPI_pSGE_SET_FLAGS(se, tf);
1019 	}
1020 
1021 	if (seg == nseg) {
1022 		goto out;
1023 	}
1024 
1025 	/*
1026 	 * Tell the IOC where to find the first chain element.
1027 	 */
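	/* The chain offset is expressed in 32-bit words from the request start. */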
1028 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1029 	nxt_off = MPT_RQSL(mpt);
1030 	trq = req;
1031 
1032 	/*
1033 	 * Make up the rest of the data segments out of a chain element
1034 	 * (contained in the current request frame) which points to
1035 	 * SIMPLE64 elements in the next request frame, possibly ending
1036 	 * with *another* chain element (if there's more).
1037 	 */
1038 	while (seg < nseg) {
1039 		int this_seg_lim;
1040 		uint32_t tf, cur_off;
1041 		bus_addr_t chain_list_addr;
1042 
1043 		/*
1044 		 * Point to the chain descriptor. Note that the chain
1045 		 * descriptor is at the end of the *previous* list (whether
1046 		 * chain or simple).
1047 		 */
1048 		ce = (SGE_CHAIN64 *) se;
1049 
1050 		/*
1051 		 * Before we change our current pointer, make sure we won't
1052 		 * overflow the request area with this frame. Note that we
1053 		 * test against 'greater than' here as it's okay in this case
1054 		 * to have next offset be just outside the request area.
1055 		 */
1056 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1057 			nxt_off = MPT_REQUEST_AREA;
1058 			goto next_chain;
1059 		}
1060 
1061 		/*
1062 		 * Set our SGE element pointer to the beginning of the chain
1063 		 * list and update our next chain list offset.
1064 		 */
1065 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1066 		cur_off = nxt_off;
1067 		nxt_off += MPT_RQSL(mpt);
1068 
1069 		/*
1070 		 * Now initialize the chain descriptor.
1071 		 */
1072 		memset(ce, 0, sizeof (*ce));
1073 
1074 		/*
1075 		 * Get the physical address of the chain list.
1076 		 */
1077 		chain_list_addr = trq->req_pbuf;
1078 		chain_list_addr += cur_off;
1079 		if (sizeof (bus_addr_t) > 4) {
1080 			ce->Address.High =
1081 			    (uint32_t) ((uint64_t)chain_list_addr >> 32);
1082 		}
1083 		ce->Address.Low = (uint32_t) chain_list_addr;
1084 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1085 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1086 
1087 		/*
1088 		 * If we have more than a frame's worth of segments left,
1089 		 * set up the chain list to have the last element be another
1090 		 * chain descriptor.
1091 		 */
1092 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1093 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1094 			/*
1095 			 * The chain length is the size, in bytes, of the segments
1096 			 * in this frame plus the trailing chain element.
1097 			 *
1098 			 * The next chain descriptor offset is that segment size
1099 			 * expressed in 32-bit words.
1100 			 */
1101 			ce->Length = (this_seg_lim - seg) *
1102 			    sizeof (SGE_SIMPLE64);
1103 			ce->NextChainOffset = ce->Length >> 2;
1104 			ce->Length += sizeof (SGE_CHAIN64);
1105 		} else {
1106 			this_seg_lim = nseg;
1107 			ce->Length = (this_seg_lim - seg) *
1108 			    sizeof (SGE_SIMPLE64);
1109 		}
1110 
1111 		/*
1112 		 * Fill in the chain list SGE elements with our segment data.
1113 		 *
1114 		 * If we're the last element in this chain list, set the last
1115 		 * element flag. If we're the completely last element period,
1116 		 * set the end of list and end of buffer flags.
1117 		 */
1118 		while (seg < this_seg_lim) {
1119 			memset(se, 0, sizeof (*se));
1120 			se->Address.Low = dm_segs->ds_addr;
1121 			if (sizeof (bus_addr_t) > 4) {
1122 				se->Address.High =
1123 				    ((uint64_t)dm_segs->ds_addr) >> 32;
1124 			}
1125 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1126 			tf = flags;
1127 			if (seg ==  this_seg_lim - 1) {
1128 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1129 			}
1130 			if (seg == nseg - 1) {
1131 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1132 					MPI_SGE_FLAGS_END_OF_BUFFER;
1133 			}
1134 			MPI_pSGE_SET_FLAGS(se, tf);
1135 			se++;
1136 			seg++;
1137 			dm_segs++;
1138 		}
1139 
1140     next_chain:
1141 		/*
1142 		 * If we have more segments to do and we've used up all of
1143 		 * the space in a request area, go allocate another one
1144 		 * and chain to that.
1145 		 */
1146 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1147 			request_t *nrq = mpt_get_request(mpt, FALSE);
1148 
1149 			if (nrq == NULL) {
1150 				error = ENOMEM;
1151 				goto bad;
1152 			}
1153 
1154 			/*
1155 			 * Append the new request area on the tail of our list.
1156 			 */
1157 			if ((trq = req->chain) == NULL) {
1158 				req->chain = nrq;
1159 			} else {
1160 				while (trq->chain != NULL) {
1161 					trq = trq->chain;
1162 				}
1163 				trq->chain = nrq;
1164 			}
1165 			trq = nrq;
1166 			mpt_off = trq->req_vbuf;
1167 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1168 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1169 			}
1170 			nxt_off = 0;
1171 		}
1172 	}
1173 out:
1174 
1175 	/*
1176 	 * Last chance to check whether this CCB needs to be aborted.
1177 	 */
1178 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1179 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1180 			request_t *cmd_req =
1181 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1182 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1183 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1184 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1185 		}
1186 		mpt_prt(mpt,
1187 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1188 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1189 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1190 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1191 		}
1192 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1193 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1194 		xpt_done(ccb);
1195 		CAMLOCK_2_MPTLOCK(mpt);
1196 		mpt_free_request(mpt, req);
1197 		MPTLOCK_2_CAMLOCK(mpt);
1198 		return;
1199 	}
1200 
1201 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1202 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1203 		ccb->ccb_h.timeout_ch =
1204 			timeout(mpt_timeout, (caddr_t)ccb,
1205 				(ccb->ccb_h.timeout * hz) / 1000);
1206 	} else {
1207 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1208 	}
1209 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1210 		int nc = 0;
1211 		mpt_print_request(req->req_vbuf);
1212 		for (trq = req->chain; trq; trq = trq->chain) {
1213 			printf("  Additional Chain Area %d\n", nc++);
1214 			mpt_dump_sgl(trq->req_vbuf, 0);
1215 		}
1216 	}
1217 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1218 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1219 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1220 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1221 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1222 		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1223 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1224 		} else {
1225 			tgt->state = TGT_STATE_MOVING_DATA;
1226 		}
1227 #else
1228 		tgt->state = TGT_STATE_MOVING_DATA;
1229 #endif
1230 	}
1231 	CAMLOCK_2_MPTLOCK(mpt);
1232 	mpt_send_cmd(mpt, req);
1233 	MPTLOCK_2_CAMLOCK(mpt);
1234 }
1235 
1236 static void
1237 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1238 {
1239 	request_t *req, *trq;
1240 	char *mpt_off;
1241 	union ccb *ccb;
1242 	struct mpt_softc *mpt;
1243 	int seg, first_lim;
1244 	uint32_t flags, nxt_off;
1245 	void *sglp;
1246 	MSG_REQUEST_HEADER *hdrp;
1247 	SGE_SIMPLE32 *se;
1248 	SGE_CHAIN32 *ce;
1249 
1250 	req = (request_t *)arg;
1251 	ccb = req->ccb;
1252 
1253 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1254 	req = ccb->ccb_h.ccb_req_ptr;
1255 
1256 	hdrp = req->req_vbuf;
1257 	mpt_off = req->req_vbuf;
1258 
1259 
1260 	if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
1261 		sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1262 	} else /* if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) */ {
1263 		sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1264 	}
1265 
1266 
1267 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1268 		error = EFBIG;
1269 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1270 		    nseg, mpt->max_seg_cnt);
1271 	}
1272 
1273 bad:
1274 	if (error != 0) {
1275 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1276 			request_t *cmd_req =
1277 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1278 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1279 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1280 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1281 		}
1282 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1283 			cam_status status;
1284 			mpt_freeze_ccb(ccb);
1285 			if (error == EFBIG) {
1286 				status = CAM_REQ_TOO_BIG;
1287 			} else if (error == ENOMEM) {
1288 				if (mpt->outofbeer == 0) {
1289 					mpt->outofbeer = 1;
1290 					xpt_freeze_simq(mpt->sim, 1);
1291 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1292 					    "FREEZEQ\n");
1293 				}
1294 				status = CAM_REQUEUE_REQ;
1295 			} else {
1296 				status = CAM_REQ_CMP_ERR;
1297 			}
1298 			mpt_set_ccb_status(ccb, status);
1299 		}
1300 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1301 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1302 		xpt_done(ccb);
1303 		CAMLOCK_2_MPTLOCK(mpt);
1304 		mpt_free_request(mpt, req);
1305 		MPTLOCK_2_CAMLOCK(mpt);
1306 		return;
1307 	}
1308 
1309 	/*
1310 	 * No data to transfer?
1311 	 * Just make a single simple SGL with zero length.
1312 	 */
1313 
1314 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1315 		int tidx = ((char *)sglp) - mpt_off;
1316 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1317 	}
1318 
1319 	if (nseg == 0) {
1320 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1321 		MPI_pSGE_SET_FLAGS(se1,
1322 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1323 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1324 		goto out;
1325 	}
1326 
1327 
1328 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1329 	if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
1330 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1331 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1332 		}
1333 	} else {
1334 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1335 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1336 		}
1337 	}
1338 
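	/*
	 * Sync the data buffer for DMA.  Note that the sense of CAM_DIR_IN
	 * and CAM_DIR_OUT is reversed for target assist requests, hence the
	 * swapped sync operations in the non-SCSI-I/O case below.
	 */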
1339 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1340 		bus_dmasync_op_t op;
1341 		if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
1342 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1343 				op = BUS_DMASYNC_PREREAD;
1344 			} else {
1345 				op = BUS_DMASYNC_PREWRITE;
1346 			}
1347 		} else {
1348 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1349 				op = BUS_DMASYNC_PREWRITE;
1350 			} else {
1351 				op = BUS_DMASYNC_PREREAD;
1352 			}
1353 		}
1354 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1355 	}
1356 
1357 	/*
1358 	 * Okay, fill in what we can at the end of the command frame.
1359 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1360 	 * the command frame.
1361 	 *
1362 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1363 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1364 	 * that.
1365 	 */
1366 
1367 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1368 		first_lim = nseg;
1369 	} else {
1370 		/*
1371 		 * Leave room for CHAIN element
1372 		 */
1373 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1374 	}
1375 
1376 	se = (SGE_SIMPLE32 *) sglp;
1377 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1378 		uint32_t tf;
1379 
1380 		memset(se, 0,sizeof (*se));
1381 		se->Address = dm_segs->ds_addr;
1382 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1383 		tf = flags;
1384 		if (seg == first_lim - 1) {
1385 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1386 		}
1387 		if (seg == nseg - 1) {
1388 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1389 				MPI_SGE_FLAGS_END_OF_BUFFER;
1390 		}
1391 		MPI_pSGE_SET_FLAGS(se, tf);
1392 	}
1393 
1394 	if (seg == nseg) {
1395 		goto out;
1396 	}
1397 
1398 	/*
1399 	 * Tell the IOC where to find the first chain element.
1400 	 */
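	/* The chain offset is expressed in 32-bit words from the request start. */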
1401 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1402 	nxt_off = MPT_RQSL(mpt);
1403 	trq = req;
1404 
1405 	/*
1406 	 * Make up the rest of the data segments out of a chain element
1407 	 * (contained in the current request frame) which points to
1408 	 * SIMPLE32 elements in the next request frame, possibly ending
1409 	 * with *another* chain element (if there's more).
1410 	 */
1411 	while (seg < nseg) {
1412 		int this_seg_lim;
1413 		uint32_t tf, cur_off;
1414 		bus_addr_t chain_list_addr;
1415 
1416 		/*
1417 		 * Point to the chain descriptor. Note that the chain
1418 		 * descriptor is at the end of the *previous* list (whether
1419 		 * chain or simple).
1420 		 */
1421 		ce = (SGE_CHAIN32 *) se;
1422 
1423 		/*
1424 		 * Before we change our current pointer, make sure we won't
1425 		 * overflow the request area with this frame. Note that we
1426 		 * test against 'greater than' here as it's okay in this case
1427 		 * to have next offset be just outside the request area.
1428 		 */
1429 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1430 			nxt_off = MPT_REQUEST_AREA;
1431 			goto next_chain;
1432 		}
1433 
1434 		/*
1435 		 * Set our SGE element pointer to the beginning of the chain
1436 		 * list and update our next chain list offset.
1437 		 */
1438 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1439 		cur_off = nxt_off;
1440 		nxt_off += MPT_RQSL(mpt);
1441 
1442 		/*
1443 		 * Now initialize the chain descriptor.
1444 		 */
1445 		memset(ce, 0, sizeof (*ce));
1446 
1447 		/*
1448 		 * Get the physical address of the chain list.
1449 		 */
1450 		chain_list_addr = trq->req_pbuf;
1451 		chain_list_addr += cur_off;
1452 		ce->Address = chain_list_addr;
1453 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1454 
1455 		/*
1456 		 * If we have more than a frame's worth of segments left,
1457 		 * set up the chain list to have the last element be another
1458 		 * chain descriptor.
1459 		 */
1460 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1461 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1462 			/*
1463 			 * The chain length is the size, in bytes, of the segments
1464 			 * in this frame plus the trailing chain element.
1465 			 *
1466 			 * The next chain descriptor offset is that segment size
1467 			 * expressed in 32-bit words.
1468 			 */
1469 			ce->Length = (this_seg_lim - seg) *
1470 			    sizeof (SGE_SIMPLE32);
1471 			ce->NextChainOffset = ce->Length >> 2;
1472 			ce->Length += sizeof (SGE_CHAIN32);
1473 		} else {
1474 			this_seg_lim = nseg;
1475 			ce->Length = (this_seg_lim - seg) *
1476 			    sizeof (SGE_SIMPLE32);
1477 		}
1478 
1479 		/*
1480 		 * Fill in the chain list SGE elements with our segment data.
1481 		 *
1482 		 * If we're the last element in this chain list, set the last
1483 		 * element flag. If we're the completely last element period,
1484 		 * set the end of list and end of buffer flags.
1485 		 */
1486 		while (seg < this_seg_lim) {
1487 			memset(se, 0, sizeof (*se));
1488 			se->Address = dm_segs->ds_addr;
1489 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1490 			tf = flags;
1491 			if (seg ==  this_seg_lim - 1) {
1492 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1493 			}
1494 			if (seg == nseg - 1) {
1495 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1496 					MPI_SGE_FLAGS_END_OF_BUFFER;
1497 			}
1498 			MPI_pSGE_SET_FLAGS(se, tf);
1499 			se++;
1500 			seg++;
1501 			dm_segs++;
1502 		}
1503 
1504     next_chain:
1505 		/*
1506 		 * If we have more segments to do and we've used up all of
1507 		 * the space in a request area, go allocate another one
1508 		 * and chain to that.
1509 		 */
1510 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1511 			request_t *nrq = mpt_get_request(mpt, FALSE);
1512 
1513 			if (nrq == NULL) {
1514 				error = ENOMEM;
1515 				goto bad;
1516 			}
1517 
1518 			/*
1519 			 * Append the new request area on the tail of our list.
1520 			 */
1521 			if ((trq = req->chain) == NULL) {
1522 				req->chain = nrq;
1523 			} else {
1524 				while (trq->chain != NULL) {
1525 					trq = trq->chain;
1526 				}
1527 				trq->chain = nrq;
1528 			}
1529 			trq = nrq;
1530 			mpt_off = trq->req_vbuf;
1531 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1532 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1533 			}
1534 			nxt_off = 0;
1535 		}
1536 	}
1537 out:
1538 
1539 	/*
1540 	 * Last chance to check whether this CCB needs to be aborted.
1541 	 */
1542 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1543 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1544 			request_t *cmd_req =
1545 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1546 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1547 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1548 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1549 		}
1550 		mpt_prt(mpt, "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1551 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1552 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1553 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1554 		}
1555 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1556 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1557 		xpt_done(ccb);
1558 		CAMLOCK_2_MPTLOCK(mpt);
1559 		mpt_free_request(mpt, req);
1560 		MPTLOCK_2_CAMLOCK(mpt);
1561 		return;
1562 	}
1563 
1564 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1565 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1566 		ccb->ccb_h.timeout_ch =
1567 			timeout(mpt_timeout, (caddr_t)ccb,
1568 				(ccb->ccb_h.timeout * hz) / 1000);
1569 	} else {
1570 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1571 	}
1572 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1573 		int nc = 0;
1574 		mpt_print_request(req->req_vbuf);
1575 		for (trq = req->chain; trq; trq = trq->chain) {
1576 			printf("  Additional Chain Area %d\n", nc++);
1577 			mpt_dump_sgl(trq->req_vbuf, 0);
1578 		}
1579 	}
1580 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1581 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1582 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1583 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1584 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1585 		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1586 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1587 		} else {
1588 			tgt->state = TGT_STATE_MOVING_DATA;
1589 		}
1590 #else
1591 		tgt->state = TGT_STATE_MOVING_DATA;
1592 #endif
1593 	}
1594 	CAMLOCK_2_MPTLOCK(mpt);
1595 	mpt_send_cmd(mpt, req);
1596 	MPTLOCK_2_CAMLOCK(mpt);
1597 }
1598 
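/*
 * Start a CCB from CAM: allocate a request, build the SCSI I/O (or RAID
 * pass-through) message for the IOC and map any data buffer for DMA.
 */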
1599 static void
1600 mpt_start(struct cam_sim *sim, union ccb *ccb)
1601 {
1602 	request_t *req;
1603 	struct mpt_softc *mpt;
1604 	MSG_SCSI_IO_REQUEST *mpt_req;
1605 	struct ccb_scsiio *csio = &ccb->csio;
1606 	struct ccb_hdr *ccbh = &ccb->ccb_h;
1607 	bus_dmamap_callback_t *cb;
1608 	int raid_passthru;
1609 
1610 	/* Get the pointer for the physical adapter */
1611 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1612 	raid_passthru = (sim == mpt->phydisk_sim);
1613 
1614 	CAMLOCK_2_MPTLOCK(mpt);
1615 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
1616 		if (mpt->outofbeer == 0) {
1617 			mpt->outofbeer = 1;
1618 			xpt_freeze_simq(mpt->sim, 1);
1619 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
1620 		}
1621 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1622 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
1623 		MPTLOCK_2_CAMLOCK(mpt);
1624 		xpt_done(ccb);
1625 		return;
1626 	}
1627 	MPTLOCK_2_CAMLOCK(mpt);
1628 
1629 	if (sizeof (bus_addr_t) > 4) {
1630 		cb = mpt_execute_req_a64;
1631 	} else {
1632 		cb = mpt_execute_req;
1633 	}
1634 
1635 #if 0
1636 	COWWWWW
1637 	if (raid_passthru) {
1638 		status = mpt_raid_quiesce_disk(mpt, mpt->raid_disks + ccb->ccb_h.target_id,
1639 		     request_t *req)
1640 	}
1641 #endif
1642 
1643 	/*
1644 	 * Link the ccb and the request structure so we can find
1645 	 * the other knowing either the request or the ccb
1646 	 */
1647 	req->ccb = ccb;
1648 	ccb->ccb_h.ccb_req_ptr = req;
1649 
1650 	/* Now we build the command for the IOC */
1651 	mpt_req = req->req_vbuf;
1652 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
1653 
1654 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1655 	if (raid_passthru) {
1656 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
1657 	}
1658 	mpt_req->Bus = 0;	/* we don't have multiport devices yet */
1659 	mpt_req->SenseBufferLength =
1660 		(csio->sense_len < MPT_SENSE_SIZE) ?
1661 		 csio->sense_len : MPT_SENSE_SIZE;
1662 
1663 	/*
1664 	 * We use the message context to find the request structure when we
1665 	 * get the command completion interrupt from the IOC.
1666 	 */
1667 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
1668 
1669 	/* Which physical device to do the I/O on */
1670 	mpt_req->TargetID = ccb->ccb_h.target_id;
1671 
1672 	/* We assume a single level LUN type */
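	/* LUNs of 256 and above are encoded with the flat addressing format. */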
1673 	if (ccb->ccb_h.target_lun >= 256) {
1674 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
1675 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
1676 	} else {
1677 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
1678 	}
1679 
1680 	/* Set the direction of the transfer */
1681 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1682 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
1683 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1684 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
1685 	} else {
1686 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
1687 	}
1688 
1689 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1690 		switch(ccb->csio.tag_action) {
1691 		case MSG_HEAD_OF_Q_TAG:
1692 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
1693 			break;
1694 		case MSG_ACA_TASK:
1695 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
1696 			break;
1697 		case MSG_ORDERED_Q_TAG:
1698 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
1699 			break;
1700 		case MSG_SIMPLE_Q_TAG:
1701 		default:
1702 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1703 			break;
1704 		}
1705 	} else {
1706 		if (mpt->is_fc || mpt->is_sas) {
1707 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1708 		} else {
1709 			/* XXX No such thing for a target doing packetized. */
1710 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
1711 		}
1712 	}
1713 
1714 	if (mpt->is_fc == 0 && mpt->is_sas == 0) {
1715 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1716 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
1717 		}
1718 	}
1719 
1720 	/* Copy the scsi command block into place */
1721 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1722 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
1723 	} else {
1724 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
1725 	}
1726 
1727 	mpt_req->CDBLength = csio->cdb_len;
1728 	mpt_req->DataLength = csio->dxfer_len;
1729 	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
1730 
1731 	/*
1732 	 * If we have any data to send with this command map it into bus space.
1733 	 */
1734 
1735 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1736 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
1737 			/*
1738 			 * We've been given a pointer to a single buffer.
1739 			 */
1740 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
1741 				/*
1742 				 * Virtual address that needs to be translated into
1743 				 * one or more physical address ranges.
1744 				 */
1745 				int error;
1746 				int s = splsoftvm();
1747 				error = bus_dmamap_load(mpt->buffer_dmat,
1748 				    req->dmap, csio->data_ptr, csio->dxfer_len,
1749 				    cb, req, 0);
1750 				splx(s);
1751 				if (error == EINPROGRESS) {
1752 					/*
1753 					 * So as to maintain ordering,
1754 					 * freeze the controller queue
1755 					 * until our mapping is
1756 					 * returned.
1757 					 */
1758 					xpt_freeze_simq(mpt->sim, 1);
1759 					ccbh->status |= CAM_RELEASE_SIMQ;
1760 				}
1761 			} else {
1762 				/*
1763 				 * We have been given a pointer to single
1764 				 * physical buffer.
1765 				 */
1766 				struct bus_dma_segment seg;
1767 				seg.ds_addr =
1768 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1769 				seg.ds_len = csio->dxfer_len;
1770 				(*cb)(req, &seg, 1, 0);
1771 			}
1772 		} else {
1773 			/*
1774 			 * We have been given a list of addresses.
1775 			 * This case could be easily supported but they are not
1776 			 * currently generated by the CAM subsystem so there
1777 			 * is no point in wasting the time right now.
1778 			 */
1779 			struct bus_dma_segment *segs;
1780 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
1781 				(*cb)(req, NULL, 0, EFAULT);
1782 			} else {
1783 				/* Just use the segments provided */
1784 				segs = (struct bus_dma_segment *)csio->data_ptr;
1785 				(*cb)(req, segs, csio->sglist_cnt, 0);
1786 			}
1787 		}
1788 	} else {
1789 		(*cb)(req, NULL, 0, 0);
1790 	}
1791 }
1792 
1793 static int
1794 mpt_bus_reset(struct mpt_softc *mpt, int sleep_ok)
1795 {
1796 	int   error;
1797 	u_int status;
1798 
1799 	error = mpt_scsi_send_tmf(mpt, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1800 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
1801 	    0, 0, 0, 0, sleep_ok);
1802 
1803 	if (error != 0) {
1804 		/*
1805 		 * mpt_scsi_send_tmf hard resets on failure, so no
1806 		 * need to do so here.
1807 		 */
1808 		mpt_prt(mpt,
1809 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
1810 		return (EIO);
1811 	}
1812 
1813 	/* Wait for bus reset to be processed by the IOC. */
1814 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
1815 	    REQ_STATE_DONE, sleep_ok, 5000);
1816 
1817 	status = mpt->tmf_req->IOCStatus;
1818 	mpt->tmf_req->state = REQ_STATE_FREE;
1819 	if (error) {
1820 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
1821 			"Resetting controller.\n");
1822 		mpt_reset(mpt, /*reinit*/TRUE);
1823 		return (ETIMEDOUT);
1824 	} else if ((status & MPI_IOCSTATUS_MASK) != MPI_SCSI_STATUS_SUCCESS) {
1825 		mpt_prt(mpt, "mpt_bus_reset: TMF Status %d. "
1826 			"Resetting controller.\n", status);
1827 		mpt_reset(mpt, /*reinit*/TRUE);
1828 		return (EIO);
1829 	}
1830 	return (0);
1831 }
1832 
1833 static int
1834 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
1835 {
1836 	int r = 0;
1837 	request_t *req;
1838 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
1839 
1840 	req = mpt_get_request(mpt, FALSE);
1841 	if (req == NULL) {
1842 		return (ENOMEM);
1843 	}
1844 	fc = req->req_vbuf;
1845 	memset(fc, 0, sizeof(*fc));
1846 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
1847 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
1848 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
1849 	mpt_send_cmd(mpt, req);
1850 	if (dowait) {
1851 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
1852 		    REQ_STATE_DONE, FALSE, 60 * 1000);
1853 		if (r == 0) {
1854 			mpt_free_request(mpt, req);
1855 		}
1856 	}
1857 	return (r);
1858 }
1859 
1860 static int
1861 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
1862 	      MSG_EVENT_NOTIFY_REPLY *msg)
1863 {
1864 	switch(msg->Event & 0xFF) {
1865 	case MPI_EVENT_UNIT_ATTENTION:
1866 		mpt_prt(mpt, "Bus: 0x%02x TargetID: 0x%02x\n",
1867 		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1868 		break;
1869 
1870 	case MPI_EVENT_IOC_BUS_RESET:
1871 		/* We generated a bus reset */
1872 		mpt_prt(mpt, "IOC Bus Reset Port: %d\n",
1873 		    (msg->Data[0] >> 8) & 0xff);
1874 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
1875 		break;
1876 
1877 	case MPI_EVENT_EXT_BUS_RESET:
1878 		/* Someone else generated a bus reset */
1879 		mpt_prt(mpt, "External Bus Reset Detected\n");
1880 		/*
1881 		 * These replies don't return EventData like the MPI
1882 		 * spec says they do
1883 		 */
1884 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
1885 		break;
1886 
1887 	case MPI_EVENT_RESCAN:
1888 		/*
1889 		 * In general this means a device has been added
1890 		 * to the loop.
1891 		 */
1892 		mpt_prt(mpt, "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
1893 /*		xpt_async(AC_FOUND_DEVICE, path, NULL);  */
1894 		break;
1895 
1896 	case MPI_EVENT_LINK_STATUS_CHANGE:
1897 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
1898 		    (msg->Data[1] >> 8) & 0xff,
1899 		    ((msg->Data[0] & 0xff) == 0)?  "Failed" : "Active");
1900 		break;
1901 
1902 	case MPI_EVENT_LOOP_STATE_CHANGE:
1903 		switch ((msg->Data[0] >> 16) & 0xff) {
1904 		case 0x01:
1905 			mpt_prt(mpt,
1906 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
1907 			    "(Loop Initialization)\n",
1908 			    (msg->Data[1] >> 8) & 0xff,
1909 			    (msg->Data[0] >> 8) & 0xff,
1910 			    (msg->Data[0]     ) & 0xff);
1911 			switch ((msg->Data[0] >> 8) & 0xff) {
1912 			case 0xF7:
1913 				if ((msg->Data[0] & 0xff) == 0xF7) {
1914 					mpt_prt(mpt, "Device needs AL_PA\n");
1915 				} else {
1916 					mpt_prt(mpt, "Device %02x doesn't like "
1917 					    "FC performance\n",
1918 					    msg->Data[0] & 0xFF);
1919 				}
1920 				break;
1921 			case 0xF8:
1922 				if ((msg->Data[0] & 0xff) == 0xF7) {
1923 					mpt_prt(mpt, "Device had loop failure "
1924 					    "at its receiver prior to acquiring"
1925 					    " AL_PA\n");
1926 				} else {
1927 					mpt_prt(mpt, "Device %02x detected loop"
1928 					    " failure at its receiver\n",
1929 					    msg->Data[0] & 0xFF);
1930 				}
1931 				break;
1932 			default:
1933 				mpt_prt(mpt, "Device %02x requests that device "
1934 				    "%02x reset itself\n",
1935 				    msg->Data[0] & 0xFF,
1936 				    (msg->Data[0] >> 8) & 0xFF);
1937 				break;
1938 			}
1939 			break;
1940 		case 0x02:
1941 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
1942 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
1943 			    (msg->Data[1] >> 8) & 0xff, /* Port */
1944 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
1945 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
1946 			break;
1947 		case 0x03:
1948 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
1949 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
1950 			    (msg->Data[1] >> 8) & 0xff, /* Port */
1951 			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
1952 			    (msg->Data[0]     ) & 0xff  /* Character 4 */);
1953 			break;
1954 		default:
1955 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
1956 			    "FC event (%02x %02x %02x)\n",
1957 			    (msg->Data[1] >> 8) & 0xff, /* Port */
1958 			    (msg->Data[0] >> 16) & 0xff, /* Event */
1959 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
1960 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
1961 		}
1962 		break;
1963 
1964 	case MPI_EVENT_LOGOUT:
1965 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
1966 		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
1967 		break;
1968 	case MPI_EVENT_EVENT_CHANGE:
1969 		mpt_lprt(mpt, MPT_PRT_DEBUG,
1970 		    "mpt_cam_event: MPI_EVENT_EVENT_CHANGE\n");
1971 		break;
1972 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1973 		/*
1974 		 * Devices are attachin'.....
1975 		 */
1976 		mpt_prt(mpt,
1977 		    "mpt_cam_event: MPI_EVENT_SAS_DEVICE_STATUS_CHANGE\n");
1978 		break;
1979 	default:
1980 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
1981 		    msg->Event & 0xFF);
1982 		return (0);
1983 	}
1984 	return (1);
1985 }
1986 
1987 /*
1988  * Reply path for all SCSI I/O requests, called from our
1989  * interrupt handler by extracting our handler index from
1990  * the MsgContext field of the reply from the IOC.
1991  *
1992  * This routine is optimized for the common case of a
1993  * completion without error.  All exception handling is
1994  * offloaded to non-inlined helper routines to minimize
1995  * cache footprint.
1996  */
1997 static int
1998 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
1999     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2000 {
2001 	MSG_SCSI_IO_REQUEST *scsi_req;
2002 	union ccb *ccb;
2003 
2004 	if (req->state == REQ_STATE_FREE) {
2005 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2006 		return (TRUE);
2007 	}
2008 
2009 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2010 	ccb = req->ccb;
2011 	if (ccb == NULL) {
2012 		mpt_prt(mpt, "req %p:%u without CCB (state %#x "
2013 		    "func %#x index %u rf %p)\n", req, req->serno, req->state,
2014 		    scsi_req->Function, req->index, reply_frame);
2015 		mpt_print_scsi_io_request(scsi_req);
2016 		return (TRUE);
2017 	}
2018 
2019 	untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
2020 
2021 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2022 		bus_dmasync_op_t op;
2023 
2024 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2025 			op = BUS_DMASYNC_POSTREAD;
2026 		else
2027 			op = BUS_DMASYNC_POSTWRITE;
2028 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2029 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2030 	}
2031 
2032 	if (reply_frame == NULL) {
2033 		/*
2034 		 * Context only reply, completion without error status.
2035 		 */
2036 		ccb->csio.resid = 0;
2037 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2038 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2039 	} else {
2040 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2041 	}
2042 
2043 	if (mpt->outofbeer) {
2044 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2045 		mpt->outofbeer = 0;
2046 		mpt_lprt(mpt,  MPT_PRT_DEBUG, "THAWQ\n");
2047 	}
2048 	if (scsi_req->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH &&
2049 	    scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2050 		struct scsi_inquiry_data *inq;
2051 		/*
2052 		 * Fake out the device type so that only the
2053 		 * pass-thru device will attach.
2054 		 */
2055 		inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2056 		inq->device &= ~0x1F;
2057 		inq->device |= T_NODEVICE;
2058 	}
2059 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2060 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2061 	MPTLOCK_2_CAMLOCK(mpt);
2062 	xpt_done(ccb);
2063 	CAMLOCK_2_MPTLOCK(mpt);
2064 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2065 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2066 	} else {
2067 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2068 	}
2069 	if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2070 		mpt_free_request(mpt, req);
2071 		return (/*free_reply*/TRUE);
2072 	}
2073 	req->state &= ~REQ_STATE_QUEUED;
2074 	req->state |= REQ_STATE_DONE;
2075 	wakeup(req);
2076 	return (/*free_reply*/TRUE);
2077 }
2078 
2079 static int
2080 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2081     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2082 {
2083 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2084 	uint16_t		  status;
2085 
2086 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2087 
2088 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2089 
2090 	/* Record status of TMF for any waiters. */
2091 	req->IOCStatus = tmf_reply->IOCStatus;
2092 	status = le16toh(tmf_reply->IOCStatus);
2093 	mpt_lprt(mpt, MPT_PRT_INFO, "TMF complete: req %p:%u status 0x%x\n",
2094 	    req, req->serno, status);
2095 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2096 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2097 		req->state |= REQ_STATE_DONE;
2098 		wakeup(req);
2099 	} else {
2100 		mpt->tmf_req->state = REQ_STATE_FREE;
2101 	}
2102 	return (TRUE);
2103 }
2104 
2105 
2106 /*
2107  * XXX: Move to definitions file
2108  */
2109 #define	ELS	0x22
2110 #define	FC4LS	0x32
2111 #define	ABTS	0x81
2112 #define	BA_ACC	0x84
2113 
2114 #define	LS_RJT	0x01
2115 #define	LS_ACC	0x02
2116 #define	PLOGI	0x03
2117 #define	LOGO	0x05
2118 #define SRR	0x14
2119 #define PRLI	0x20
2120 #define PRLO	0x21
2121 #define ADISC	0x52
2122 #define RSCN	0x61
2123 
2124 static void
2125 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2126     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2127 {
2128 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2129 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2130 
2131 	/*
2132 	 * We are going to reuse the ELS request to send this response back.
2133 	 */
2134 	rsp = &tmp;
2135 	memset(rsp, 0, sizeof(*rsp));
2136 
2137 #ifdef	USE_IMMEDIATE_LINK_DATA
2138 	/*
2139 	 * The IMMEDIATE link data option doesn't seem to work.
2140 	 */
2141 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2142 #endif
2143 	rsp->RspLength = length;
2144 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2145 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2146 
2147 	/*
2148 	 * Copy over information from the original reply frame to
2149 	 * its correct place in the response.
2150 	 */
2151 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2152 
2153 	/*
2154 	 * And now copy back the temporary area to the original frame.
2155 	 */
2156 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2157 	rsp = req->req_vbuf;
2158 
2159 #ifdef	USE_IMMEDIATE_LINK_DATA
2160 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2161 #else
2162 {
2163 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2164 	bus_addr_t paddr = req->req_pbuf;
2165 	paddr += MPT_RQSL(mpt);
2166 
2167 	se->FlagsLength =
2168 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2169 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2170 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2171 		MPI_SGE_FLAGS_END_OF_LIST	|
2172 		MPI_SGE_FLAGS_END_OF_BUFFER;
2173 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
2174 	se->FlagsLength |= (length);
2175 	se->Address = (uint32_t) paddr;
2176 }
2177 #endif
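	/*
	 * The simple SGE's FlagsLength word packs the SGE flag bits into
	 * the high-order byte (hence the MPI_SGE_FLAGS_SHIFT) and the byte
	 * count of the response payload into the low-order bits.
	 */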
2178 
2179 	/*
2180 	 * Send it on...
2181 	 */
2182 	mpt_send_cmd(mpt, req);
2183 }
2184 
2185 static int
2186 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2187     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2188 {
2189 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2190 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2191 	U8 rctl;
2192 	U8 type;
2193 	U8 cmd;
2194 	U16 status = le16toh(reply_frame->IOCStatus);
2195 	U32 *elsbuf;
2196 	int do_refresh = TRUE;
2197 
2198 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC_ELS Complete: req %p:%u, reply %p\n",
2199 		 req, req->serno, reply_frame);
2200 
2201 	if (status != MPI_IOCSTATUS_SUCCESS) {
2202 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2203 		    status, reply_frame->Function);
2204 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2205 			/*
2206 			 * XXX: to get around shutdown issue
2207 			 */
2208 			mpt->disabled = 1;
2209 			return (TRUE);
2210 		}
2211 		return (TRUE);
2212 	}
2213 
2214 	/*
2215 	 * If this is the completion of a link service response we sent,
2216 	 * recycle the request into a fresh link service buffer post.
2217 	 */
2218 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2219 		mpt_fc_add_els(mpt, req);
2220 		return (TRUE);
2221 	}
2222 
2223 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2224 		req->state &= ~REQ_STATE_QUEUED;
2225 		req->state |= REQ_STATE_DONE;
2226 		if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2227 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2228 			    "Async Primitive Send Complete\n");
2229 			mpt_free_request(mpt, req);
2230 		} else {
2231 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2232 			    "Sync Primitive Send Complete\n");
2233 			wakeup(req);
2234 		}
2235 		return (TRUE);
2236 	}
2237 
2238 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2239 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2240 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2241 		    rp->MsgLength, rp->MsgFlags);
2242 		return (TRUE);
2243 	}
2244 
2245 	if (rp->MsgLength <= 5) {
2246 		/*
2247 		 * This is just an ack of an original ELS buffer post.
2248 		 */
2249 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2250 		    "Recv'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2251 		return (TRUE);
2252 	}
2253 
2254 
2255 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2256 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2257 
2258 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2259 	cmd = be32toh(elsbuf[0]) >> 24;
2260 
2261 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2262 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2263 		return (TRUE);
2264 	}
2265 
2266 
2267 	if (rctl == ELS && type == 1) {
2268 		switch (cmd) {
2269 		case PRLI:
2270 			/*
2271 			 * Send back a PRLI ACC
2272 			 */
2273 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2274 			    le32toh(rp->Wwn.PortNameHigh),
2275 			    le32toh(rp->Wwn.PortNameLow));
2276 			elsbuf[0] = htobe32(0x02100014);
2277 			elsbuf[1] |= htobe32(0x00000100);
2278 			elsbuf[4] = htobe32(0x00000002);
2279 			if (mpt->role & MPT_ROLE_TARGET)
2280 				elsbuf[4] |= htobe32(0x00000010);
2281 			if (mpt->role & MPT_ROLE_INITIATOR)
2282 				elsbuf[4] |= htobe32(0x00000020);
2283 			mpt_fc_els_send_response(mpt, req, rp, 20);
2284 			do_refresh = FALSE;
2285 			break;
2286 		case PRLO:
2287 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2288 			elsbuf[0] = htobe32(0x02100014);
2289 			elsbuf[1] = htobe32(0x08000100);
2290 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2291 			    le32toh(rp->Wwn.PortNameHigh),
2292 			    le32toh(rp->Wwn.PortNameLow));
2293 			mpt_fc_els_send_response(mpt, req, rp, 20);
2294 			do_refresh = FALSE;
2295 			break;
2296 		default:
2297 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2298 			break;
2299 		}
2300 	} else if (rctl == ABTS && type == 0) {
2301 		uint16_t rx_id = le16toh(rp->Rxid);
2302 		uint16_t ox_id = le16toh(rp->Oxid);
2303 		request_t *tgt_req = NULL;
2304 
2305 		mpt_prt(mpt,
2306 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2307 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2308 		    le32toh(rp->Wwn.PortNameLow));
2309 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2310 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2311 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2312 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2313 		} else {
2314 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2315 		}
2316 		if (tgt_req) {
2317 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2318 			uint8_t *vbuf;
2319 			union ccb *ccb = tgt->ccb;
2320 			uint32_t ct_id;
2321 
2322 			vbuf = tgt_req->req_vbuf;
2323 			vbuf += MPT_RQSL(mpt);
2324 
2325 			/*
2326 			 * Check to make sure we have the correct command:
2327 			 * the reply descriptor in the target state should
2328 			 * contain an IoIndex that matches the RX_ID of the
2329 			 * ABTS.
2330 			 *
2331 			 * It'd be nice to have OX_ID to crosscheck with
2332 			 * as well.
2333 			 */
2334 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2335 
2336 			if (ct_id != rx_id) {
2337 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2338 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2339 				    rx_id, ct_id);
2340 				goto skip;
2341 			}
2342 
2343 			ccb = tgt->ccb;
2344 			if (ccb) {
2345 				mpt_prt(mpt,
2346 				    "CCB (%p): lun %u flags %x status %x\n",
2347 				    ccb, ccb->ccb_h.target_lun,
2348 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2349 			}
2350 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2351 			    "%x nxfers %x\n", tgt->state,
2352 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2353 			    tgt->nxfers);
2354   skip:
2355 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2356 				mpt_prt(mpt, "unable to start TargetAbort\n");
2357 			}
2358 		} else {
2359 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2360 		}
2361 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2362 		elsbuf[0] = htobe32(0);
2363 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2364 		elsbuf[2] = htobe32(0x000ffff);
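		/*
		 * This builds a minimal BA_ACC payload (per the standard
		 * BA_ACC layout): word 1 echoes the originator and responder
		 * exchange ids, and word 2 gives the low/high SEQ_CNT range
		 * (0 through 0xffff), i.e. the whole exchange is accepted
		 * as aborted.
		 */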
2365 		/*
2366 		 * Dork with the reply frame so that the response to it
2367 		 * will be correct.
2368 		 */
2369 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2370 		mpt_fc_els_send_response(mpt, req, rp, 12);
2371 		do_refresh = FALSE;
2372 	} else {
2373 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2374 	}
2375 	if (do_refresh == TRUE) {
2376 		mpt_fc_add_els(mpt, req);
2377 	}
2378 	return (TRUE);
2379 }
2380 
2381 /*
2382  * Clean up all SCSI Initiator personality state in response
2383  * to a controller reset.
2384  */
2385 static void
2386 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2387 {
2388 	/*
2389 	 * The pending list is already run down by
2390 	 * the generic handler.  Perform the same
2391 	 * operation on the timed out request list.
2392 	 */
2393 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
2394 				   MPI_IOCSTATUS_INVALID_STATE);
2395 
2396 	/*
2397 	 * Inform the XPT that a bus reset has occurred.
2398 	 */
2399 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
2400 }
2401 
2402 /*
2403  * Parse additional completion information in the reply
2404  * frame for SCSI I/O requests.
2405  */
2406 static int
2407 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
2408 			     MSG_DEFAULT_REPLY *reply_frame)
2409 {
2410 	union ccb *ccb;
2411 	MSG_SCSI_IO_REPLY *scsi_io_reply;
2412 	u_int ioc_status;
2413 	u_int sstate;
2414 	u_int loginfo;
2415 
2416 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
2417 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
2418 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
2419 		("MPT SCSI I/O Handler called with incorrect reply type"));
2420 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
2421 		("MPT SCSI I/O Handler called with continuation reply"));
2422 
2423 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
2424 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
2425 	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
2426 	ioc_status &= MPI_IOCSTATUS_MASK;
2427 	sstate = scsi_io_reply->SCSIState;
2428 
2429 	ccb = req->ccb;
2430 	ccb->csio.resid =
2431 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
2432 
2433 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
2434 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
2435 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2436 		ccb->csio.sense_resid =
2437 		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
2438 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
2439 		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
2440 	}
2441 
2442 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
2443 		/*
2444 		 * Tag messages rejected, but non-tagged retry
2445 		 * was successful.
2446 XXXX
2447 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
2448 		 */
2449 	}
2450 
2451 	switch(ioc_status) {
2452 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2453 		/*
2454 		 * XXX
2455 		 * Linux driver indicates that a zero
2456 		 * transfer length with this error code
2457 		 * indicates a CRC error.
2458 		 *
2459 		 * No need to swap the bytes for checking
2460 		 * against zero.
2461 		 */
2462 		if (scsi_io_reply->TransferCount == 0) {
2463 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2464 			break;
2465 		}
2466 		/* FALLTHROUGH */
2467 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
2468 	case MPI_IOCSTATUS_SUCCESS:
2469 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
2470 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
2471 			/*
2472 			 * Status was never returned for this transaction.
2473 			 */
2474 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
2475 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
2476 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
2477 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
2478 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
2479 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
2480 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
2481 
2482 			/* XXX Handle SPI-Packet and FCP-2 response info. */
2483 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2484 		} else
2485 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2486 		break;
2487 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
2488 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
2489 		break;
2490 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
2491 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2492 		break;
2493 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2494 		/*
2495 		 * Since selection timeouts and "device really not
2496 		 * there" are grouped into this error code, report
2497 		 * selection timeout.  Selection timeouts are
2498 		 * typically retried before giving up on the device
2499 		 * whereas "device not there" errors are considered
2500 		 * unretryable.
2501 		 */
2502 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
2503 		break;
2504 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2505 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
2506 		break;
2507 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
2508 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
2509 		break;
2510 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
2511 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
2512 		break;
2513 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2514 		ccb->ccb_h.status = CAM_UA_TERMIO;
2515 		break;
2516 	case MPI_IOCSTATUS_INVALID_STATE:
2517 		/*
2518 		 * The IOC has been reset.  Emulate a bus reset.
2519 		 */
2520 		/* FALLTHROUGH */
2521 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
2522 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2523 		break;
2524 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
2525 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
2526 		/*
2527 		 * Don't clobber any timeout status that has
2528 		 * already been set for this transaction.  We
2529 		 * want the SCSI layer to be able to differentiate
2530 		 * between the command we aborted due to timeout
2531 		 * and any innocent bystanders.
2532 		 */
2533 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
2534 			break;
2535 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
2536 		break;
2537 
2538 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2539 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
2540 		break;
2541 	case MPI_IOCSTATUS_BUSY:
2542 		mpt_set_ccb_status(ccb, CAM_BUSY);
2543 		break;
2544 	case MPI_IOCSTATUS_INVALID_FUNCTION:
2545 	case MPI_IOCSTATUS_INVALID_SGL:
2546 	case MPI_IOCSTATUS_INTERNAL_ERROR:
2547 	case MPI_IOCSTATUS_INVALID_FIELD:
2548 	default:
2549 		/* XXX
2550 		 * Some of the above may need to kick
2551 		 * off a recovery action!!!!
2552 		 */
2553 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2554 		break;
2555 	}
2556 
2557 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2558 		mpt_freeze_ccb(ccb);
2559 	}
2560 
2561 	return (TRUE);
2562 }
2563 
2564 static void
2565 mpt_action(struct cam_sim *sim, union ccb *ccb)
2566 {
2567 	struct	mpt_softc *mpt;
2568 	struct	ccb_trans_settings *cts;
2569 	u_int	tgt;
2570 	int	raid_passthru;
2571 
2572 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
2573 
2574 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
2575 	raid_passthru = (sim == mpt->phydisk_sim);
2576 
2577 	tgt = ccb->ccb_h.target_id;
2578 	if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ &&
2579 	    ccb->ccb_h.func_code != XPT_RESET_BUS) {
2580 		CAMLOCK_2_MPTLOCK(mpt);
2581 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2582 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2583 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2584 			MPTLOCK_2_CAMLOCK(mpt);
2585 			xpt_done(ccb);
2586 			return;
2587 		}
2588 		MPTLOCK_2_CAMLOCK(mpt);
2589 	}
2590 
2591 	ccb->ccb_h.ccb_mpt_ptr = mpt;
2592 
2593 	switch (ccb->ccb_h.func_code) {
2594 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2595 		/*
2596 		 * Do a couple of preliminary checks...
2597 		 */
2598 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2599 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2600 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2601 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2602 				xpt_done(ccb);
2603 				break;
2604 			}
2605 		}
2606 		/* Max supported CDB length is 16 bytes */
2607 		/* XXX Unless we implement the new 32byte message type */
2608 		if (ccb->csio.cdb_len >
2609 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
2610 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2611 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2612 			xpt_done(ccb);
2613 			return;
2614 		}
2615 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2616 		mpt_start(sim, ccb);
2617 		break;
2618 
2619 	case XPT_RESET_BUS:
2620 		mpt_lprt(mpt, MPT_PRT_DEBUG, "XPT_RESET_BUS\n");
2621 		if (!raid_passthru) {
2622 			CAMLOCK_2_MPTLOCK(mpt);
2623 			(void)mpt_bus_reset(mpt, /*sleep_ok*/FALSE);
2624 			MPTLOCK_2_CAMLOCK(mpt);
2625 		}
2626 		/*
2627 		 * mpt_bus_reset is always successful in that it
2628 		 * will fall back to a hard reset should a bus
2629 		 * reset attempt fail.
2630 		 */
2631 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2632 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2633 		xpt_done(ccb);
2634 		break;
2635 
2636 	case XPT_ABORT:
2637 	{
2638 		union ccb *accb = ccb->cab.abort_ccb;
2639 		CAMLOCK_2_MPTLOCK(mpt);
2640 		switch (accb->ccb_h.func_code) {
2641 		case XPT_ACCEPT_TARGET_IO:
2642 		case XPT_IMMED_NOTIFY:
2643         		ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
2644 			break;
2645 		case XPT_CONT_TARGET_IO:
2646 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
2647 			ccb->ccb_h.status = CAM_UA_ABORT;
2648 			break;
2649 		case XPT_SCSI_IO:
2650 			ccb->ccb_h.status = CAM_UA_ABORT;
2651 			break;
2652 		default:
2653 			ccb->ccb_h.status = CAM_REQ_INVALID;
2654 			break;
2655 		}
2656 		MPTLOCK_2_CAMLOCK(mpt);
2657 		xpt_done(ccb);
2658 		break;
2659 	}
2660 
2661 #ifdef	CAM_NEW_TRAN_CODE
2662 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2663 #else
2664 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2665 #endif
2666 #define	DP_DISC_ENABLE	0x1
2667 #define	DP_DISC_DISABL	0x2
2668 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
2669 
2670 #define	DP_TQING_ENABLE	0x4
2671 #define	DP_TQING_DISABL	0x8
2672 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
2673 
2674 #define	DP_WIDE		0x10
2675 #define	DP_NARROW	0x20
2676 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
2677 
2678 #define	DP_SYNC		0x40
2679 
2680 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2681 		cts = &ccb->cts;
2682 		if (!IS_CURRENT_SETTINGS(cts)) {
2683 			mpt_prt(mpt, "Attempt to set User settings\n");
2684 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2685 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2686 			xpt_done(ccb);
2687 			break;
2688 		}
2689 		if (mpt->is_fc == 0 && mpt->is_sas == 0) {
2690 			uint8_t dval = 0;
2691 			u_int period = 0, offset = 0;
2692 #ifndef	CAM_NEW_TRAN_CODE
2693 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2694 				dval |= DP_DISC_ENABLE;
2695 			}
2696 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2697 				dval |= DP_TQING_ENABLE;
2698 			}
2699 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2700 				if (cts->bus_width)
2701 					dval |= DP_WIDE;
2702 				else
2703 					dval |= DP_NARROW;
2704 			}
2705 			/*
2706 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2707 			 * of nonzero will cause us to go to the
2708 			 * selected (from NVRAM) maximum value for
2709 			 * this device. At a later point, we'll
2710 			 * allow finer control.
2711 			 */
2712 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2713 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
2714 				dval |= DP_SYNC;
2715 				period = cts->sync_period;
2716 				offset = cts->sync_offset;
2717 			}
2718 #else
2719 			struct ccb_trans_settings_scsi *scsi =
2720 			    &cts->proto_specific.scsi;
2721 			struct ccb_trans_settings_spi *spi =
2722 			    &cts->xport_specific.spi;
2723 
2724 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2725 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2726 					dval |= DP_DISC_ENABLE;
2727 				else
2728 					dval |= DP_DISC_DISABL;
2729 			}
2730 
2731 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2732 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2733 					dval |= DP_TQING_ENABLE;
2734 				else
2735 					dval |= DP_TQING_DISABL;
2736 			}
2737 
2738 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2739 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2740 					dval |= DP_WIDE;
2741 				else
2742 					dval |= DP_NARROW;
2743 			}
2744 
2745 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2746 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2747 			    (spi->sync_period && spi->sync_offset)) {
2748 				dval |= DP_SYNC;
2749 				period = spi->sync_period;
2750 				offset = spi->sync_offset;
2751 			}
2752 #endif
2753 			CAMLOCK_2_MPTLOCK(mpt);
2754 			if (dval & DP_DISC_ENABLE) {
2755 				mpt->mpt_disc_enable |= (1 << tgt);
2756 			} else if (dval & DP_DISC_DISABL) {
2757 				mpt->mpt_disc_enable &= ~(1 << tgt);
2758 			}
2759 			if (dval & DP_TQING_ENABLE) {
2760 				mpt->mpt_tag_enable |= (1 << tgt);
2761 			} else if (dval & DP_TQING_DISABL) {
2762 				mpt->mpt_tag_enable &= ~(1 << tgt);
2763 			}
2764 			if (dval & DP_WIDTH) {
2765 				if (mpt_setwidth(mpt, tgt, dval & DP_WIDE)) {
2766 					mpt_prt(mpt, "Set width Failed!\n");
2767 					ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2768 					mpt_set_ccb_status(ccb,
2769 					    CAM_REQ_CMP_ERR);
2770 					MPTLOCK_2_CAMLOCK(mpt);
2771 					xpt_done(ccb);
2772 					break;
2773 				}
2774 			}
2775 			if (dval & DP_SYNC) {
2776 				if (mpt_setsync(mpt, tgt, period, offset)) {
2777 					mpt_prt(mpt, "Set sync Failed!\n");
2778 					ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2779 					mpt_set_ccb_status(ccb,
2780 					    CAM_REQ_CMP_ERR);
2781 					MPTLOCK_2_CAMLOCK(mpt);
2782 					xpt_done(ccb);
2783 					break;
2784 				}
2785 			}
2786 			MPTLOCK_2_CAMLOCK(mpt);
2787 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2788 				 "SET tgt %d flags %x period %x off %x\n",
2789 				 tgt, dval, period, offset);
2790 		}
2791 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2792 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2793 		xpt_done(ccb);
2794 		break;
2795 
2796 	case XPT_GET_TRAN_SETTINGS:
2797 		cts = &ccb->cts;
2798 		if (mpt->is_fc) {
2799 #ifndef	CAM_NEW_TRAN_CODE
2800 			/*
2801 			 * a lot of normal SCSI things don't make sense.
2802 			 */
2803 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2804 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2805 			/*
2806 			 * How do you measure the width of a high
2807 			 * speed serial bus? Well, in bytes.
2808 			 *
2809 			 * Offset and period make no sense, though, so we set
2810 			 * (above) a 'base' transfer speed to be gigabit.
2811 			 */
2812 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2813 #else
2814 			struct ccb_trans_settings_fc *fc =
2815 			    &cts->xport_specific.fc;
2816 
2817 			cts->protocol = PROTO_SCSI;
2818 			cts->protocol_version = SCSI_REV_2;
2819 			cts->transport = XPORT_FC;
2820 			cts->transport_version = 0;
2821 
2822 			fc->valid = CTS_FC_VALID_SPEED;
2823 			fc->bitrate = 100000;	/* XXX: Need for 2Gb/s */
2824 			/* XXX: need a port database for each target */
2825 #endif
2826 		} else if (mpt->is_sas) {
2827 #ifndef	CAM_NEW_TRAN_CODE
2828 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2829 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2830 			/*
2831 			 * How do you measure the width of a high
2832 			 * speed serial bus? Well, in bytes.
2833 			 *
2834 			 * Offset and period make no sense, though, so we set
2835 			 * (above) a 'base' transfer speed to be gigabit.
2836 			 */
2837 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2838 #else
2839 			struct ccb_trans_settings_sas *sas =
2840 			    &cts->xport_specific.sas;
2841 
2842 			cts->protocol = PROTO_SCSI;
2843 			cts->protocol_version = SCSI_REV_3;
2844 			cts->transport = XPORT_SAS;
2845 			cts->transport_version = 0;
2846 
2847 			sas->valid = CTS_SAS_VALID_SPEED;
2848 			sas->bitrate = 300000;	/* XXX: Default 3Gbps */
2849 #endif
2850 		} else {
2851 #ifdef	CAM_NEW_TRAN_CODE
2852 			struct ccb_trans_settings_scsi *scsi =
2853 			    &cts->proto_specific.scsi;
2854 			struct ccb_trans_settings_spi *spi =
2855 			    &cts->xport_specific.spi;
2856 #endif
2857 			uint8_t dval, pval, oval;
2858 			int rv;
2859 
2860 			/*
2861 			 * We aren't going off of Port PAGE2 params for
2862 			 * tagged queuing or disconnect capabilities
2863 			 * for current settings. For goal settings,
2864 			 * we assert all capabilities, since we've had some
2865 			 * problems with reading NVRAM data.
2866 			 */
2867 			if (IS_CURRENT_SETTINGS(cts)) {
2868 				CONFIG_PAGE_SCSI_DEVICE_0 tmp;
2869 				dval = 0;
2870 
2871 				tmp = mpt->mpt_dev_page0[tgt];
2872 				CAMLOCK_2_MPTLOCK(mpt);
2873 				rv = mpt_read_cur_cfg_page(mpt, tgt,
2874 							   &tmp.Header,
2875 							   sizeof(tmp),
2876 							   /*sleep_ok*/FALSE,
2877 							   /*timeout_ms*/5000);
2878 				if (rv) {
2879 					mpt_prt(mpt,
2880 					    "cannot get target %d DP0\n", tgt);
2881 				}
2882 				mpt_lprt(mpt, MPT_PRT_DEBUG,
2883 					 "SPI Tgt %d Page 0: NParms %x "
2884 					 "Information %x\n", tgt,
2885 					 tmp.NegotiatedParameters,
2886 					 tmp.Information);
2887 				MPTLOCK_2_CAMLOCK(mpt);
2888 
2889 				if (tmp.NegotiatedParameters &
2890 				    MPI_SCSIDEVPAGE0_NP_WIDE)
2891 					dval |= DP_WIDE;
2892 
2893 				if (mpt->mpt_disc_enable & (1 << tgt)) {
2894 					dval |= DP_DISC_ENABLE;
2895 				}
2896 				if (mpt->mpt_tag_enable & (1 << tgt)) {
2897 					dval |= DP_TQING_ENABLE;
2898 				}
2899 				oval = (tmp.NegotiatedParameters >> 16) & 0xff;
2900 				pval = (tmp.NegotiatedParameters >>  8) & 0xff;
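				/*
				 * NegotiatedParameters packs the sync offset
				 * in bits 23:16 and the sync period factor in
				 * bits 15:8, which is what gets reported back
				 * to CAM as sync_offset/sync_period below.
				 */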
2901 			} else {
2902 				/*
2903 				 * XXX: Fix wrt NVRAM someday. Attempts
2904 				 * XXX: to read port page2 device data
2905 				 * XXX: just returns zero in these areas.
2906 				 */
2907 				dval = DP_WIDE|DP_DISC|DP_TQING;
2908 				oval = (mpt->mpt_port_page0.Capabilities >> 16);
2909 				pval = (mpt->mpt_port_page0.Capabilities >>  8);
2910 			}
2911 #ifndef	CAM_NEW_TRAN_CODE
2912 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2913 			if (dval & DP_DISC_ENABLE) {
2914 				cts->flags |= CCB_TRANS_DISC_ENB;
2915 			}
2916 			if (dval & DP_TQING_ENABLE) {
2917 				cts->flags |= CCB_TRANS_TAG_ENB;
2918 			}
2919 			if (dval & DP_WIDE) {
2920 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2921 			} else {
2922 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2923 			}
2924 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2925 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2926 			if (oval) {
2927 				cts->sync_period = pval;
2928 				cts->sync_offset = oval;
2929 				cts->valid |=
2930 				    CCB_TRANS_SYNC_RATE_VALID |
2931 				    CCB_TRANS_SYNC_OFFSET_VALID;
2932 			}
2933 #else
2934 			cts->protocol = PROTO_SCSI;
2935 			cts->protocol_version = SCSI_REV_2;
2936 			cts->transport = XPORT_SPI;
2937 			cts->transport_version = 2;
2938 
2939 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2940 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2941 			if (dval & DP_DISC_ENABLE) {
2942 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2943 			}
2944 			if (dval & DP_TQING_ENABLE) {
2945 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2946 			}
2947 			if (oval && pval) {
2948 				spi->sync_offset = oval;
2949 				spi->sync_period = pval;
2950 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2951 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2952 			}
2953 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2954 			if (dval & DP_WIDE) {
2955 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2956 			} else {
2957 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2958 			}
2959 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2960 				scsi->valid = CTS_SCSI_VALID_TQ;
2961 				spi->valid |= CTS_SPI_VALID_DISC;
2962 			} else {
2963 				scsi->valid = 0;
2964 			}
2965 #endif
2966 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2967 				 "GET %s tgt %d flags %x period %x offset %x\n",
2968 				 IS_CURRENT_SETTINGS(cts)
2969 			       ? "ACTIVE" : "NVRAM",
2970 				 tgt, dval, pval, oval);
2971 		}
2972 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2973 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2974 		xpt_done(ccb);
2975 		break;
2976 
2977 	case XPT_CALC_GEOMETRY:
2978 	{
2979 		struct ccb_calc_geometry *ccg;
2980 
2981 		ccg = &ccb->ccg;
2982 		if (ccg->block_size == 0) {
2983 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2984 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2985 			xpt_done(ccb);
2986 			break;
2987 		}
2988 		mpt_calc_geometry(ccg, /*extended*/1);
2989 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2990 		xpt_done(ccb);
2991 		break;
2992 	}
2993 	case XPT_PATH_INQ:		/* Path routing inquiry */
2994 	{
2995 		struct ccb_pathinq *cpi = &ccb->cpi;
2996 
2997 		cpi->version_num = 1;
2998 		cpi->target_sprt = 0;
2999 		cpi->hba_eng_cnt = 0;
3000 		cpi->max_lun = 7;
3001 		cpi->bus_id = cam_sim_bus(sim);
3002 		/* XXX Report base speed more accurately for FC/SAS, etc.*/
3003 		if (raid_passthru) {
3004 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks;
3005 			cpi->hba_misc = PIM_NOBUSRESET;
3006 			cpi->initiator_id = cpi->max_target + 1;
3007 			cpi->hba_inquiry = PI_TAG_ABLE;
3008 			if (mpt->is_fc) {
3009 				cpi->base_transfer_speed = 100000;
3010 			} else if (mpt->is_sas) {
3011 				cpi->base_transfer_speed = 300000;
3012 			} else {
3013 				cpi->base_transfer_speed = 3300;
3014 				cpi->hba_inquiry |=
3015 				    PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3016 			}
3017 		} else if (mpt->is_fc) {
3018 			/* XXX SHOULD BE BASED UPON IOC FACTS XXX XXX */
3019 			cpi->max_target = 255;
3020 			cpi->hba_misc = PIM_NOBUSRESET;
3021 			cpi->initiator_id = mpt->mpt_ini_id;
3022 			cpi->base_transfer_speed = 100000;
3023 			cpi->hba_inquiry = PI_TAG_ABLE;
3024 		} else if (mpt->is_sas) {
3025 			cpi->max_target = 63;	/* XXX */
3026 			cpi->hba_misc = PIM_NOBUSRESET;
3027 			cpi->initiator_id = mpt->mpt_ini_id;
3028 			cpi->base_transfer_speed = 300000;
3029 			cpi->hba_inquiry = PI_TAG_ABLE;
3030 		} else {
3031 			cpi->initiator_id = mpt->mpt_ini_id;
3032 			cpi->base_transfer_speed = 3300;
3033 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3034 			cpi->hba_misc = 0;
3035 			cpi->max_target = 15;
3036 		}
3037 
3038 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3039 			cpi->hba_misc |= PIM_NOINITIATOR;
3040 		}
3041 		if ((mpt->role & MPT_ROLE_TARGET) != 0) {
3042 			cpi->target_sprt =
3043 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3044 		} else {
3045 			cpi->target_sprt = 0;
3046 		}
3047 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3048 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3049 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3050 		cpi->unit_number = cam_sim_unit(sim);
3051 		cpi->ccb_h.status = CAM_REQ_CMP;
3052 		xpt_done(ccb);
3053 		break;
3054 	}
3055 	case XPT_EN_LUN:		/* Enable LUN as a target */
3056 	{
3057 		int result;
3058 
3059 		CAMLOCK_2_MPTLOCK(mpt);
3060 		if (ccb->cel.enable)
3061 			result = mpt_enable_lun(mpt,
3062 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3063 		else
3064 			result = mpt_disable_lun(mpt,
3065 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3066 		MPTLOCK_2_CAMLOCK(mpt);
3067 		if (result == 0) {
3068 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3069 		} else {
3070 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3071 		}
3072 		xpt_done(ccb);
3073 		break;
3074 	}
3075 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3076 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3077 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3078 	{
3079 		tgt_resource_t *trtp;
3080 		lun_id_t lun = ccb->ccb_h.target_lun;
3081 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3082 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3083 		ccb->ccb_h.flags = 0;
3084 
3085 		if (lun == CAM_LUN_WILDCARD) {
3086 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3087 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3088 				xpt_done(ccb);
3089 				break;
3090 			}
3091 			trtp = &mpt->trt_wildcard;
3092 		} else if (lun >= MPT_MAX_LUNS) {
3093 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3094 			xpt_done(ccb);
3095 			break;
3096 		} else {
3097 			trtp = &mpt->trt[lun];
3098 		}
3099 		CAMLOCK_2_MPTLOCK(mpt);
3100 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3101 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3102 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3103 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3104 			    sim_links.stqe);
3105 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3106 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3107 			    "Put FREE INOT lun %d\n", lun);
3108 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3109 			    sim_links.stqe);
3110 		} else {
3111 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3112 		}
3113 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3114 		MPTLOCK_2_CAMLOCK(mpt);
3115 		break;
3116 	}
3117 	case XPT_CONT_TARGET_IO:
3118 		CAMLOCK_2_MPTLOCK(mpt);
3119 		mpt_target_start_io(mpt, ccb);
3120 		MPTLOCK_2_CAMLOCK(mpt);
3121 		break;
3122 	default:
3123 		ccb->ccb_h.status = CAM_REQ_INVALID;
3124 		xpt_done(ccb);
3125 		break;
3126 	}
3127 }
3128 
3129 static int
3130 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3131 {
3132 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3133 	int rv;
3134 
3135 	tmp = mpt->mpt_dev_page1[tgt];
3136 	if (onoff) {
3137 		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3138 	} else {
3139 		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3140 	}
3141 	rv = mpt_write_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp),
3142 				    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
3143 	if (rv) {
3144 		mpt_prt(mpt, "mpt_setwidth: write cur page failed\n");
3145 		return (-1);
3146 	}
3147 	rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp),
3148 				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
3149 	if (rv) {
3150 		mpt_prt(mpt, "mpt_setwidth: read cur page failed\n");
3151 		return (-1);
3152 	}
3153 	mpt->mpt_dev_page1[tgt] = tmp;
3154 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3155 		 "SPI Target %d Page 1: RequestedParameters %x Config %x\n",
3156 		 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters,
3157 		 mpt->mpt_dev_page1[tgt].Configuration);
3158 	return (0);
3159 }
3160 
3161 static int
3162 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3163 {
3164 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3165 	int rv;
3166 
3167 	tmp = mpt->mpt_dev_page1[tgt];
3168 	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3169 	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3170 	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3171 	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3172 	tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3173 	/*
3174 	 * XXX: For now, we're ignoring specific settings
3175 	 */
3176 	if (period && offset) {
3177 		int factor, offset, np;	/* NB: 'offset' shadows the argument */
3178 		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
3179 		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
3180 		np = 0;
3181 		if (factor < 0x9) {
3182 			np |= MPI_SCSIDEVPAGE1_RP_QAS;
3183 			np |= MPI_SCSIDEVPAGE1_RP_IU;
3184 		}
3185 		if (factor < 0xa) {
3186 			np |= MPI_SCSIDEVPAGE1_RP_DT;
3187 		}
3188 		np |= (factor << 8) | (offset << 16);
3189 		tmp.RequestedParameters |= np;
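		/*
		 * RequestedParameters mirrors the page 0 layout used above:
		 * the sync period factor lives in bits 15:8 and the sync
		 * offset in bits 23:16, with the DT/QAS/IU feature bits
		 * enabled according to how fast a factor is advertised.
		 */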
3190 	}
3191 	rv = mpt_write_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp),
3192 				    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
3193 	if (rv) {
3194 		mpt_prt(mpt, "mpt_setsync: write cur page failed\n");
3195 		return (-1);
3196 	}
3197 	rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp),
3198 				   /*sleep_ok*/FALSE, /*timeout_ms*/500);
3199 	if (rv) {
3200 		mpt_prt(mpt, "mpt_setsync: read cur page failed\n");
3201 		return (-1);
3202 	}
3203 	mpt->mpt_dev_page1[tgt] = tmp;
3204 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3205 		 "SPI Target %d Page 1: RParams %x Config %x\n",
3206 		 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters,
3207 		 mpt->mpt_dev_page1[tgt].Configuration);
3208 	return (0);
3209 }
3210 
3211 static void
3212 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3213 {
3214 #if __FreeBSD_version >= 500000
3215 	cam_calc_geometry(ccg, extended);
3216 #else
3217 	uint32_t size_mb;
3218 	uint32_t secs_per_cylinder;
3219 
3220 	if (ccg->block_size == 0) {
3221 		ccg->ccb_h.status = CAM_REQ_INVALID;
3222 		return;
3223 	}
3224 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3225 	if (size_mb > 1024 && extended) {
3226 		ccg->heads = 255;
3227 		ccg->secs_per_track = 63;
3228 	} else {
3229 		ccg->heads = 64;
3230 		ccg->secs_per_track = 32;
3231 	}
3232 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3233 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3234 	ccg->ccb_h.status = CAM_REQ_CMP;
3235 #endif
3236 }
3237 
3238 /****************************** Timeout Recovery ******************************/
3239 static int
3240 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3241 {
3242 	int error;
3243 
3244 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3245 	    &mpt->recovery_thread, /*flags*/0,
3246 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3247 	return (error);
3248 }
3249 
3250 static void
3251 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3252 {
3253 	if (mpt->recovery_thread == NULL) {
3254 		return;
3255 	}
3256 	mpt->shutdwn_recovery = 1;
3257 	wakeup(mpt);
3258 	/*
3259 	 * Sleep on a slightly different location
3260 	 * for this interlock just for added safety.
3261 	 */
3262 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3263 }
3264 
3265 static void
3266 mpt_recovery_thread(void *arg)
3267 {
3268 	struct mpt_softc *mpt;
3269 
3270 #if __FreeBSD_version >= 500000
3271 	mtx_lock(&Giant);
3272 #endif
3273 	mpt = (struct mpt_softc *)arg;
3274 	MPT_LOCK(mpt);
3275 	for (;;) {
3276 
3277 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0
3278 		 && mpt->shutdwn_recovery == 0)
3279 			mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3280 
3281 		if (mpt->shutdwn_recovery != 0) {
3282 			break;
3283 		}
3284 		mpt_recover_commands(mpt);
3285 	}
3286 	mpt->recovery_thread = NULL;
3287 	wakeup(&mpt->recovery_thread);
3288 	MPT_UNLOCK(mpt);
3289 #if __FreeBSD_version >= 500000
3290 	mtx_unlock(&Giant);
3291 #endif
3292 	kthread_exit(0);
3293 }
3294 
3295 static int
3296 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3297     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3298 {
3299 	MSG_SCSI_TASK_MGMT *tmf_req;
3300 	int		    error;
3301 
3302 	/*
3303 	 * Wait for any current TMF request to complete.
3304 	 * We're only allowed to issue one TMF at a time.
3305 	 */
3306 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_MASK,
3307 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3308 	if (error != 0) {
3309 		mpt_reset(mpt, TRUE);
3310 		return (ETIMEDOUT);
3311 	}
3312 
3313 	if ((mpt->tmf_req->serno = mpt->sequence++) == 0) {
3314 		mpt->tmf_req->serno = mpt->sequence++;
3315 	}
3316 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3317 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3318 
3319 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3320 	memset(tmf_req, 0, sizeof(*tmf_req));
3321 	tmf_req->TargetID = target;
3322 	tmf_req->Bus = channel;
3323 	tmf_req->ChainOffset = 0;
3324 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3325 	tmf_req->Reserved = 0;
3326 	tmf_req->TaskType = type;
3327 	tmf_req->Reserved1 = 0;
3328 	tmf_req->MsgFlags = flags;
3329 	tmf_req->MsgContext =
3330 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3331 	memset(&tmf_req->LUN, 0,
3332 	    sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
3333 	if (lun >= 256) {
3334 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3335 		tmf_req->LUN[1] = lun & 0xff;
3336 	} else {
3337 		tmf_req->LUN[1] = lun;
3338 	}
3339 	tmf_req->TaskMsgContext = abort_ctx;
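	/*
	 * For an ABORT_TASK TMF, abort_ctx is the MsgContext of the I/O
	 * being aborted (see mpt_recover_commands, which passes the
	 * timed-out request's index | scsi_io_handler_id); for bus or
	 * target resets it is simply zero.
	 */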
3340 
3341 	mpt_lprt(mpt, MPT_PRT_INFO,
3342 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3343 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3344 	if (mpt->verbose > MPT_PRT_DEBUG)
3345 		mpt_print_request(tmf_req);
3346 
3347 	if (mpt->is_fc || mpt->is_sas) {
3348 		mpt_send_cmd(mpt, mpt->tmf_req);
3349 		error = MPT_OK;
3350 	} else  {
3351 		error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3352 	}
3353 	if (error != MPT_OK) {
3354 		mpt_reset(mpt, TRUE);
3355 	}
3356 	return (error);
3357 }
3358 
3359 /*
3360  * When a command times out, it is placed on the request_timeout_list
3361  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3362  * only a single TMF operation at a time, so we serially abort/bdr, etc,
3363  * the timedout transactions.  The next TMF is issued either by the
3364  * completion handler of the current TMF waking our recovery thread,
3365  * or the TMF timeout handler causing a hard reset sequence.
3366  */
3367 static void
3368 mpt_recover_commands(struct mpt_softc *mpt)
3369 {
3370 	request_t	   *req;
3371 	union ccb	   *ccb;
3372 	int		    error;
3373 
3374 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3375 		/*
3376 		 * No work to do- leave.
3377 		 */
3378 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3379 		return;
3380 	}
3381 
3382 	/*
3383 	 * Flush any commands whose completion coincides with their timeout.
3384 	 */
3385 	mpt_intr(mpt);
3386 
3387 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3388 		/*
3389 		 * The timedout commands have already
3390 		 * completed.  This typically means
3391 		 * that either the timeout value was on
3392 		 * the hairy edge of what the device
3393 		 * requires or - more likely - interrupts
3394 		 * are not happening.
3395 		 */
3396 		mpt_prt(mpt, "Timedout requests already complete. "
3397 		    "Interrupts may not be functioning.\n");
3398 		mpt_enable_ints(mpt);
3399 		return;
3400 	}
3401 
3402 	/*
3403 	 * We have no visibility into the current state of the
3404 	 * controller, so attempt to abort the commands in the
3405 	 * order they timed-out.
3406 	 */
3407 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3408 		u_int status;
3409 
3410 		mpt_prt(mpt,
3411 		    "Attempting to abort req %p:%u\n", req, req->serno);
3412 		ccb = req->ccb;
3413 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
3414 		error = mpt_scsi_send_tmf(mpt,
3415 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3416 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
3417 		    htole32(req->index | scsi_io_handler_id), TRUE);
3418 
3419 		if (error != 0) {
3420 			/*
3421 			 * mpt_scsi_send_tmf hard resets on failure, so no
3422 			 * need to do so here.  Our queue should be emptied
3423 			 * by the hard reset.
3424 			 */
3425 			continue;
3426 		}
3427 
3428 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
3429 		    REQ_STATE_DONE, TRUE, 500);
3430 
3431 		if (error != 0) {
3432 			/*
3433 			 * If we've errored out and the transaction is still
3434 			 * pending, reset the controller.
3435 			 */
3436 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
3437 				"Resetting controller\n");
3438 			mpt_reset(mpt, TRUE);
3439 			continue;
3440 		}
3441 
3442 		/*
3443 		 * TMF is complete.
3444 		 */
3445 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
3446 		mpt->tmf_req->state = REQ_STATE_FREE;
3447 
3448 		status = mpt->tmf_req->IOCStatus;
3449 		if ((status & MPI_IOCSTATUS_MASK) == MPI_SCSI_STATUS_SUCCESS) {
3450 			mpt_prt(mpt, "abort of req %p:%u completed\n",
3451 			    req, req->serno);
3452 			continue;
3453 		}
3454 
3455 		mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_recover_commands: abort of "
3456 		    "%p:%u failed with status 0x%x. Resetting controller.\n",
3457 		    req, req->serno, status);
3458 
3459 		/*
3460 		 * If the abort attempt fails for any reason, reset the bus.
3461 		 * We should find all of the timed-out commands on our
3462 		 * list are in the done state after this completes.
3463 		 */
3464 		mpt_bus_reset(mpt, TRUE);
3465 	}
3466 }
3467 
3468 /************************ Target Mode Support ****************************/
3469 static void
3470 mpt_fc_add_els(struct mpt_softc *mpt, request_t *req)
3471 {
3472 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
3473 	PTR_SGE_TRANSACTION32 tep;
3474 	PTR_SGE_SIMPLE32 se;
3475 	bus_addr_t paddr;
3476 
3477 	paddr = req->req_pbuf;
3478 	paddr += MPT_RQSL(mpt);
3479 
3480 	fc = req->req_vbuf;
3481 	memset(fc, 0, MPT_REQUEST_AREA);
3482 	fc->BufferCount = 1;
3483 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
3484 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
3485 
3486 	/*
3487 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
3488 	 * consist of a TE SGL element (with a details length of zero)
3489 	 * followed by a SIMPLE SGL element which holds the address
3490 	 * of the buffer.
3491 	 */
3492 
3493 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
3494 
3495 	tep->ContextSize = 4;
3496 	tep->Flags = 0;
3497 	tep->TransactionContext[0] = htole32(req->index | fc_els_handler_id);
3498 
3499 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
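	/*
	 * Pack the SGE flags into the upper bits of FlagsLength (via
	 * MPI_SGE_FLAGS_SHIFT) and the length of the ELS buffer, that is,
	 * the part of the request area past the request message itself,
	 * into the low bits.
	 */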
3500 	se->FlagsLength =
3501 		MPI_SGE_FLAGS_HOST_TO_IOC	|
3502 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
3503 		MPI_SGE_FLAGS_LAST_ELEMENT	|
3504 		MPI_SGE_FLAGS_END_OF_LIST	|
3505 		MPI_SGE_FLAGS_END_OF_BUFFER;
3506 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
3507 	se->FlagsLength |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
3508 	se->Address = (uint32_t) paddr;
3509 	mpt_check_doorbell(mpt);
3510 	mpt_send_cmd(mpt, req);
3511 }
3512 
3513 static void
3514 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
3515 {
3516 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
3517 	PTR_CMD_BUFFER_DESCRIPTOR cb;
3518 	bus_addr_t paddr;
3519 
3520 	paddr = req->req_pbuf;
3521 	paddr += MPT_RQSL(mpt);
3522 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
3523 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
3524 
3525 	fc = req->req_vbuf;
3526 	fc->BufferCount = 1;
3527 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
3528 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3529 
3530 	cb = &fc->Buffer[0];
3531 	cb->IoIndex = htole16(ioindex);
3532 	cb->u.PhysicalAddress32 = (U32) paddr;
3533 
3534 	mpt_check_doorbell(mpt);
3535 	mpt_send_cmd(mpt, req);
3536 }
3537 
3538 static void
3539 mpt_add_target_commands(struct mpt_softc *mpt)
3540 {
3541 	int i, max;
3542 
3543 	if (mpt->tgt_cmd_ptrs) {
3544 		return;
3545 	}
3546 
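	/*
	 * Use no more than half of the request pool for target command
	 * buffers, further capped by the configured mpt_max_tgtcmds.
	 */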
3547 	max = MPT_MAX_REQUESTS(mpt) >> 1;
3548 	if (max > mpt->mpt_max_tgtcmds) {
3549 		max = mpt->mpt_max_tgtcmds;
3550 	}
3551 	mpt->tgt_cmd_ptrs =
3552 	    malloc(max * sizeof (void *), M_DEVBUF, M_NOWAIT | M_ZERO);
3553 	if (mpt->tgt_cmd_ptrs == NULL) {
3554 		mpt_prt(mpt, "could not allocate cmdptrs\n");
3555 		return;
3556 	}
3557 	mpt->tgt_cmds_allocated = max;
3558 
3559 	for (i = 0; i < max; i++) {
3560 		request_t *req;
3561 
3562 		req = mpt_get_request(mpt, FALSE);
3563 		if (req == NULL) {
3564 			break;
3565 		}
3566 		req->state |= REQ_STATE_LOCKED;
3567 		mpt->tgt_cmd_ptrs[i] = req;
3568 		mpt_post_target_command(mpt, req, i);
3569 	}
3570 
3571 	if (i == 0) {
3572 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
3573 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
3574 		mpt->tgt_cmd_ptrs = NULL;
3575 		mpt->tgt_cmds_allocated = 0;
3576 	} else if (i < max) {
3577 		mpt_lprt(mpt, MPT_PRT_WARN, "added %d of %d target bufs\n",
3578 		    i, max);
3579 	}
3580 }
3581 
3582 static int
3583 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3584 {
3585 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3586 		mpt->twildcard = 1;
3587 	} else if (lun >= MPT_MAX_LUNS) {
3588 		return (EINVAL);
3589 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3590 		return (EINVAL);
3591 	}
3592 	if (mpt->tenabled == 0) {
3593 #if	0
3594 		if (mpt->is_fc) {
3595 			(void) mpt_fc_reset_link(mpt, 0);
3596 		}
3597 #endif
3598 		mpt->tenabled = 1;
3599 	}
3600 	if (lun == CAM_LUN_WILDCARD) {
3601 		mpt->trt_wildcard.enabled = 1;
3602 	} else {
3603 		mpt->trt[lun].enabled = 1;
3604 	}
3605 	return (0);
3606 }
3607 
3608 static int
3609 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3610 {
3611 	int i;
3612 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3613 		mpt->twildcard = 0;
3614 	} else if (lun >= MPT_MAX_LUNS) {
3615 		return (EINVAL);
3616 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3617 		return (EINVAL);
3618 	}
3619 	if (lun == CAM_LUN_WILDCARD) {
3620 		mpt->trt_wildcard.enabled = 0;
3621 	} else {
3622 		mpt->trt[lun].enabled = 0;
3623 	}
3624 	for (i = 0; i < MPT_MAX_LUNS; i++) {
3625 		if (mpt->trt[i].enabled) {
3626 			break;
3627 		}
3628 	}
3629 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
3630 		mpt->tenabled = 0;
3631 #if	0
3632 		if (mpt->is_fc) {
3633 			(void) mpt_fc_reset_link(mpt, 0);
3634 		}
3635 #endif
3636 	}
3637 	return (0);
3638 }
3639 
3640 /*
3641  * Called with MPT lock held
3642  */
3643 static void
3644 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
3645 {
3646 	struct ccb_scsiio *csio = &ccb->csio;
3647 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
3648 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
3649 
3650 	if (tgt->state != TGT_STATE_IN_CAM) {
3651 		mpt_prt(mpt, "ccb flags 0x%x tag 0x%08x had bad request "
3652 		    "starting I/O\n", csio->ccb_h.flags, csio->tag_id);
3653 		mpt_tgt_dump_req_state(mpt, cmd_req);
3654 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3655 		MPTLOCK_2_CAMLOCK(mpt);
3656 		xpt_done(ccb);
3657 		CAMLOCK_2_MPTLOCK(mpt);
3658 		return;
3659 	}
3660 
3661 	if (csio->dxfer_len) {
3662 		bus_dmamap_callback_t *cb;
3663 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
3664 		request_t *req;
3665 
3666 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
3667 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
3668 
3669 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
3670 			if (mpt->outofbeer == 0) {
3671 				mpt->outofbeer = 1;
3672 				xpt_freeze_simq(mpt->sim, 1);
3673 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
3674 			}
3675 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3676 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3677 			MPTLOCK_2_CAMLOCK(mpt);
3678 			xpt_done(ccb);
3679 			CAMLOCK_2_MPTLOCK(mpt);
3680 			return;
3681 		}
3682 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
3683 		if (sizeof (bus_addr_t) > 4) {
3684 			cb = mpt_execute_req_a64;
3685 		} else {
3686 			cb = mpt_execute_req;
3687 		}
3688 
3689 		req->ccb = ccb;
3690 		ccb->ccb_h.ccb_req_ptr = req;
3691 
3692 		/*
3693 		 * Record the currently active ccb and the
3694 		 * request for it in our target state area.
3695 		 */
3696 		tgt->ccb = ccb;
3697 		tgt->req = req;
3698 
3699 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
3700 		ta = req->req_vbuf;
3701 
3702 		if (mpt->is_fc) {
3703 			;
3704 		} else if (mpt->is_sas) {
3705 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
3706 			     cmd_req->req_vbuf;
3707 			ta->QueueTag = ssp->InitiatorTag;
3708 		} else {
3709 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
3710 			     cmd_req->req_vbuf;
3711 			ta->QueueTag = sp->Tag;
3712 		}
3713 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
3714 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3715 		ta->ReplyWord = htole32(tgt->reply_desc);
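		/*
		 * Encode the LUN: flat-space style (0x40 plus the high LUN
		 * bits in byte 0) for LUNs that do not fit in a single byte,
		 * single-byte peripheral style otherwise.
		 */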
3716 		if (csio->ccb_h.target_lun >= 256) {
3717 			ta->LUN[0] =
3718 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
3719 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
3720 		} else {
3721 			ta->LUN[1] = csio->ccb_h.target_lun;
3722 		}
3723 
3724 		ta->RelativeOffset = tgt->bytes_xfered;
3725 		ta->DataLength = ccb->csio.dxfer_len;
3726 		if (ta->DataLength > tgt->resid) {
3727 			ta->DataLength = tgt->resid;
3728 		}
3729 
3730 		/*
3731 		 * XXX Should be done after data transfer completes?
3732 		 */
3733 		tgt->resid -= csio->dxfer_len;
3734 		tgt->bytes_xfered += csio->dxfer_len;
3735 
3736 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
3737 			ta->TargetAssistFlags |=
3738 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
3739 		}
3740 
3741 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
3742 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
3743 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
3744 			ta->TargetAssistFlags |=
3745 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
3746 		}
3747 #endif
3748 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
3749 
3750 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3751 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
3752 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
3753 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
3754 
3755 		MPTLOCK_2_CAMLOCK(mpt);
3756 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
3757 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
3758 				int error;
3759 				int s = splsoftvm();
3760 				error = bus_dmamap_load(mpt->buffer_dmat,
3761 				    req->dmap, csio->data_ptr, csio->dxfer_len,
3762 				    cb, req, 0);
3763 				splx(s);
3764 				if (error == EINPROGRESS) {
3765 					xpt_freeze_simq(mpt->sim, 1);
3766 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3767 				}
3768 			} else {
3769 				/*
3770 				 * We have been given a pointer to single
3771 				 * physical buffer.
3772 				 */
3773 				struct bus_dma_segment seg;
3774 				seg.ds_addr = (bus_addr_t)
3775 				    (vm_offset_t)csio->data_ptr;
3776 				seg.ds_len = csio->dxfer_len;
3777 				(*cb)(req, &seg, 1, 0);
3778 			}
3779 		} else {
3780 			/*
3781 			 * We have been given a list of addresses.
3782 			 * This case could easily be supported, but such lists
3783 			 * are not currently generated by the CAM subsystem so there
3784 			 * is no point in wasting the time right now.
3785 			 */
3786 			struct bus_dma_segment *sgs;
3787 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
3788 				(*cb)(req, NULL, 0, EFAULT);
3789 			} else {
3790 				/* Just use the segments provided */
3791 				sgs = (struct bus_dma_segment *)csio->data_ptr;
3792 				(*cb)(req, sgs, csio->sglist_cnt, 0);
3793 			}
3794 		}
3795 		CAMLOCK_2_MPTLOCK(mpt);
3796 	} else {
3797 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
3798 
3799 		/*
3800 		 * XXX: I don't know why this seems to happen, but
3801 		 * XXX: completing the CCB seems to make things happy.
3802 		 * XXX: This seems to happen if the initiator requests
3803 		 * XXX: enough data that we have to do multiple CTIOs.
3804 		 */
3805 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
3806 			mpt_lprt(mpt, MPT_PRT_DEBUG,
3807 			    "Meaningless STATUS CCB (%p): flags %x status %x "
3808 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
3809 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
3810 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3811 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3812 			MPTLOCK_2_CAMLOCK(mpt);
3813 			xpt_done(ccb);
3814 			CAMLOCK_2_MPTLOCK(mpt);
3815 			return;
3816 		}
3817 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
3818 			sp = sense;
3819 			memcpy(sp, &csio->sense_data,
3820 			   min(csio->sense_len, MPT_SENSE_SIZE));
3821 		}
3822 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
3823 	}
3824 }
3825 
3826 /*
3827  * Abort queued up CCBs
3828  */
3829 static cam_status
3830 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
3831 {
3832 	struct mpt_hdr_stailq *lp;
3833 	struct ccb_hdr *srch;
3834 	int found = 0;
3835 	union ccb *accb = ccb->cab.abort_ccb;
3836 	tgt_resource_t *trtp;
3837 
3838 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
3839 
3840 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
3841 		trtp = &mpt->trt_wildcard;
3842 	} else {
3843 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
3844 	}
3845 
3846 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3847 		lp = &trtp->atios;
3848 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3849 		lp = &trtp->inots;
3850 	} else {
3851 		return (CAM_REQ_INVALID);
3852 	}
3853 
3854 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
3855 		if (srch == &accb->ccb_h) {
3856 			found = 1;
3857 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
3858 			break;
3859 		}
3860 	}
3861 	if (found) {
3862 		accb->ccb_h.status = CAM_REQ_ABORTED;
3863 		xpt_done(accb);
3864 		return (CAM_REQ_CMP);
3865 	}
3866 	mpt_prt(mpt, "mpt_abort_target_ccb: CCB %p not found\n", accb);
3867 	return (CAM_PATH_INVALID);
3868 }
3869 
3870 /*
3871  * Ask the MPT to abort the current target command
3872  */
3873 static int
3874 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
3875 {
3876 	int error;
3877 	request_t *req;
3878 	PTR_MSG_TARGET_MODE_ABORT abtp;
3879 
3880 	req = mpt_get_request(mpt, FALSE);
3881 	if (req == NULL) {
3882 		return (-1);
3883 	}
3884 	abtp = req->req_vbuf;
3885 	memset(abtp, 0, sizeof (*abtp));
3886 
3887 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3888 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
3889 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
3890 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
3891 	error = 0;
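	/*
	 * FC and SAS parts take the abort through the normal request
	 * queue; SPI parts get it pushed via the doorbell handshake.
	 */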
3892 	if (mpt->is_fc || mpt->is_sas) {
3893 		mpt_send_cmd(mpt, req);
3894 	} else {
3895 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
3896 	}
3897 	return (error);
3898 }
3899 
3900 /*
3901  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
3902  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
3903  * FC929 to set bogus FC_RSP fields (nonzero residuals
3904  * but w/o RESID fields set). This causes QLogic initiators
3905  * to think that maybe a frame was lost.
3906  *
3907  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
3908  * we use allocated requests to do TARGET_ASSIST and we
3909  * need to know when to release them.
3910  */
3911 
3912 static void
3913 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
3914     uint8_t status, uint8_t const *sense_data)
3915 {
3916 	uint8_t *cmd_vbuf;
3917 	mpt_tgt_state_t *tgt;
3918 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
3919 	request_t *req;
3920 	bus_addr_t paddr;
3921 	int resplen = 0;
3922 
3923 	cmd_vbuf = cmd_req->req_vbuf;
3924 	cmd_vbuf += MPT_RQSL(mpt);
3925 	tgt = MPT_TGT_STATE(mpt, cmd_req);
3926 
3927 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
3928 		if (mpt->outofbeer == 0) {
3929 			mpt->outofbeer = 1;
3930 			xpt_freeze_simq(mpt->sim, 1);
3931 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
3932 		}
3933 		if (ccb) {
3934 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3935 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3936 			MPTLOCK_2_CAMLOCK(mpt);
3937 			xpt_done(ccb);
3938 			CAMLOCK_2_MPTLOCK(mpt);
3939 		} else {
3940 			mpt_prt(mpt,
3941 			    "XXXX could not allocate status req- dropping\n");
3942 		}
3943 		return;
3944 	}
3945 	req->ccb = ccb;
3946 	if (ccb) {
3947 		ccb->ccb_h.ccb_mpt_ptr = mpt;
3948 		ccb->ccb_h.ccb_req_ptr = req;
3949 	}
3950 
3951 	/*
3952 	 * Record the currently active ccb, if any, and the
3953 	 * request for it in our target state area.
3954 	 */
3955 	tgt->ccb = ccb;
3956 	tgt->req = req;
3957 	tgt->state = TGT_STATE_SENDING_STATUS;
3958 
3959 	tp = req->req_vbuf;
3960 	paddr = req->req_pbuf;
3961 	paddr += MPT_RQSL(mpt);
3962 
3963 	memset(tp, 0, sizeof (*tp));
3964 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
3965 	if (mpt->is_fc) {
3966 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
3967 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
3968 		uint8_t *sts_vbuf;
3969 		uint32_t *rsp;
3970 
3971 		sts_vbuf = req->req_vbuf;
3972 		sts_vbuf += MPT_RQSL(mpt);
3973 		rsp = (uint32_t *) sts_vbuf;
3974 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
3975 
3976 		/*
3977 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
3978 		 * It has to be big-endian in memory and is organized
3979 		 * in 32 bit words, which are much easier to deal with
3980 		 * as words which are swizzled as needed.
3981 		 *
3982 		 * All we're filling here is the FC_RSP payload.
3983 		 * We may just have the chip synthesize it if
3984 		 * we have no residual and an OK status.
3985 		 *
3986 		 */
3987 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
3988 
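		/*
		 * Build the FC_RSP payload a word at a time: word 2 carries
		 * the FCP flags and SCSI status (0x800 flags a valid
		 * residual, 0x200 a valid sense length), word 3 the
		 * residual, word 4 the sense length, and the sense data
		 * itself lands at word 8. The status word is byte-swapped
		 * to big-endian at the end.
		 */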
3989 		rsp[2] = status;
3990 		if (tgt->resid) {
3991 			rsp[2] |= 0x800;
3992 			rsp[3] = htobe32(tgt->resid);
3993 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
3994 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
3995 #endif
3996 		}
3997 		if (status == SCSI_STATUS_CHECK_COND) {
3998 			int i;
3999 
4000 			rsp[2] |= 0x200;
4001 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4002 			memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4003 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4004 				rsp[i] = htobe32(rsp[i]);
4005 			}
4006 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4007 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4008 #endif
4009 		}
4010 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4011 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4012 #endif
4013 		rsp[2] = htobe32(rsp[2]);
4014 	} else if (mpt->is_sas) {
4015 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4016 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4017 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4018 	} else {
4019 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4020 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4021 		tp->StatusCode = status;
4022 		tp->QueueTag = htole16(sp->Tag);
4023 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4024 	}
4025 
4026 	tp->ReplyWord = htole32(tgt->reply_desc);
4027 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4028 
4029 #ifdef	WE_CAN_USE_AUTO_REPOST
4030 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4031 #endif
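	/*
	 * With plain good status and nothing to send in the response
	 * buffer, let the chip synthesize the status frame itself;
	 * otherwise hang a simple SGE off the request that points at
	 * the response we built above.
	 */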
4032 	if (status == SCSI_STATUS_OK && resplen == 0) {
4033 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4034 	} else {
4035 		tp->StatusDataSGE.u.Address32 = (uint32_t) paddr;
4036 		tp->StatusDataSGE.FlagsLength =
4037 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4038 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4039 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4040 			MPI_SGE_FLAGS_END_OF_LIST	|
4041 			MPI_SGE_FLAGS_END_OF_BUFFER;
4042 		tp->StatusDataSGE.FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
4043 		tp->StatusDataSGE.FlagsLength |= resplen;
4044 	}
4045 
4046 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4047 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4048 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4049 	    req->serno, tgt->resid);
4050 	if (ccb) {
4051 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4052 		ccb->ccb_h.timeout_ch = timeout(mpt_timeout, (caddr_t)ccb, hz);
4053 	}
4054 	mpt_send_cmd(mpt, req);
4055 }
4056 
4057 static void
4058 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4059     tgt_resource_t *trtp, int init_id)
4060 {
4061 	struct ccb_immed_notify *inot;
4062 	mpt_tgt_state_t *tgt;
4063 
4064 	tgt = MPT_TGT_STATE(mpt, req);
4065 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4066 	if (inot == NULL) {
4067 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
4068 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4069 		return;
4070 	}
4071 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4072 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4073 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4074 
4075 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4076 	inot->sense_len = 0;
4077 	memset(inot->message_args, 0, sizeof (inot->message_args));
4078 	inot->initiator_id = init_id;	/* XXX */
4079 
4080 	/*
4081 	 * This is a somewhat grotesque attempt to map from task management
4082 	 * to old style SCSI messages. God help us all.
4083 	 */
4084 	switch (fc) {
4085 	case MPT_ABORT_TASK_SET:
4086 		inot->message_args[0] = MSG_ABORT_TAG;
4087 		break;
4088 	case MPT_CLEAR_TASK_SET:
4089 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4090 		break;
4091 	case MPT_TARGET_RESET:
4092 		inot->message_args[0] = MSG_TARGET_RESET;
4093 		break;
4094 	case MPT_CLEAR_ACA:
4095 		inot->message_args[0] = MSG_CLEAR_ACA;
4096 		break;
4097 	case MPT_TERMINATE_TASK:
4098 		inot->message_args[0] = MSG_ABORT_TAG;
4099 		break;
4100 	default:
4101 		inot->message_args[0] = MSG_NOOP;
4102 		break;
4103 	}
4104 	tgt->ccb = (union ccb *) inot;
4105 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4106 	MPTLOCK_2_CAMLOCK(mpt);
4107 	xpt_done((union ccb *)inot);
4108 	CAMLOCK_2_MPTLOCK(mpt);
4109 }
4110 
4111 static void
4112 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4113 {
4114 	struct ccb_accept_tio *atiop;
4115 	lun_id_t lun;
4116 	int tag_action = 0;
4117 	mpt_tgt_state_t *tgt;
4118 	tgt_resource_t *trtp;
4119 	U8 *lunptr;
4120 	U8 *vbuf;
4121 	U16 itag;
4122 	U16 ioindex;
4123 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4124 	uint8_t *cdbp;
4125 
4126 	/*
4127 	 * First, DMA sync the received command- which is in the *request*
4128 	 * phys area.
4129 	 * XXX: We could optimize this for a range
4130 	 */
4131 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
4132 	    BUS_DMASYNC_POSTREAD);
4133 
4134 	/*
4135 	 * Stash info for the current command where we can get at it later.
4136 	 */
4137 	vbuf = req->req_vbuf;
4138 	vbuf += MPT_RQSL(mpt);
4139 
4140 	/*
4141 	 * Get our state pointer set up.
4142 	 */
4143 	tgt = MPT_TGT_STATE(mpt, req);
4144 	if (tgt->state != TGT_STATE_LOADED) {
4145 		mpt_tgt_dump_req_state(mpt, req);
4146 		panic("bad target state in mpt_scsi_tgt_atio");
4147 	}
4148 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4149 	tgt->state = TGT_STATE_IN_CAM;
4150 	tgt->reply_desc = reply_desc;
4151 	ioindex = GET_IO_INDEX(reply_desc);
4152 
4153 	if (mpt->is_fc) {
4154 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4155 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4156 		if (fc->FcpCntl[2]) {
4157 			/*
4158 			 * Task Management Request
4159 			 */
4160 			switch (fc->FcpCntl[2]) {
4161 			case 0x2:
4162 				fct = MPT_ABORT_TASK_SET;
4163 				break;
4164 			case 0x4:
4165 				fct = MPT_CLEAR_TASK_SET;
4166 				break;
4167 			case 0x20:
4168 				fct = MPT_TARGET_RESET;
4169 				break;
4170 			case 0x40:
4171 				fct = MPT_CLEAR_ACA;
4172 				break;
4173 			case 0x80:
4174 				fct = MPT_TERMINATE_TASK;
4175 				break;
4176 			default:
4177 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4178 				    fc->FcpCntl[2]);
4179 				mpt_scsi_tgt_status(mpt, 0, req,
4180 				    SCSI_STATUS_OK, 0);
4181 				return;
4182 			}
4184 		}
4185 		switch (fc->FcpCntl[1]) {
4186 		case 0:
4187 			tag_action = MSG_SIMPLE_Q_TAG;
4188 			break;
4189 		case 1:
4190 			tag_action = MSG_HEAD_OF_Q_TAG;
4191 			break;
4192 		case 2:
4193 			tag_action = MSG_ORDERED_Q_TAG;
4194 			break;
4195 		default:
4196 			/*
4197 			 * Bah. Ignore Untagged Queuing and ACA
4198 			 */
4199 			tag_action = MSG_SIMPLE_Q_TAG;
4200 			break;
4201 		}
4202 		tgt->resid = be32toh(fc->FcpDl);
4203 		cdbp = fc->FcpCdb;
4204 		lunptr = fc->FcpLun;
4205 		itag = be16toh(fc->OptionalOxid);
4206 	} else if (mpt->is_sas) {
4207 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4208 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4209 		cdbp = ssp->CDB;
4210 		lunptr = ssp->LogicalUnitNumber;
4211 		itag = ssp->InitiatorTag;
4212 	} else {
4213 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4214 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4215 		cdbp = sp->CDB;
4216 		lunptr = sp->LogicalUnitNumber;
4217 		itag = sp->Tag;
4218 	}
4219 
4220 	/*
4221 	 * Generate a simple lun from peripheral or flat-space addressing.
4222 	 */
4223 	switch (lunptr[0] & 0xc0) {
4224 	case 0x40:
4225 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
4226 		break;
4227 	case 0:
4228 		lun = lunptr[1];
4229 		break;
4230 	default:
4231 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type of lun\n");
4232 		lun = 0xffff;
4233 		break;
4234 	}
4235 
4236 	/*
4237 	 * Deal with non-enabled or bad luns here.
4238 	 */
4239 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4240 	    mpt->trt[lun].enabled == 0) {
4241 		if (mpt->twildcard) {
4242 			trtp = &mpt->trt_wildcard;
4243 		} else {
4244 			const uint8_t sp[MPT_SENSE_SIZE] = {
4245 				0xf0, 0, 0x5, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0x25
4246 			};
4247 			mpt_scsi_tgt_status(mpt, NULL, req,
4248 			    SCSI_STATUS_CHECK_COND, sp);
4249 			return;
4250 		}
4251 	} else {
4252 		trtp = &mpt->trt[lun];
4253 	}
4254 
4255 	if (fct != MPT_NIL_TMT_VALUE) {
4256 		/* undo any tgt residual settings */
4257 		tgt->resid = 0;
4258 		mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
4259 		    GET_INITIATOR_INDEX(reply_desc));
4260 		return;
4261 	}
4262 
4263 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
4264 	if (atiop == NULL) {
4265 		mpt_lprt(mpt, MPT_PRT_WARN,
4266 		    "no ATIOs for lun %u- sending back %s\n", lun,
4267 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
4268 		mpt_scsi_tgt_status(mpt, NULL, req,
4269 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
4270 		    NULL);
4271 		return;
4272 	}
4273 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
4274 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4275 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
4276 	atiop->ccb_h.ccb_mpt_ptr = mpt;
4277 	atiop->ccb_h.status = CAM_CDB_RECVD;
4278 	atiop->ccb_h.target_lun = lun;
4279 	atiop->sense_len = 0;
4280 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
4281 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
4282 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
4283 
4284 	/*
4285 	 * The tag we construct here allows us to find the
4286 	 * original request that the command came in with.
4287 	 *
4288 	 * This way we don't have to depend on anything but the
4289 	 * tag to find things when CCBs show back up from CAM.
4290 	 */
4291 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4292 	tgt->tag_id = atiop->tag_id;
4293 	if (tag_action) {
4294 		atiop->tag_action = tag_action;
4295 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
4296 	}
4297 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4298 		int i;
4299 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
4300 		    atiop->ccb_h.target_lun);
4301 		for (i = 0; i < atiop->cdb_len; i++) {
4302 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
4303 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
4304 		}
4305 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
4306 		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
4307 	}
4308 	tgt->ccb = (union ccb *) atiop;
4309 
4310 	MPTLOCK_2_CAMLOCK(mpt);
4311 	xpt_done((union ccb *)atiop);
4312 	CAMLOCK_2_MPTLOCK(mpt);
4313 }
4314 
4315 static void
4316 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
4317 {
4318 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4319 
4320 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
4321 	    "nx %d tag %08x state=%d\n", req, req->serno, tgt->reply_desc,
4322 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
4323 	    tgt->tag_id, tgt->state);
4324 }
4325 
4326 static void
4327 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
4328 {
4329 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
4330 	    req->index, req->index, req->state);
4331 	mpt_tgt_dump_tgt_state(mpt, req);
4332 }
4333 
4334 static int
4335 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
4336     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
4337 {
4338 	int dbg;
4339 	union ccb *ccb;
4340 	U16 status;
4341 
4342 	if (reply_frame == NULL) {
4343 		/*
4344 		 * Figure out if this is a new command or a target assist
4345 		 * completing.
4346 		 */
4347 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4348 		char serno[8];
4349 
4350 		if (tgt->req) {
4351 			snprintf(serno, 8, "%u", tgt->req->serno);
4352 		} else {
4353 			strncpy(serno, "??", 8);
4354 		}
4355 
4356 		switch(tgt->state) {
4357 		case TGT_STATE_LOADED:
4358 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
4359 			break;
4360 		case TGT_STATE_MOVING_DATA:
4361 		{
4362 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4363 
4364 			ccb = tgt->ccb;
4365 			tgt->ccb = NULL;
4366 			tgt->nxfers++;
4367 			untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
4368 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4369 			    "TARGET_ASSIST %p (req %p:%s) done tag 0x%x\n",
4370 			    ccb, tgt->req, serno, ccb->csio.tag_id);
4371 			/*
4372 			 * Free the Target Assist Request
4373 			 */
4374 			KASSERT(tgt->req && tgt->req->ccb == ccb,
4375 			    ("tgt->req %p:%s tgt->req->ccb %p", tgt->req,
4376 			    serno, tgt->req? tgt->req->ccb : NULL));
4377 			mpt_free_request(mpt, tgt->req);
4378 			tgt->req = NULL;
4379 			/*
4380 			 * Do we need to send status now? That is, are
4381 			 * we done with all our data transfers?
4382 			 */
4383 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4384 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4385 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4386 				KASSERT(ccb->ccb_h.status,
4387 				    ("zero ccb sts at %d\n", __LINE__));
4388 				tgt->state = TGT_STATE_IN_CAM;
4389 				if (mpt->outofbeer) {
4390 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4391 					mpt->outofbeer = 0;
4392 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4393 				}
4394 				MPTLOCK_2_CAMLOCK(mpt);
4395 				xpt_done(ccb);
4396 				CAMLOCK_2_MPTLOCK(mpt);
4397 				break;
4398 			}
4399 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4400 				sp = sense;
4401 				memcpy(sp, &ccb->csio.sense_data,
4402 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
4403 			}
4404 			mpt_scsi_tgt_status(mpt, ccb, req,
4405 			    ccb->csio.scsi_status, sp);
4406 			break;
4407 		}
4408 		case TGT_STATE_SENDING_STATUS:
4409 		case TGT_STATE_MOVING_DATA_AND_STATUS:
4410 		{
4411 			int ioindex;
4412 			ccb = tgt->ccb;
4413 
4414 			if (ccb) {
4415 				tgt->ccb = NULL;
4416 				tgt->nxfers++;
4417 				untimeout(mpt_timeout, ccb,
4418 				    ccb->ccb_h.timeout_ch);
4419 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4420 					ccb->ccb_h.status |= CAM_SENT_SENSE;
4421 				}
4422 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4423 				    "TARGET_STATUS tag %x sts %x flgs %x req "
4424 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
4425 				    ccb->ccb_h.flags, tgt->req);
4426 				/*
4427 				 * Free the Target Send Status Request
4428 				 */
4429 				KASSERT(tgt->req && tgt->req->ccb == ccb,
4430 				    ("tgt->req %p:%s tgt->req->ccb %p",
4431 				    tgt->req, serno,
4432 				    tgt->req? tgt->req->ccb : NULL));
4433 				/*
4434 				 * Notify CAM that we're done
4435 				 */
4436 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4437 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4438 				KASSERT(ccb->ccb_h.status,
4439 				    ("ZERO ccb sts at %d\n", __LINE__));
4440 				tgt->ccb = NULL;
4441 			} else {
4442 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4443 				    "TARGET_STATUS non-CAM for req %p:%s\n",
4444 				    tgt->req, serno);
4445 			}
4446 			mpt_free_request(mpt, tgt->req);
4447 			tgt->req = NULL;
4448 
4449 			/*
4450 			 * And re-post the Command Buffer.
4451 			 */
4452 			ioindex = GET_IO_INDEX(reply_desc);
4453 			mpt_post_target_command(mpt, req, ioindex);
4454 
4455 			/*
4456 			 * And post a done for anyone who cares
4457 			 */
4458 			if (ccb) {
4459 				if (mpt->outofbeer) {
4460 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4461 					mpt->outofbeer = 0;
4462 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4463 				}
4464 				MPTLOCK_2_CAMLOCK(mpt);
4465 				xpt_done(ccb);
4466 				CAMLOCK_2_MPTLOCK(mpt);
4467 			}
4468 			break;
4469 		}
4470 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
4471 			tgt->state = TGT_STATE_LOADED;
4472 			break;
4473 		default:
4474 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
4475 			    "Reply Function\n", tgt->state);
4476 		}
4477 		return (TRUE);
4478 	}
4479 
4480 	status = le16toh(reply_frame->IOCStatus);
4481 	if (status != MPI_IOCSTATUS_SUCCESS) {
4482 		dbg = MPT_PRT_ERROR;
4483 	} else {
4484 		dbg = MPT_PRT_DEBUG1;
4485 	}
4486 
4487 	mpt_lprt(mpt, dbg,
4488 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
4489 	     req, req->serno, reply_frame, reply_frame->Function, status);
4490 
4491 	switch (reply_frame->Function) {
4492 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
4493 	{
4494 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4495 		KASSERT(tgt->state == TGT_STATE_LOADING,
4496 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
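		/*
		 * Give the request a fresh serial number now that the
		 * buffer post has completed, skipping zero (presumably so
		 * it never reads as an unnumbered request in debug output).
		 */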
4497 		if ((req->serno = mpt->sequence++) == 0) {
4498 			req->serno = mpt->sequence++;
4499 		}
4500 		tgt->state = TGT_STATE_LOADED;
4501 		break;
4502 	}
4503 	case MPI_FUNCTION_TARGET_ASSIST:
4504 		mpt_free_request(mpt, req);
4505 		break;
4506 	case MPI_FUNCTION_TARGET_STATUS_SEND:
4507 		mpt_free_request(mpt, req);
4508 		break;
4509 	case MPI_FUNCTION_TARGET_MODE_ABORT:
4510 	{
4511 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
4512 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
4513 		PTR_MSG_TARGET_MODE_ABORT abtp =
4514 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
4515 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
4516 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
4517 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
4518 		mpt_free_request(mpt, req);
4519 		break;
4520 	}
4521 	default:
4522 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
4523 		    "0x%x\n", reply_frame->Function);
4524 		break;
4525 	}
4526 	return (TRUE);
4527 }
4528