xref: /freebsd/sys/dev/mpt/mpt_cam.c (revision 7dfd9569a2f0637fb9a48157b1c1bfe5709faee3)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC  adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  */
61 /*-
62  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
63  * Copyright (c) 2005, WHEEL Sp. z o.o.
64  * Copyright (c) 2004, 2005 Justin T. Gibbs
65  * All rights reserved.
66  *
67  * Redistribution and use in source and binary forms, with or without
68  * modification, are permitted provided that the following conditions are
69  * met:
70  * 1. Redistributions of source code must retain the above copyright
71  *    notice, this list of conditions and the following disclaimer.
72  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
73  *    substantially similar to the "NO WARRANTY" disclaimer below
74  *    ("Disclaimer") and any redistribution must be conditioned upon including
75  *    a substantially similar Disclaimer requirement for further binary
76  *    redistribution.
77  * 3. Neither the names of the above listed copyright holders nor the names
78  *    of any contributors may be used to endorse or promote products derived
79  *    from this software without specific prior written permission.
80  *
81  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
82  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
83  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
84  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
85  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
86  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
87  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
88  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
89  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
90  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
91  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
92  */
93 #include <sys/cdefs.h>
94 __FBSDID("$FreeBSD$");
95 
96 #include <dev/mpt/mpt.h>
97 #include <dev/mpt/mpt_cam.h>
98 #include <dev/mpt/mpt_raid.h>
99 
100 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
101 #include "dev/mpt/mpilib/mpi_init.h"
102 #include "dev/mpt/mpilib/mpi_targ.h"
103 #include "dev/mpt/mpilib/mpi_fc.h"
104 
105 #include <sys/callout.h>
106 #include <sys/kthread.h>
107 
108 static void mpt_poll(struct cam_sim *);
109 static timeout_t mpt_timeout;
110 static void mpt_action(struct cam_sim *, union ccb *);
111 static int
112 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
113 static void mpt_setwidth(struct mpt_softc *, int, int);
114 static void mpt_setsync(struct mpt_softc *, int, int, int);
115 static int mpt_update_spi_config(struct mpt_softc *, int);
116 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
117 static mpt_reply_handler_t mpt_scsi_reply_handler;
118 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
119 static mpt_reply_handler_t mpt_fc_els_reply_handler;
120 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
121 					MSG_DEFAULT_REPLY *);
122 static int mpt_bus_reset(struct mpt_softc *, int);
123 static int mpt_fc_reset_link(struct mpt_softc *, int);
124 
125 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
126 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
127 static void mpt_recovery_thread(void *arg);
128 static void mpt_recover_commands(struct mpt_softc *mpt);
129 
130 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
131     u_int, u_int, u_int, int);
132 
133 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
134 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
135 static int mpt_add_els_buffers(struct mpt_softc *mpt);
136 static int mpt_add_target_commands(struct mpt_softc *mpt);
137 static void mpt_free_els_buffers(struct mpt_softc *mpt);
138 static void mpt_free_target_commands(struct mpt_softc *mpt);
139 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
140 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
141 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
142 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
143 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
144 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
145     uint8_t, uint8_t const *);
146 static void
147 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
148     tgt_resource_t *, int);
149 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
150 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
151 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
152 
153 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
154 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
155 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
156 
157 static mpt_probe_handler_t	mpt_cam_probe;
158 static mpt_attach_handler_t	mpt_cam_attach;
159 static mpt_enable_handler_t	mpt_cam_enable;
160 static mpt_event_handler_t	mpt_cam_event;
161 static mpt_reset_handler_t	mpt_cam_ioc_reset;
162 static mpt_detach_handler_t	mpt_cam_detach;
163 
164 static struct mpt_personality mpt_cam_personality =
165 {
166 	.name		= "mpt_cam",
167 	.probe		= mpt_cam_probe,
168 	.attach		= mpt_cam_attach,
169 	.enable		= mpt_cam_enable,
170 	.event		= mpt_cam_event,
171 	.reset		= mpt_cam_ioc_reset,
172 	.detach		= mpt_cam_detach,
173 };
174 
175 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
176 
177 int
178 mpt_cam_probe(struct mpt_softc *mpt)
179 {
180 	/*
181 	 * Only attach to nodes that support the initiator or target
182 	 * role or have RAID physical devices that need CAM pass-thru support.
183 	 */
184 	if ((mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_INITIATOR) != 0
185 	 || (mpt->mpt_proto_flags & MPI_PORTFACTS_PROTOCOL_TARGET) != 0
186 	 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
187 		return (0);
188 	}
189 	return (ENODEV);
190 }
191 
192 int
193 mpt_cam_attach(struct mpt_softc *mpt)
194 {
195 	struct cam_devq *devq;
196 	mpt_handler_t	 handler;
197 	int		 maxq;
198 	int		 error;
199 
200 	TAILQ_INIT(&mpt->request_timeout_list);
201 	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
202 	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
203 
204 	handler.reply_handler = mpt_scsi_reply_handler;
205 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
206 				     &scsi_io_handler_id);
207 	if (error != 0) {
208 		goto cleanup0;
209 	}
210 
211 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
212 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
213 				     &scsi_tmf_handler_id);
214 	if (error != 0) {
215 		goto cleanup0;
216 	}
217 
218 	/*
219 	 * If we're fibre channel and could support target mode, we register
220 	 * an ELS reply handler and give it resources.
221 	 */
222 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
223 		handler.reply_handler = mpt_fc_els_reply_handler;
224 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
225 		    &fc_els_handler_id);
226 		if (error != 0) {
227 			goto cleanup0;
228 		}
229 		if (mpt_add_els_buffers(mpt) == FALSE) {
230 			error = ENOMEM;
231 			goto cleanup0;
232 		}
233 		maxq -= mpt->els_cmds_allocated;
234 	}
235 
236 	/*
237 	 * If we support target mode, we register a reply handler for it,
238 	 * but don't add resources until we actually enable target mode.
239 	 */
240 	if ((mpt->role & MPT_ROLE_TARGET) != 0) {
241 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
242 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
243 		    &mpt->scsi_tgt_handler_id);
244 		if (error != 0) {
245 			goto cleanup0;
246 		}
247 	}
248 
249 	/*
250 	 * We keep one request reserved for timeout TMF requests.
251 	 */
252 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
253 	if (mpt->tmf_req == NULL) {
254 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
255 		error = ENOMEM;
256 		goto cleanup0;
257 	}
258 
259 	/*
260 	 * Mark the request as free even though not on the free list.
261 	 * There is only one TMF request allowed to be outstanding at
262 	 * a time and the TMF routines perform their own allocation
263 	 * tracking using the standard state flags.
264 	 */
265 	mpt->tmf_req->state = REQ_STATE_FREE;
266 	maxq--;
267 
268 	if (mpt_spawn_recovery_thread(mpt) != 0) {
269 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
270 		error = ENOMEM;
271 		goto cleanup0;
272 	}
273 
274 	/*
275 	 * The rest of this is CAM foo, for which we need to drop our lock
276 	 */
277 	MPTLOCK_2_CAMLOCK(mpt);
278 
279 	/*
280 	 * Create the device queue for our SIM(s).
281 	 */
282 	devq = cam_simq_alloc(maxq);
283 	if (devq == NULL) {
284 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
285 		error = ENOMEM;
286 		goto cleanup;
287 	}
288 
289 	/*
290 	 * Construct our SIM entry.
291 	 */
292 	mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
293 	    mpt->unit, 1, maxq, devq);
294 	if (mpt->sim == NULL) {
295 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
296 		cam_simq_free(devq);
297 		error = ENOMEM;
298 		goto cleanup;
299 	}
300 
301 	/*
302 	 * Register exactly the bus.
303 	 */
304 	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
305 		mpt_prt(mpt, "Bus registration Failed!\n");
306 		error = ENOMEM;
307 		goto cleanup;
308 	}
309 
310 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
311 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
312 		mpt_prt(mpt, "Unable to allocate Path!\n");
313 		error = ENOMEM;
314 		goto cleanup;
315 	}
316 
317 	/*
318 	 * Only register a second bus for RAID physical
319 	 * devices if the controller supports RAID.
320 	 */
321 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
322 		CAMLOCK_2_MPTLOCK(mpt);
323 		return (0);
324 	}
325 
326 	/*
327 	 * Create a "bus" to export all hidden disks to CAM.
328 	 */
329 	mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
330 	    mpt->unit, 1, maxq, devq);
331 	if (mpt->phydisk_sim == NULL) {
332 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
333 		error = ENOMEM;
334 		goto cleanup;
335 	}
336 
337 	/*
338 	 * Register exactly the bus.
339 	 */
340 	if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
341 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
342 		error = ENOMEM;
343 		goto cleanup;
344 	}
345 
346 	if (xpt_create_path(&mpt->phydisk_path, NULL,
347 	    cam_sim_path(mpt->phydisk_sim),
348 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
349 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
350 		error = ENOMEM;
351 		goto cleanup;
352 	}
353 	CAMLOCK_2_MPTLOCK(mpt);
354 	return (0);
355 
356 cleanup:
357 	CAMLOCK_2_MPTLOCK(mpt);
358 cleanup0:
359 	mpt_cam_detach(mpt);
360 	return (error);
361 }
362 
363 /*
364  * Read FC configuration information
365  */
366 static int
367 mpt_read_config_info_fc(struct mpt_softc *mpt)
368 {
369 	char *topology = NULL;
370 	int rv, speed = 0;
371 
372 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
373 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
374 	if (rv) {
375 		return (-1);
376 	}
377 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
378 		 mpt->mpt_fcport_page0.Header.PageVersion,
379 		 mpt->mpt_fcport_page0.Header.PageLength,
380 		 mpt->mpt_fcport_page0.Header.PageNumber,
381 		 mpt->mpt_fcport_page0.Header.PageType);
382 
383 
384 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
385 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
386 	if (rv) {
387 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
388 		return (-1);
389 	}
390 
391 	speed = mpt->mpt_fcport_page0.CurrentSpeed;
392 
393 	switch (mpt->mpt_fcport_page0.Flags &
394 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
395 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
396 		speed = 0;
397 		topology = "<NO LOOP>";
398 		break;
399 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
400 		topology = "N-Port";
401 		break;
402 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
403 		topology = "NL-Port";
404 		break;
405 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
406 		topology = "F-Port";
407 		break;
408 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
409 		topology = "FL-Port";
410 		break;
411 	default:
412 		speed = 0;
413 		topology = "?";
414 		break;
415 	}
416 
417 	mpt_lprt(mpt, MPT_PRT_INFO,
418 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
419 	    "Speed %u-Gbit\n", topology,
420 	    mpt->mpt_fcport_page0.WWNN.High,
421 	    mpt->mpt_fcport_page0.WWNN.Low,
422 	    mpt->mpt_fcport_page0.WWPN.High,
423 	    mpt->mpt_fcport_page0.WWPN.Low,
424 	    speed);
425 
426 	return (0);
427 }
428 
429 /*
430  * Set FC configuration information.
431  */
432 static int
433 mpt_set_initial_config_fc(struct mpt_softc *mpt)
434 {
435 #if	0
436 	CONFIG_PAGE_FC_PORT_1 fc;
437 	U32 fl;
438 	int r, doit = 0;
439 
440 	if ((mpt->role & MPT_ROLE_TARGET) == 0) {
441 		return (0);
442 	}
443 
444 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
445 	    &fc.Header, FALSE, 5000);
446 	if (r) {
447 		return (mpt_fc_reset_link(mpt, 1));
448 	}
449 
450 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 0,
451 	    &fc.Header, sizeof (fc), FALSE, 5000);
452 	if (r) {
453 		return (mpt_fc_reset_link(mpt, 1));
454 	}
455 
456 	fl = le32toh(fc.Flags);
457 	if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
458 		fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
459 		doit = 1;
460 	}
461 	if (doit) {
462 		const char *cc;
463 
464 		mpt_lprt(mpt, MPT_PRT_INFO,
465 		    "FC Port Page 1: New Flags %x \n", fl);
466 		fc.Flags = htole32(fl);
467 		r = mpt_write_cfg_page(mpt,
468 		    MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT, 0, &fc.Header,
469 		    sizeof(fc), FALSE, 5000);
470 		if (r != 0) {
471 			cc = "FC PORT PAGE1 UPDATE: FAILED\n";
472 		} else {
473 			cc = "FC PORT PAGE1 UPDATED: SYSTEM NEEDS RESET\n";
474 		}
475 		mpt_prt(mpt, cc);
476 	}
477 #endif
478 	return (0);
479 }
480 
481 /*
482  * Read SAS configuration information. Nothing to do yet.
483  */
484 static int
485 mpt_read_config_info_sas(struct mpt_softc *mpt)
486 {
487 	return (0);
488 }
489 
490 /*
491  * Set SAS configuration information. Nothing to do yet.
492  */
493 static int
494 mpt_set_initial_config_sas(struct mpt_softc *mpt)
495 {
496 	return (0);
497 }
498 
499 /*
500  * Read SCSI configuration information
501  */
502 static int
503 mpt_read_config_info_spi(struct mpt_softc *mpt)
504 {
505 	int rv, i;
506 
507 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
508 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
509 	if (rv) {
510 		return (-1);
511 	}
512 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
513 	    mpt->mpt_port_page0.Header.PageVersion,
514 	    mpt->mpt_port_page0.Header.PageLength,
515 	    mpt->mpt_port_page0.Header.PageNumber,
516 	    mpt->mpt_port_page0.Header.PageType);
517 
518 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
519 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
520 	if (rv) {
521 		return (-1);
522 	}
523 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
524 	    mpt->mpt_port_page1.Header.PageVersion,
525 	    mpt->mpt_port_page1.Header.PageLength,
526 	    mpt->mpt_port_page1.Header.PageNumber,
527 	    mpt->mpt_port_page1.Header.PageType);
528 
529 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
530 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
531 	if (rv) {
532 		return (-1);
533 	}
534 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
535 	    mpt->mpt_port_page2.Header.PageVersion,
536 	    mpt->mpt_port_page2.Header.PageLength,
537 	    mpt->mpt_port_page2.Header.PageNumber,
538 	    mpt->mpt_port_page2.Header.PageType);
539 
540 	for (i = 0; i < 16; i++) {
541 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
542 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
543 		if (rv) {
544 			return (-1);
545 		}
546 		mpt_lprt(mpt, MPT_PRT_DEBUG,
547 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
548 		    mpt->mpt_dev_page0[i].Header.PageVersion,
549 		    mpt->mpt_dev_page0[i].Header.PageLength,
550 		    mpt->mpt_dev_page0[i].Header.PageNumber,
551 		    mpt->mpt_dev_page0[i].Header.PageType);
552 
553 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
554 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
555 		if (rv) {
556 			return (-1);
557 		}
558 		mpt_lprt(mpt, MPT_PRT_DEBUG,
559 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
560 		    mpt->mpt_dev_page1[i].Header.PageVersion,
561 		    mpt->mpt_dev_page1[i].Header.PageLength,
562 		    mpt->mpt_dev_page1[i].Header.PageNumber,
563 		    mpt->mpt_dev_page1[i].Header.PageType);
564 	}
565 
566 	/*
567 	 * At this point, we don't *have* to fail. As long as we have
568 	 * valid config header information, we can (barely) lurch
569 	 * along.
570 	 */
571 
572 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
573 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
574 	if (rv) {
575 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
576 	} else {
577 		mpt_lprt(mpt, MPT_PRT_DEBUG,
578 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
579 		    mpt->mpt_port_page0.Capabilities,
580 		    mpt->mpt_port_page0.PhysicalInterface);
581 	}
582 
583 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
584 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
585 	if (rv) {
586 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
587 	} else {
588 		mpt_lprt(mpt, MPT_PRT_DEBUG,
589 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
590 		    mpt->mpt_port_page1.Configuration,
591 		    mpt->mpt_port_page1.OnBusTimerValue);
592 	}
593 
594 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
595 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
596 	if (rv) {
597 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
598 	} else {
599 		mpt_lprt(mpt, MPT_PRT_DEBUG,
600 		    "SPI Port Page 2: Flags %x Settings %x\n",
601 		    mpt->mpt_port_page2.PortFlags,
602 		    mpt->mpt_port_page2.PortSettings);
603 		for (i = 0; i < 16; i++) {
604 			mpt_lprt(mpt, MPT_PRT_DEBUG,
605 		  	    "SPI Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
606 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
607 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
608 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
609 		}
610 	}
611 
612 	for (i = 0; i < 16; i++) {
613 		rv = mpt_read_cur_cfg_page(mpt, i,
614 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
615 		    FALSE, 5000);
616 		if (rv) {
617 			mpt_prt(mpt,
618 			    "cannot read SPI Target %d Device Page 0\n", i);
619 			continue;
620 		}
621 		mpt_lprt(mpt, MPT_PRT_DEBUG,
622 		    "SPI Tgt %d Page 0: NParms %x Information %x", i,
623 		    mpt->mpt_dev_page0[i].NegotiatedParameters,
624 		    mpt->mpt_dev_page0[i].Information);
625 
626 		rv = mpt_read_cur_cfg_page(mpt, i,
627 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
628 		    FALSE, 5000);
629 		if (rv) {
630 			mpt_prt(mpt,
631 			    "cannot read SPI Target %d Device Page 1\n", i);
632 			continue;
633 		}
634 		mpt_lprt(mpt, MPT_PRT_DEBUG,
635 		    "SPI Tgt %d Page 1: RParms %x Configuration %x\n", i,
636 		    mpt->mpt_dev_page1[i].RequestedParameters,
637 		    mpt->mpt_dev_page1[i].Configuration);
638 	}
639 	return (0);
640 }
641 
642 /*
643  * Validate SPI configuration information.
644  *
645  * In particular, validate SPI Port Page 1.
646  */
647 static int
648 mpt_set_initial_config_spi(struct mpt_softc *mpt)
649 {
650 	int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
651 	int error;
652 
653 	mpt->mpt_disc_enable = 0xff;
654 	mpt->mpt_tag_enable = 0;
655 
656 	if (mpt->mpt_port_page1.Configuration != pp1val) {
657 		CONFIG_PAGE_SCSI_PORT_1 tmp;
658 
659 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
660 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
661 		tmp = mpt->mpt_port_page1;
662 		tmp.Configuration = pp1val;
663 		error = mpt_write_cur_cfg_page(mpt, 0,
664 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
665 		if (error) {
666 			return (-1);
667 		}
668 		error = mpt_read_cur_cfg_page(mpt, 0,
669 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
670 		if (error) {
671 			return (-1);
672 		}
673 		if (tmp.Configuration != pp1val) {
674 			mpt_prt(mpt,
675 			    "failed to reset SPI Port Page 1 Config value\n");
676 			return (-1);
677 		}
678 		mpt->mpt_port_page1 = tmp;
679 	}
680 
681 	/*
682 	 * The purpose of this exercise is to get
683 	 * all targets back to async/narrow.
684 	 *
685 	 * We skip this if the BIOS has already negotiated speeds with targets.
686 	 */
687 	i = mpt->mpt_port_page2.PortSettings &
688 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
689 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
690 		mpt_lprt(mpt, MPT_PRT_INFO,
691 		    "honoring BIOS transfer negotiation for all targets\n");
692 		return (0);
693 	}
694 	for (i = 0; i < 16; i++) {
695 		mpt->mpt_dev_page1[i].RequestedParameters = 0;
696 		mpt->mpt_dev_page1[i].Configuration = 0;
697 		(void) mpt_update_spi_config(mpt, i);
698 	}
699 	return (0);
700 }
701 
702 int
703 mpt_cam_enable(struct mpt_softc *mpt)
704 {
705 	if (mpt->is_fc) {
706 		if (mpt_read_config_info_fc(mpt)) {
707 			return (EIO);
708 		}
709 		if (mpt_set_initial_config_fc(mpt)) {
710 			return (EIO);
711 		}
712 	} else if (mpt->is_sas) {
713 		if (mpt_read_config_info_sas(mpt)) {
714 			return (EIO);
715 		}
716 		if (mpt_set_initial_config_sas(mpt)) {
717 			return (EIO);
718 		}
719 	} else {
720 		if (mpt_read_config_info_spi(mpt)) {
721 			return (EIO);
722 		}
723 		if (mpt_set_initial_config_spi(mpt)) {
724 			return (EIO);
725 		}
726 	}
727 	return (0);
728 }
729 
730 void
731 mpt_cam_detach(struct mpt_softc *mpt)
732 {
733 	mpt_handler_t handler;
734 
735 	mpt_terminate_recovery_thread(mpt);
736 
737 	handler.reply_handler = mpt_scsi_reply_handler;
738 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
739 			       scsi_io_handler_id);
740 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
741 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
742 			       scsi_tmf_handler_id);
743 	handler.reply_handler = mpt_fc_els_reply_handler;
744 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
745 			       fc_els_handler_id);
746 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
747 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
748 			       mpt->scsi_tgt_handler_id);
749 
750 	if (mpt->tmf_req != NULL) {
751 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
752 		mpt_free_request(mpt, mpt->tmf_req);
753 		mpt->tmf_req = NULL;
754 	}
755 
756 	if (mpt->sim != NULL) {
757 		MPTLOCK_2_CAMLOCK(mpt);
758 		xpt_free_path(mpt->path);
759 		xpt_bus_deregister(cam_sim_path(mpt->sim));
760 		cam_sim_free(mpt->sim, TRUE);
761 		mpt->sim = NULL;
762 		CAMLOCK_2_MPTLOCK(mpt);
763 	}
764 
765 	if (mpt->phydisk_sim != NULL) {
766 		MPTLOCK_2_CAMLOCK(mpt);
767 		xpt_free_path(mpt->phydisk_path);
768 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
769 		cam_sim_free(mpt->phydisk_sim, TRUE);
770 		mpt->phydisk_sim = NULL;
771 		CAMLOCK_2_MPTLOCK(mpt);
772 	}
773 }
774 
775 /* This routine is used after a system crash to dump core onto the swap device.
776  */
777 static void
778 mpt_poll(struct cam_sim *sim)
779 {
780 	struct mpt_softc *mpt;
781 
782 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
783 	MPT_LOCK(mpt);
784 	mpt_intr(mpt);
785 	MPT_UNLOCK(mpt);
786 }
787 
788 /*
789  * Watchdog timeout routine for SCSI requests.
790  */
791 static void
792 mpt_timeout(void *arg)
793 {
794 	union ccb	 *ccb;
795 	struct mpt_softc *mpt;
796 	request_t	 *req;
797 
798 	ccb = (union ccb *)arg;
799 	mpt = ccb->ccb_h.ccb_mpt_ptr;
800 
801 	MPT_LOCK(mpt);
802 	req = ccb->ccb_h.ccb_req_ptr;
803 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
804 	    req->serno, ccb, req->ccb);
805 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
806 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
807 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
808 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
809 		req->state |= REQ_STATE_TIMEDOUT;
810 		mpt_wakeup_recovery_thread(mpt);
811 	}
812 	MPT_UNLOCK(mpt);
813 }
814 
815 /*
816  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
817  *
818  * Takes a list of physical segments and builds the SGL for SCSI IO command
819  * and forwards the commard to the IOC after one last check that CAM has not
820  * aborted the transaction.
821  */
822 static void
823 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
824 {
825 	request_t *req, *trq;
826 	char *mpt_off;
827 	union ccb *ccb;
828 	struct mpt_softc *mpt;
829 	int seg, first_lim;
830 	uint32_t flags, nxt_off;
831 	void *sglp;
832 	MSG_REQUEST_HEADER *hdrp;
833 	SGE_SIMPLE64 *se;
834 	SGE_CHAIN64 *ce;
835 
836 	req = (request_t *)arg;
837 	ccb = req->ccb;
838 
839 	mpt = ccb->ccb_h.ccb_mpt_ptr;
840 	req = ccb->ccb_h.ccb_req_ptr;
841 
842 	hdrp = req->req_vbuf;
843 	mpt_off = req->req_vbuf;
844 
845 	if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
846 		sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
847 	} else /* if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) */ {
848 		sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
849 	}
850 
851 
852 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
853 		error = EFBIG;
854 	}
855 
856 bad:
857 	if (error != 0) {
858 		if (error != EFBIG && error != ENOMEM) {
859 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
860 		}
861 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
862 			cam_status status;
863 			mpt_freeze_ccb(ccb);
864 			if (error == EFBIG) {
865 				status = CAM_REQ_TOO_BIG;
866 			} else if (error == ENOMEM) {
867 				if (mpt->outofbeer == 0) {
868 					mpt->outofbeer = 1;
869 					xpt_freeze_simq(mpt->sim, 1);
870 					mpt_lprt(mpt, MPT_PRT_DEBUG,
871 					    "FREEZEQ\n");
872 				}
873 				status = CAM_REQUEUE_REQ;
874 			} else {
875 				status = CAM_REQ_CMP_ERR;
876 			}
877 			mpt_set_ccb_status(ccb, status);
878 		}
879 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
880 			request_t *cmd_req =
881 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
882 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
883 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
884 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
885 		}
886 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
887 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
888 		xpt_done(ccb);
889 		CAMLOCK_2_MPTLOCK(mpt);
890 		mpt_free_request(mpt, req);
891 		MPTLOCK_2_CAMLOCK(mpt);
892 		return;
893 	}
894 
895 	/*
896 	 * No data to transfer?
897 	 * Just make a single simple SGL with zero length.
898 	 */
899 
900 	if (mpt->verbose >= MPT_PRT_DEBUG) {
901 		int tidx = ((char *)sglp) - mpt_off;
902 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
903 	}
904 
905 	if (nseg == 0) {
906 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
907 		MPI_pSGE_SET_FLAGS(se1,
908 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
909 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
910 		goto out;
911 	}
912 
913 
914 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
915 	if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
916 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
917 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
918 		}
919 	} else {
920 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
921 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
922 		}
923 	}
924 
925 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
926 		bus_dmasync_op_t op;
927 		if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
928 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
929 				op = BUS_DMASYNC_PREREAD;
930 			} else {
931 				op = BUS_DMASYNC_PREWRITE;
932 			}
933 		} else {
934 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
935 				op = BUS_DMASYNC_PREWRITE;
936 			} else {
937 				op = BUS_DMASYNC_PREREAD;
938 			}
939 		}
940 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
941 	}
942 
943 	/*
944 	 * Okay, fill in what we can at the end of the command frame.
945 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
946 	 * the command frame.
947 	 *
948 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
949 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
950 	 * that.
951 	 */
952 
953 	if (nseg < MPT_NSGL_FIRST(mpt)) {
954 		first_lim = nseg;
955 	} else {
956 		/*
957 		 * Leave room for CHAIN element
958 		 */
959 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
960 	}
961 
962 	se = (SGE_SIMPLE64 *) sglp;
963 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
964 		uint32_t tf;
965 
966 		memset(se, 0, sizeof (*se));
967 		se->Address.Low = dm_segs->ds_addr;
968 		if (sizeof(bus_addr_t) > 4) {
969 			se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
970 		}
971 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
972 		tf = flags;
973 		if (seg == first_lim - 1) {
974 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
975 		}
976 		if (seg == nseg - 1) {
977 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
978 				MPI_SGE_FLAGS_END_OF_BUFFER;
979 		}
980 		MPI_pSGE_SET_FLAGS(se, tf);
981 	}
982 
983 	if (seg == nseg) {
984 		goto out;
985 	}
986 
987 	/*
988 	 * Tell the IOC where to find the first chain element.
989 	 */
990 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
991 	nxt_off = MPT_RQSL(mpt);
992 	trq = req;
993 
994 	/*
995 	 * Make up the rest of the data segments out of a chain element
996 	 * (contiained in the current request frame) which points to
997 	 * SIMPLE64 elements in the next request frame, possibly ending
998 	 * with *another* chain element (if there's more).
999 	 */
1000 	while (seg < nseg) {
1001 		int this_seg_lim;
1002 		uint32_t tf, cur_off;
1003 		bus_addr_t chain_list_addr;
1004 
1005 		/*
1006 		 * Point to the chain descriptor. Note that the chain
1007 		 * descriptor is at the end of the *previous* list (whether
1008 		 * chain or simple).
1009 		 */
1010 		ce = (SGE_CHAIN64 *) se;
1011 
1012 		/*
1013 		 * Before we change our current pointer, make  sure we won't
1014 		 * overflow the request area with this frame. Note that we
1015 		 * test against 'greater than' here as it's okay in this case
1016 		 * to have next offset be just outside the request area.
1017 		 */
1018 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1019 			nxt_off = MPT_REQUEST_AREA;
1020 			goto next_chain;
1021 		}
1022 
1023 		/*
1024 		 * Set our SGE element pointer to the beginning of the chain
1025 		 * list and update our next chain list offset.
1026 		 */
1027 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1028 		cur_off = nxt_off;
1029 		nxt_off += MPT_RQSL(mpt);
1030 
1031 		/*
1032 		 * Now initialized the chain descriptor.
1033 		 */
1034 		memset(ce, 0, sizeof (*ce));
1035 
1036 		/*
1037 		 * Get the physical address of the chain list.
1038 		 */
1039 		chain_list_addr = trq->req_pbuf;
1040 		chain_list_addr += cur_off;
1041 		if (sizeof (bus_addr_t) > 4) {
1042 			ce->Address.High =
1043 			    (uint32_t) ((uint64_t)chain_list_addr >> 32);
1044 		}
1045 		ce->Address.Low = (uint32_t) chain_list_addr;
1046 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1047 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1048 
1049 		/*
1050 		 * If we have more than a frame's worth of segments left,
1051 		 * set up the chain list to have the last element be another
1052 		 * chain descriptor.
1053 		 */
1054 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1055 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1056 			/*
1057 			 * The length of the chain is the length in bytes of the
1058 			 * number of segments plus the next chain element.
1059 			 *
1060 			 * The next chain descriptor offset is the length,
1061 			 * in words, of the number of segments.
1062 			 */
1063 			ce->Length = (this_seg_lim - seg) *
1064 			    sizeof (SGE_SIMPLE64);
1065 			ce->NextChainOffset = ce->Length >> 2;
1066 			ce->Length += sizeof (SGE_CHAIN64);
1067 		} else {
1068 			this_seg_lim = nseg;
1069 			ce->Length = (this_seg_lim - seg) *
1070 			    sizeof (SGE_SIMPLE64);
1071 		}
1072 
1073 		/*
1074 		 * Fill in the chain list SGE elements with our segment data.
1075 		 *
1076 		 * If we're the last element in this chain list, set the last
1077 		 * element flag. If we're the completely last element period,
1078 		 * set the end of list and end of buffer flags.
1079 		 */
1080 		while (seg < this_seg_lim) {
1081 			memset(se, 0, sizeof (*se));
1082 			se->Address.Low = dm_segs->ds_addr;
1083 			if (sizeof (bus_addr_t) > 4) {
1084 				se->Address.High =
1085 				    ((uint64_t)dm_segs->ds_addr) >> 32;
1086 			}
1087 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1088 			tf = flags;
1089 			if (seg ==  this_seg_lim - 1) {
1090 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1091 			}
1092 			if (seg == nseg - 1) {
1093 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1094 					MPI_SGE_FLAGS_END_OF_BUFFER;
1095 			}
1096 			MPI_pSGE_SET_FLAGS(se, tf);
1097 			se++;
1098 			seg++;
1099 			dm_segs++;
1100 		}
1101 
1102     next_chain:
1103 		/*
1104 		 * If we have more segments to do and we've used up all of
1105 		 * the space in a request area, go allocate another one
1106 		 * and chain to that.
1107 		 */
1108 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1109 			request_t *nrq;
1110 
1111 			CAMLOCK_2_MPTLOCK(mpt);
1112 			nrq = mpt_get_request(mpt, FALSE);
1113 			MPTLOCK_2_CAMLOCK(mpt);
1114 
1115 			if (nrq == NULL) {
1116 				error = ENOMEM;
1117 				goto bad;
1118 			}
1119 
1120 			/*
1121 			 * Append the new request area on the tail of our list.
1122 			 */
1123 			if ((trq = req->chain) == NULL) {
1124 				req->chain = nrq;
1125 			} else {
1126 				while (trq->chain != NULL) {
1127 					trq = trq->chain;
1128 				}
1129 				trq->chain = nrq;
1130 			}
1131 			trq = nrq;
1132 			mpt_off = trq->req_vbuf;
1133 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1134 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1135 			}
1136 			nxt_off = 0;
1137 		}
1138 	}
1139 out:
1140 
1141 	/*
1142 	 * Last time we need to check if this CCB needs to be aborted.
1143 	 */
1144 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1145 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1146 			request_t *cmd_req =
1147 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1148 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1149 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1150 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1151 		}
1152 		mpt_prt(mpt,
1153 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1154 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1155 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1156 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1157 		}
1158 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1159 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1160 		xpt_done(ccb);
1161 		CAMLOCK_2_MPTLOCK(mpt);
1162 		mpt_free_request(mpt, req);
1163 		MPTLOCK_2_CAMLOCK(mpt);
1164 		return;
1165 	}
1166 
1167 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1168 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1169 		ccb->ccb_h.timeout_ch =
1170 			timeout(mpt_timeout, (caddr_t)ccb,
1171 				(ccb->ccb_h.timeout * hz) / 1000);
1172 	} else {
1173 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1174 	}
1175 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1176 		int nc = 0;
1177 		mpt_print_request(req->req_vbuf);
1178 		for (trq = req->chain; trq; trq = trq->chain) {
1179 			printf("  Additional Chain Area %d\n", nc++);
1180 			mpt_dump_sgl(trq->req_vbuf, 0);
1181 		}
1182 	}
1183 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1184 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1185 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1186 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1187 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1188 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1189 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1190 		} else {
1191 			tgt->state = TGT_STATE_MOVING_DATA;
1192 		}
1193 #else
1194 		tgt->state = TGT_STATE_MOVING_DATA;
1195 #endif
1196 	}
1197 	CAMLOCK_2_MPTLOCK(mpt);
1198 	mpt_send_cmd(mpt, req);
1199 	MPTLOCK_2_CAMLOCK(mpt);
1200 }
1201 
1202 static void
1203 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1204 {
1205 	request_t *req, *trq;
1206 	char *mpt_off;
1207 	union ccb *ccb;
1208 	struct mpt_softc *mpt;
1209 	int seg, first_lim;
1210 	uint32_t flags, nxt_off;
1211 	void *sglp;
1212 	MSG_REQUEST_HEADER *hdrp;
1213 	SGE_SIMPLE32 *se;
1214 	SGE_CHAIN32 *ce;
1215 
1216 	req = (request_t *)arg;
1217 	ccb = req->ccb;
1218 
1219 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1220 	req = ccb->ccb_h.ccb_req_ptr;
1221 
1222 	hdrp = req->req_vbuf;
1223 	mpt_off = req->req_vbuf;
1224 
1225 
1226 	if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
1227 		sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1228 	} else /* if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) */ {
1229 		sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1230 	}
1231 
1232 
1233 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1234 		error = EFBIG;
1235 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1236 		    nseg, mpt->max_seg_cnt);
1237 	}
1238 
1239 bad:
1240 	if (error != 0) {
1241 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1242 			request_t *cmd_req =
1243 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1244 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1245 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1246 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1247 		}
1248 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1249 			cam_status status;
1250 			mpt_freeze_ccb(ccb);
1251 			if (error == EFBIG) {
1252 				status = CAM_REQ_TOO_BIG;
1253 			} else if (error == ENOMEM) {
1254 				if (mpt->outofbeer == 0) {
1255 					mpt->outofbeer = 1;
1256 					xpt_freeze_simq(mpt->sim, 1);
1257 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1258 					    "FREEZEQ\n");
1259 				}
1260 				status = CAM_REQUEUE_REQ;
1261 			} else {
1262 				status = CAM_REQ_CMP_ERR;
1263 			}
1264 			mpt_set_ccb_status(ccb, status);
1265 		}
1266 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1267 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1268 		xpt_done(ccb);
1269 		CAMLOCK_2_MPTLOCK(mpt);
1270 		mpt_free_request(mpt, req);
1271 		MPTLOCK_2_CAMLOCK(mpt);
1272 		return;
1273 	}
1274 
1275 	/*
1276 	 * No data to transfer?
1277 	 * Just make a single simple SGL with zero length.
1278 	 */
1279 
1280 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1281 		int tidx = ((char *)sglp) - mpt_off;
1282 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1283 	}
1284 
1285 	if (nseg == 0) {
1286 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1287 		MPI_pSGE_SET_FLAGS(se1,
1288 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1289 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1290 		goto out;
1291 	}
1292 
1293 
1294 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1295 	if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
1296 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1297 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1298 		}
1299 	} else {
1300 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1301 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1302 		}
1303 	}
1304 
1305 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1306 		bus_dmasync_op_t op;
1307 		if (hdrp->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
1308 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1309 				op = BUS_DMASYNC_PREREAD;
1310 			} else {
1311 				op = BUS_DMASYNC_PREWRITE;
1312 			}
1313 		} else {
1314 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1315 				op = BUS_DMASYNC_PREWRITE;
1316 			} else {
1317 				op = BUS_DMASYNC_PREREAD;
1318 			}
1319 		}
1320 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1321 	}
1322 
1323 	/*
1324 	 * Okay, fill in what we can at the end of the command frame.
1325 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1326 	 * the command frame.
1327 	 *
1328 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1329 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1330 	 * that.
1331 	 */
1332 
1333 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1334 		first_lim = nseg;
1335 	} else {
1336 		/*
1337 		 * Leave room for CHAIN element
1338 		 */
1339 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1340 	}
1341 
1342 	se = (SGE_SIMPLE32 *) sglp;
1343 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1344 		uint32_t tf;
1345 
1346 		memset(se, 0,sizeof (*se));
1347 		se->Address = dm_segs->ds_addr;
1348 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1349 		tf = flags;
1350 		if (seg == first_lim - 1) {
1351 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1352 		}
1353 		if (seg == nseg - 1) {
1354 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1355 				MPI_SGE_FLAGS_END_OF_BUFFER;
1356 		}
1357 		MPI_pSGE_SET_FLAGS(se, tf);
1358 	}
1359 
1360 	if (seg == nseg) {
1361 		goto out;
1362 	}
1363 
1364 	/*
1365 	 * Tell the IOC where to find the first chain element.
1366 	 */
1367 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1368 	nxt_off = MPT_RQSL(mpt);
1369 	trq = req;
1370 
1371 	/*
1372 	 * Make up the rest of the data segments out of a chain element
1373 	 * (contiained in the current request frame) which points to
1374 	 * SIMPLE32 elements in the next request frame, possibly ending
1375 	 * with *another* chain element (if there's more).
1376 	 */
1377 	while (seg < nseg) {
1378 		int this_seg_lim;
1379 		uint32_t tf, cur_off;
1380 		bus_addr_t chain_list_addr;
1381 
1382 		/*
1383 		 * Point to the chain descriptor. Note that the chain
1384 		 * descriptor is at the end of the *previous* list (whether
1385 		 * chain or simple).
1386 		 */
1387 		ce = (SGE_CHAIN32 *) se;
1388 
1389 		/*
1390 		 * Before we change our current pointer, make  sure we won't
1391 		 * overflow the request area with this frame. Note that we
1392 		 * test against 'greater than' here as it's okay in this case
1393 		 * to have next offset be just outside the request area.
1394 		 */
1395 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1396 			nxt_off = MPT_REQUEST_AREA;
1397 			goto next_chain;
1398 		}
1399 
1400 		/*
1401 		 * Set our SGE element pointer to the beginning of the chain
1402 		 * list and update our next chain list offset.
1403 		 */
1404 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1405 		cur_off = nxt_off;
1406 		nxt_off += MPT_RQSL(mpt);
1407 
1408 		/*
1409 		 * Now initialized the chain descriptor.
1410 		 */
1411 		memset(ce, 0, sizeof (*ce));
1412 
1413 		/*
1414 		 * Get the physical address of the chain list.
1415 		 */
1416 		chain_list_addr = trq->req_pbuf;
1417 		chain_list_addr += cur_off;
1418 		ce->Address = chain_list_addr;
1419 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1420 
1421 		/*
1422 		 * If we have more than a frame's worth of segments left,
1423 		 * set up the chain list to have the last element be another
1424 		 * chain descriptor.
1425 		 */
1426 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1427 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1428 			/*
1429 			 * The length of the chain is the length in bytes of the
1430 			 * number of segments plus the next chain element.
1431 			 *
1432 			 * The next chain descriptor offset is the length,
1433 			 * in words, of the number of segments.
1434 			 */
1435 			ce->Length = (this_seg_lim - seg) *
1436 			    sizeof (SGE_SIMPLE32);
1437 			ce->NextChainOffset = ce->Length >> 2;
1438 			ce->Length += sizeof (SGE_CHAIN32);
1439 		} else {
1440 			this_seg_lim = nseg;
1441 			ce->Length = (this_seg_lim - seg) *
1442 			    sizeof (SGE_SIMPLE32);
1443 		}
1444 
1445 		/*
1446 		 * Fill in the chain list SGE elements with our segment data.
1447 		 *
1448 		 * If we're the last element in this chain list, set the last
1449 		 * element flag. If we're the completely last element period,
1450 		 * set the end of list and end of buffer flags.
1451 		 */
1452 		while (seg < this_seg_lim) {
1453 			memset(se, 0, sizeof (*se));
1454 			se->Address = dm_segs->ds_addr;
1455 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1456 			tf = flags;
1457 			if (seg ==  this_seg_lim - 1) {
1458 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1459 			}
1460 			if (seg == nseg - 1) {
1461 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1462 					MPI_SGE_FLAGS_END_OF_BUFFER;
1463 			}
1464 			MPI_pSGE_SET_FLAGS(se, tf);
1465 			se++;
1466 			seg++;
1467 			dm_segs++;
1468 		}
1469 
1470     next_chain:
1471 		/*
1472 		 * If we have more segments to do and we've used up all of
1473 		 * the space in a request area, go allocate another one
1474 		 * and chain to that.
1475 		 */
1476 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1477 			request_t *nrq;
1478 
1479 			CAMLOCK_2_MPTLOCK(mpt);
1480 			nrq = mpt_get_request(mpt, FALSE);
1481 			MPTLOCK_2_CAMLOCK(mpt);
1482 
1483 			if (nrq == NULL) {
1484 				error = ENOMEM;
1485 				goto bad;
1486 			}
1487 
1488 			/*
1489 			 * Append the new request area on the tail of our list.
1490 			 */
1491 			if ((trq = req->chain) == NULL) {
1492 				req->chain = nrq;
1493 			} else {
1494 				while (trq->chain != NULL) {
1495 					trq = trq->chain;
1496 				}
1497 				trq->chain = nrq;
1498 			}
1499 			trq = nrq;
1500 			mpt_off = trq->req_vbuf;
1501 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1502 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1503 			}
1504 			nxt_off = 0;
1505 		}
1506 	}
1507 out:
1508 
1509 	/*
1510 	 * Last time we need to check if this CCB needs to be aborted.
1511 	 */
1512 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1513 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1514 			request_t *cmd_req =
1515 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1516 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1517 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1518 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1519 		}
1520 		mpt_prt(mpt, "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1521 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1522 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1523 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1524 		}
1525 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1526 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1527 		xpt_done(ccb);
1528 		CAMLOCK_2_MPTLOCK(mpt);
1529 		mpt_free_request(mpt, req);
1530 		MPTLOCK_2_CAMLOCK(mpt);
1531 		return;
1532 	}
1533 
1534 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1535 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1536 		ccb->ccb_h.timeout_ch =
1537 			timeout(mpt_timeout, (caddr_t)ccb,
1538 				(ccb->ccb_h.timeout * hz) / 1000);
1539 	} else {
1540 		callout_handle_init(&ccb->ccb_h.timeout_ch);
1541 	}
1542 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1543 		int nc = 0;
1544 		mpt_print_request(req->req_vbuf);
1545 		for (trq = req->chain; trq; trq = trq->chain) {
1546 			printf("  Additional Chain Area %d\n", nc++);
1547 			mpt_dump_sgl(trq->req_vbuf, 0);
1548 		}
1549 	}
1550 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1551 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1552 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1553 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1554 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1555 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1556 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1557 		} else {
1558 			tgt->state = TGT_STATE_MOVING_DATA;
1559 		}
1560 #else
1561 		tgt->state = TGT_STATE_MOVING_DATA;
1562 #endif
1563 	}
1564 	CAMLOCK_2_MPTLOCK(mpt);
1565 	mpt_send_cmd(mpt, req);
1566 	MPTLOCK_2_CAMLOCK(mpt);
1567 }
1568 
1569 static void
1570 mpt_start(struct cam_sim *sim, union ccb *ccb)
1571 {
1572 	request_t *req;
1573 	struct mpt_softc *mpt;
1574 	MSG_SCSI_IO_REQUEST *mpt_req;
1575 	struct ccb_scsiio *csio = &ccb->csio;
1576 	struct ccb_hdr *ccbh = &ccb->ccb_h;
1577 	bus_dmamap_callback_t *cb;
1578 	int raid_passthru;
1579 
1580 	/* Get the pointer for the physical addapter */
1581 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1582 	raid_passthru = (sim == mpt->phydisk_sim);
1583 
1584 	CAMLOCK_2_MPTLOCK(mpt);
1585 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
1586 		if (mpt->outofbeer == 0) {
1587 			mpt->outofbeer = 1;
1588 			xpt_freeze_simq(mpt->sim, 1);
1589 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
1590 		}
1591 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1592 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
1593 		MPTLOCK_2_CAMLOCK(mpt);
1594 		xpt_done(ccb);
1595 		return;
1596 	}
1597 #ifdef	INVARIANTS
1598 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
1599 #endif
1600 	MPTLOCK_2_CAMLOCK(mpt);
1601 
1602 	if (sizeof (bus_addr_t) > 4) {
1603 		cb = mpt_execute_req_a64;
1604 	} else {
1605 		cb = mpt_execute_req;
1606 	}
1607 
1608 #if 0
1609 	COWWWWW
1610 	if (raid_passthru) {
1611 		status = mpt_raid_quiesce_disk(mpt, mpt->raid_disks + ccb->ccb_h.target_id,
1612 		     request_t *req)
1613 	}
1614 #endif
1615 
1616 	/*
1617 	 * Link the ccb and the request structure so we can find
1618 	 * the other knowing either the request or the ccb
1619 	 */
1620 	req->ccb = ccb;
1621 	ccb->ccb_h.ccb_req_ptr = req;
1622 
1623 	/* Now we build the command for the IOC */
1624 	mpt_req = req->req_vbuf;
1625 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
1626 
1627 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
1628 	if (raid_passthru) {
1629 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
1630 	}
1631 	mpt_req->Bus = 0;	/* we don't have multiport devices yet */
1632 	mpt_req->SenseBufferLength =
1633 		(csio->sense_len < MPT_SENSE_SIZE) ?
1634 		 csio->sense_len : MPT_SENSE_SIZE;
1635 
1636 	/*
1637 	 * We use the message context to find the request structure when we
1638 	 * Get the command completion interrupt from the IOC.
1639 	 */
1640 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
1641 
1642 	/* Which physical device to do the I/O on */
1643 	mpt_req->TargetID = ccb->ccb_h.target_id;
1644 
1645 	/* We assume a single level LUN type */
1646 	if (ccb->ccb_h.target_lun >= 256) {
1647 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
1648 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
1649 	} else {
1650 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
1651 	}
1652 
1653 	/* Set the direction of the transfer */
1654 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1655 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
1656 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1657 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
1658 	} else {
1659 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
1660 	}
1661 
1662 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
1663 		switch(ccb->csio.tag_action) {
1664 		case MSG_HEAD_OF_Q_TAG:
1665 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
1666 			break;
1667 		case MSG_ACA_TASK:
1668 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
1669 			break;
1670 		case MSG_ORDERED_Q_TAG:
1671 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
1672 			break;
1673 		case MSG_SIMPLE_Q_TAG:
1674 		default:
1675 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1676 			break;
1677 		}
1678 	} else {
1679 		if (mpt->is_fc || mpt->is_sas) {
1680 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
1681 		} else {
1682 			/* XXX No such thing for a target doing packetized. */
1683 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
1684 		}
1685 	}
1686 
1687 	if (mpt->is_fc == 0 && mpt->is_sas == 0) {
1688 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1689 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
1690 		}
1691 	}
1692 
1693 	/* Copy the scsi command block into place */
1694 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1695 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
1696 	} else {
1697 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
1698 	}
1699 
1700 	mpt_req->CDBLength = csio->cdb_len;
1701 	mpt_req->DataLength = csio->dxfer_len;
1702 	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
1703 
1704 	/*
1705 	 * If we have any data to send with this command map it into bus space.
1706 	 */
1707 
1708 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1709 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
1710 			/*
1711 			 * We've been given a pointer to a single buffer.
1712 			 */
1713 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
1714 				/*
1715 				 * Virtual address that needs to translated into
1716 				 * one or more physical address ranges.
1717 				 */
1718 				int error;
1719 				int s = splsoftvm();
1720 				error = bus_dmamap_load(mpt->buffer_dmat,
1721 				    req->dmap, csio->data_ptr, csio->dxfer_len,
1722 				    cb, req, 0);
1723 				splx(s);
1724 				if (error == EINPROGRESS) {
1725 					/*
1726 					 * So as to maintain ordering,
1727 					 * freeze the controller queue
1728 					 * until our mapping is
1729 					 * returned.
1730 					 */
1731 					xpt_freeze_simq(mpt->sim, 1);
1732 					ccbh->status |= CAM_RELEASE_SIMQ;
1733 				}
1734 			} else {
1735 				/*
1736 				 * We have been given a pointer to single
1737 				 * physical buffer.
1738 				 */
1739 				struct bus_dma_segment seg;
1740 				seg.ds_addr =
1741 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
1742 				seg.ds_len = csio->dxfer_len;
1743 				(*cb)(req, &seg, 1, 0);
1744 			}
1745 		} else {
1746 			/*
1747 			 * We have been given a list of addresses.
1748 			 * This case could be easily supported but they are not
1749 			 * currently generated by the CAM subsystem so there
1750 			 * is no point in wasting the time right now.
1751 			 */
1752 			struct bus_dma_segment *segs;
1753 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
1754 				(*cb)(req, NULL, 0, EFAULT);
1755 			} else {
1756 				/* Just use the segments provided */
1757 				segs = (struct bus_dma_segment *)csio->data_ptr;
1758 				(*cb)(req, segs, csio->sglist_cnt, 0);
1759 			}
1760 		}
1761 	} else {
1762 		(*cb)(req, NULL, 0, 0);
1763 	}
1764 }
1765 
1766 static int
1767 mpt_bus_reset(struct mpt_softc *mpt, int sleep_ok)
1768 {
1769 	int   error;
1770 	uint16_t status;
1771 	uint8_t response;
1772 
1773 	error = mpt_scsi_send_tmf(mpt, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
1774 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
1775 	    0, 0, 0, 0, sleep_ok);
1776 
1777 	if (error != 0) {
1778 		/*
1779 		 * mpt_scsi_send_tmf hard resets on failure, so no
1780 		 * need to do so here.
1781 		 */
1782 		mpt_prt(mpt,
1783 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
1784 		return (EIO);
1785 	}
1786 
1787 	/* Wait for bus reset to be processed by the IOC. */
1788 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
1789 	    REQ_STATE_DONE, sleep_ok, 5000);
1790 
1791 	status = mpt->tmf_req->IOCStatus;
1792 	response = mpt->tmf_req->ResponseCode;
1793 	mpt->tmf_req->state = REQ_STATE_FREE;
1794 
1795 	if (error) {
1796 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
1797 		    "Resetting controller.\n");
1798 		mpt_reset(mpt, TRUE);
1799 		return (ETIMEDOUT);
1800 	}
1801 
1802 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1803 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
1804 		    "Resetting controller.\n", status);
1805 		mpt_reset(mpt, TRUE);
1806 		return (EIO);
1807 	}
1808 
1809 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
1810 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
1811 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
1812 		    "Resetting controller.\n", response);
1813 		mpt_reset(mpt, TRUE);
1814 		return (EIO);
1815 	}
1816 	return (0);
1817 }
1818 
1819 static int
1820 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
1821 {
1822 	int r = 0;
1823 	request_t *req;
1824 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
1825 
1826  	req = mpt_get_request(mpt, FALSE);
1827 	if (req == NULL) {
1828 		return (ENOMEM);
1829 	}
1830 	fc = req->req_vbuf;
1831 	memset(fc, 0, sizeof(*fc));
1832 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
1833 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
1834 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
1835 	mpt_send_cmd(mpt, req);
1836 	if (dowait) {
1837 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
1838 		    REQ_STATE_DONE, FALSE, 60 * 1000);
1839 		if (r == 0) {
1840 			mpt_free_request(mpt, req);
1841 		}
1842 	}
1843 	return (r);
1844 }
1845 
1846 static int
1847 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
1848 	      MSG_EVENT_NOTIFY_REPLY *msg)
1849 {
1850 	switch(msg->Event & 0xFF) {
1851 	case MPI_EVENT_UNIT_ATTENTION:
1852 		mpt_prt(mpt, "Bus: 0x%02x TargetID: 0x%02x\n",
1853 		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1854 		break;
1855 
1856 	case MPI_EVENT_IOC_BUS_RESET:
1857 		/* We generated a bus reset */
1858 		mpt_prt(mpt, "IOC Bus Reset Port: %d\n",
1859 		    (msg->Data[0] >> 8) & 0xff);
1860 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
1861 		break;
1862 
1863 	case MPI_EVENT_EXT_BUS_RESET:
1864 		/* Someone else generated a bus reset */
1865 		mpt_prt(mpt, "External Bus Reset Detected\n");
1866 		/*
1867 		 * These replies don't return EventData like the MPI
1868 		 * spec says they do
1869 		 */
1870 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
1871 		break;
1872 
1873 	case MPI_EVENT_RESCAN:
1874 		/*
1875 		 * In general this means a device has been added to the loop.
1876 		 */
1877 		mpt_prt(mpt, "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
1878 /*		xpt_async(AC_FOUND_DEVICE, path, NULL);  */
1879 		break;
1880 
1881 	case MPI_EVENT_LINK_STATUS_CHANGE:
1882 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
1883 		    (msg->Data[1] >> 8) & 0xff,
1884 		    ((msg->Data[0] & 0xff) == 0)?  "Failed" : "Active");
1885 		break;
1886 
1887 	case MPI_EVENT_LOOP_STATE_CHANGE:
1888 		switch ((msg->Data[0] >> 16) & 0xff) {
1889 		case 0x01:
1890 			mpt_prt(mpt,
1891 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
1892 			    "(Loop Initialization)\n",
1893 			    (msg->Data[1] >> 8) & 0xff,
1894 			    (msg->Data[0] >> 8) & 0xff,
1895 			    (msg->Data[0]     ) & 0xff);
1896 			switch ((msg->Data[0] >> 8) & 0xff) {
1897 			case 0xF7:
1898 				if ((msg->Data[0] & 0xff) == 0xF7) {
1899 					mpt_prt(mpt, "Device needs AL_PA\n");
1900 				} else {
1901 					mpt_prt(mpt, "Device %02x doesn't like "
1902 					    "FC performance\n",
1903 					    msg->Data[0] & 0xFF);
1904 				}
1905 				break;
1906 			case 0xF8:
1907 				if ((msg->Data[0] & 0xff) == 0xF7) {
1908 					mpt_prt(mpt, "Device had loop failure "
1909 					    "at its receiver prior to acquiring"
1910 					    " AL_PA\n");
1911 				} else {
1912 					mpt_prt(mpt, "Device %02x detected loop"
1913 					    " failure at its receiver\n",
1914 					    msg->Data[0] & 0xFF);
1915 				}
1916 				break;
1917 			default:
1918 				mpt_prt(mpt, "Device %02x requests that device "
1919 				    "%02x reset itself\n",
1920 				    msg->Data[0] & 0xFF,
1921 				    (msg->Data[0] >> 8) & 0xFF);
1922 				break;
1923 			}
1924 			break;
1925 		case 0x02:
1926 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
1927 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
1928 			    (msg->Data[1] >> 8) & 0xff, /* Port */
1929 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
1930 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
1931 			break;
1932 		case 0x03:
1933 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
1934 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
1935 			    (msg->Data[1] >> 8) & 0xff, /* Port */
1936 			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
1937 			    (msg->Data[0]     ) & 0xff  /* Character 4 */);
1938 			break;
1939 		default:
1940 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
1941 			    "FC event (%02x %02x %02x)\n",
1942 			    (msg->Data[1] >> 8) & 0xff, /* Port */
1943 			    (msg->Data[0] >> 16) & 0xff, /* Event */
1944 			    (msg->Data[0] >>  8) & 0xff, /* Character 3 */
1945 			    (msg->Data[0]      ) & 0xff  /* Character 4 */);
1946 		}
1947 		break;
1948 
1949 	case MPI_EVENT_LOGOUT:
1950 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
1951 		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
1952 		break;
1953 	case MPI_EVENT_EVENT_CHANGE:
1954 		mpt_lprt(mpt, MPT_PRT_DEBUG,
1955 		    "mpt_cam_event: MPI_EVENT_EVENT_CHANGE\n");
1956 		break;
1957 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1958 		/*
1959 		 * Devices are attachin'.....
1960 		 */
1961 		mpt_prt(mpt,
1962 		    "mpt_cam_event: MPI_EVENT_SAS_DEVICE_STATUS_CHANGE\n");
1963 		break;
1964 	default:
1965 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
1966 		    msg->Event & 0xFF);
1967 		return (0);
1968 	}
1969 	return (1);
1970 }
1971 
1972 /*
1973  * Reply path for all SCSI I/O requests, called from our
1974  * interrupt handler by extracting our handler index from
1975  * the MsgContext field of the reply from the IOC.
1976  *
1977  * This routine is optimized for the common case of a
1978  * completion without error.  All exception handling is
1979  * offloaded to non-inlined helper routines to minimize
1980  * cache footprint.
1981  */
1982 static int
1983 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
1984     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
1985 {
1986 	MSG_SCSI_IO_REQUEST *scsi_req;
1987 	union ccb *ccb;
1988 
1989 	if (req->state == REQ_STATE_FREE) {
1990 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
1991 		return (TRUE);
1992 	}
1993 
1994 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
1995 	ccb = req->ccb;
1996 	if (ccb == NULL) {
1997 		mpt_prt(mpt, "req %p:%u without CCB (state %#x "
1998 		    "func %#x index %u rf %p)\n", req, req->serno, req->state,
1999 		    scsi_req->Function, req->index, reply_frame);
2000 		mpt_print_scsi_io_request(scsi_req);
2001 		return (TRUE);
2002 	}
2003 
2004 	untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
2005 
2006 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2007 		bus_dmasync_op_t op;
2008 
2009 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2010 			op = BUS_DMASYNC_POSTREAD;
2011 		else
2012 			op = BUS_DMASYNC_POSTWRITE;
2013 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2014 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2015 	}
2016 
2017 	if (reply_frame == NULL) {
2018 		/*
2019 		 * Context only reply, completion without error status.
2020 		 */
2021 		ccb->csio.resid = 0;
2022 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2023 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2024 	} else {
2025 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2026 	}
2027 
2028 	if (mpt->outofbeer) {
2029 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2030 		mpt->outofbeer = 0;
2031 		mpt_lprt(mpt,  MPT_PRT_DEBUG, "THAWQ\n");
2032 	}
2033 	if (scsi_req->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH &&
2034 	    scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2035 		struct scsi_inquiry_data *inq;
2036 		/*
2037 		 * Fake out the device type so that only the
2038 		 * pass-thru device will attach.
2039 		 */
2040 		inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2041 		inq->device &= ~0x1F;
2042 		inq->device |= T_NODEVICE;
2043 	}
2044 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2045 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2046 	MPTLOCK_2_CAMLOCK(mpt);
2047 	xpt_done(ccb);
2048 	CAMLOCK_2_MPTLOCK(mpt);
2049 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2050 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2051 	} else {
2052 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2053 		    req, req->serno);
2054 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2055 	}
2056 	if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2057 #ifdef	INVARIANTS
2058 		mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2059 #endif
2060 		mpt_free_request(mpt, req);
2061 		return (TRUE);
2062 	}
2063 	req->state &= ~REQ_STATE_QUEUED;
2064 	req->state |= REQ_STATE_DONE;
2065 	wakeup(req);
2066 	return (TRUE);
2067 }
2068 
2069 static int
2070 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2071     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2072 {
2073 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2074 
2075 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2076 #ifdef	INVARIANTS
2077 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2078 #endif
2079 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2080 	/* Record IOC Status and Response Code of TMF for any waiters. */
2081 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2082 	req->ResponseCode = tmf_reply->ResponseCode;
2083 
2084 	mpt_lprt(mpt, MPT_PRT_INFO, "TMF complete: req %p:%u status 0x%x\n",
2085 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2086 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2087 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2088 		req->state |= REQ_STATE_DONE;
2089 		wakeup(req);
2090 	} else {
2091 		mpt->tmf_req->state = REQ_STATE_FREE;
2092 	}
2093 	return (TRUE);
2094 }
2095 
2096 
2097 /*
2098  * XXX: Move to definitions file
2099  */
2100 #define	ELS	0x22
2101 #define	FC4LS	0x32
2102 #define	ABTS	0x81
2103 #define	BA_ACC	0x84
2104 
2105 #define	LS_RJT	0x01
2106 #define	LS_ACC	0x02
2107 #define	PLOGI	0x03
2108 #define	LOGO	0x05
2109 #define SRR	0x14
2110 #define PRLI	0x20
2111 #define PRLO	0x21
2112 #define ADISC	0x52
2113 #define RSCN	0x61
2114 
2115 static void
2116 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2117     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2118 {
2119 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2120 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2121 
2122 	/*
2123 	 * We are going to reuse the ELS request to send this response back.
2124 	 */
2125 	rsp = &tmp;
2126 	memset(rsp, 0, sizeof(*rsp));
2127 
2128 #ifdef	USE_IMMEDIATE_LINK_DATA
2129 	/*
2130 	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2131 	 */
2132 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2133 #endif
2134 	rsp->RspLength = length;
2135 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2136 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2137 
2138 	/*
2139 	 * Copy over information from the original reply frame to
2140 	 * it's correct place in the response.
2141 	 */
2142 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2143 
2144 	/*
2145 	 * And now copy back the temporary area to the original frame.
2146 	 */
2147 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2148 	rsp = req->req_vbuf;
2149 
2150 #ifdef	USE_IMMEDIATE_LINK_DATA
2151 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2152 #else
2153 {
2154 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2155 	bus_addr_t paddr = req->req_pbuf;
2156 	paddr += MPT_RQSL(mpt);
2157 
2158 	se->FlagsLength =
2159 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2160 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2161 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2162 		MPI_SGE_FLAGS_END_OF_LIST	|
2163 		MPI_SGE_FLAGS_END_OF_BUFFER;
2164 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
2165 	se->FlagsLength |= (length);
2166 	se->Address = (uint32_t) paddr;
2167 }
2168 #endif
2169 
2170 	/*
2171 	 * Send it on...
2172 	 */
2173 	mpt_send_cmd(mpt, req);
2174 }
2175 
2176 static int
2177 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2178     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2179 {
2180 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2181 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2182 	U8 rctl;
2183 	U8 type;
2184 	U8 cmd;
2185 	U16 status = le16toh(reply_frame->IOCStatus);
2186 	U32 *elsbuf;
2187 	int ioindex;
2188 	int do_refresh = TRUE;
2189 
2190 #ifdef	INVARIANTS
2191 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2192 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2193 	    req, req->serno, rp->Function));
2194 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2195 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2196 	} else {
2197 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2198 	}
2199 #endif
2200 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2201 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2202 	    req, req->serno, reply_frame, reply_frame->Function);
2203 
2204 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2205 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2206 		    status, reply_frame->Function);
2207 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2208 			/*
2209 			 * XXX: to get around shutdown issue
2210 			 */
2211 			mpt->disabled = 1;
2212 			return (TRUE);
2213 		}
2214 		return (TRUE);
2215 	}
2216 
2217 	/*
2218 	 * If the function of a link service response, we recycle the
2219 	 * response to be a refresh for a new link service request.
2220 	 *
2221 	 * The request pointer is bogus in this case and we have to fetch
2222 	 * it based upon the TransactionContext.
2223 	 */
2224 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2225 		/* Freddie Uncle Charlie Katie */
2226 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2227 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2228 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2229 				break;
2230 			}
2231 
2232 		KASSERT(ioindex < mpt->els_cmds_allocated,
2233 		    ("can't find my mommie!"));
2234 
2235 		/* remove from active list as we're going to re-post it */
2236 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2237 		req->state &= ~REQ_STATE_QUEUED;
2238 		req->state |= REQ_STATE_DONE;
2239 		mpt_fc_post_els(mpt, req, ioindex);
2240 		return (TRUE);
2241 	}
2242 
2243 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2244 		/* remove from active list as we're done */
2245 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2246 		req->state &= ~REQ_STATE_QUEUED;
2247 		req->state |= REQ_STATE_DONE;
2248 		if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2249 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2250 			    "Async Primitive Send Complete\n");
2251 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2252 			mpt_free_request(mpt, req);
2253 		} else {
2254 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2255 			    "Sync Primitive Send Complete\n");
2256 			wakeup(req);
2257 		}
2258 		return (TRUE);
2259 	}
2260 
2261 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2262 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2263 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2264 		    rp->MsgLength, rp->MsgFlags);
2265 		return (TRUE);
2266 	}
2267 
2268 	if (rp->MsgLength <= 5) {
2269 		/*
2270 		 * This is just a ack of an original ELS buffer post
2271 		 */
2272 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2273 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2274 		return (TRUE);
2275 	}
2276 
2277 
2278 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2279 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2280 
2281 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2282 	cmd = be32toh(elsbuf[0]) >> 24;
2283 
2284 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2285 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2286 		return (TRUE);
2287 	}
2288 
2289 	ioindex = le32toh(rp->TransactionContext);
2290 	req = mpt->els_cmd_ptrs[ioindex];
2291 
2292 	if (rctl == ELS && type == 1) {
2293 		switch (cmd) {
2294 		case PRLI:
2295 			/*
2296 			 * Send back a PRLI ACC
2297 			 */
2298 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2299 			    le32toh(rp->Wwn.PortNameHigh),
2300 			    le32toh(rp->Wwn.PortNameLow));
2301 			elsbuf[0] = htobe32(0x02100014);
2302 			elsbuf[1] |= htobe32(0x00000100);
2303 			elsbuf[4] = htobe32(0x00000002);
2304 			if (mpt->role & MPT_ROLE_TARGET)
2305 				elsbuf[4] |= htobe32(0x00000010);
2306 			if (mpt->role & MPT_ROLE_INITIATOR)
2307 				elsbuf[4] |= htobe32(0x00000020);
2308 			/* remove from active list as we're done */
2309 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2310 			req->state &= ~REQ_STATE_QUEUED;
2311 			req->state |= REQ_STATE_DONE;
2312 			mpt_fc_els_send_response(mpt, req, rp, 20);
2313 			do_refresh = FALSE;
2314 			break;
2315 		case PRLO:
2316 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2317 			elsbuf[0] = htobe32(0x02100014);
2318 			elsbuf[1] = htobe32(0x08000100);
2319 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2320 			    le32toh(rp->Wwn.PortNameHigh),
2321 			    le32toh(rp->Wwn.PortNameLow));
2322 			/* remove from active list as we're done */
2323 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2324 			req->state &= ~REQ_STATE_QUEUED;
2325 			req->state |= REQ_STATE_DONE;
2326 			mpt_fc_els_send_response(mpt, req, rp, 20);
2327 			do_refresh = FALSE;
2328 			break;
2329 		default:
2330 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2331 			break;
2332 		}
2333 	} else if (rctl == ABTS && type == 0) {
2334 		uint16_t rx_id = le16toh(rp->Rxid);
2335 		uint16_t ox_id = le16toh(rp->Oxid);
2336 		request_t *tgt_req = NULL;
2337 
2338 		mpt_prt(mpt,
2339 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2340 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2341 		    le32toh(rp->Wwn.PortNameLow));
2342 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2343 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2344 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2345 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2346 		} else {
2347 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2348 		}
2349 		if (tgt_req) {
2350 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2351 			uint8_t *vbuf;
2352 			union ccb *ccb = tgt->ccb;
2353 			uint32_t ct_id;
2354 
2355 			vbuf = tgt_req->req_vbuf;
2356 			vbuf += MPT_RQSL(mpt);
2357 
2358 			/*
2359 			 * Check to make sure we have the correct command
2360 			 * The reply descriptor in the target state should
2361 			 * should contain an IoIndex that should match the
2362 			 * RX_ID.
2363 			 *
2364 			 * It'd be nice to have OX_ID to crosscheck with
2365 			 * as well.
2366 			 */
2367 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2368 
2369 			if (ct_id != rx_id) {
2370 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2371 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2372 				    rx_id, ct_id);
2373 				goto skip;
2374 			}
2375 
2376 			ccb = tgt->ccb;
2377 			if (ccb) {
2378 				mpt_prt(mpt,
2379 				    "CCB (%p): lun %u flags %x status %x\n",
2380 				    ccb, ccb->ccb_h.target_lun,
2381 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2382 			}
2383 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2384 			    "%x nxfers %x\n", tgt->state,
2385 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2386 			    tgt->nxfers);
2387   skip:
2388 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
2389 				mpt_prt(mpt, "unable to start TargetAbort\n");
2390 			}
2391 		} else {
2392 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2393 		}
2394 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2395 		elsbuf[0] = htobe32(0);
2396 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2397 		elsbuf[2] = htobe32(0x000ffff);
2398 		/*
2399 		 * Dork with the reply frame so that the reponse to it
2400 		 * will be correct.
2401 		 */
2402 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2403 		/* remove from active list as we're done */
2404 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2405 		req->state &= ~REQ_STATE_QUEUED;
2406 		req->state |= REQ_STATE_DONE;
2407 		mpt_fc_els_send_response(mpt, req, rp, 12);
2408 		do_refresh = FALSE;
2409 	} else {
2410 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2411 	}
2412 	if (do_refresh == TRUE) {
2413 		/* remove from active list as we're done */
2414 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2415 		req->state &= ~REQ_STATE_QUEUED;
2416 		req->state |= REQ_STATE_DONE;
2417 		mpt_fc_post_els(mpt, req, ioindex);
2418 	}
2419 	return (TRUE);
2420 }
2421 
2422 /*
2423  * Clean up all SCSI Initiator personality state in response
2424  * to a controller reset.
2425  */
2426 static void
2427 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2428 {
2429 	/*
2430 	 * The pending list is already run down by
2431 	 * the generic handler.  Perform the same
2432 	 * operation on the timed out request list.
2433 	 */
2434 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
2435 				   MPI_IOCSTATUS_INVALID_STATE);
2436 
2437 	/*
2438 	 * XXX: We need to repost ELS and Target Command Buffers?
2439 	 */
2440 
2441 	/*
2442 	 * Inform the XPT that a bus reset has occurred.
2443 	 */
2444 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
2445 }
2446 
2447 /*
2448  * Parse additional completion information in the reply
2449  * frame for SCSI I/O requests.
2450  */
2451 static int
2452 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
2453 			     MSG_DEFAULT_REPLY *reply_frame)
2454 {
2455 	union ccb *ccb;
2456 	MSG_SCSI_IO_REPLY *scsi_io_reply;
2457 	u_int ioc_status;
2458 	u_int sstate;
2459 	u_int loginfo;
2460 
2461 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
2462 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
2463 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
2464 		("MPT SCSI I/O Handler called with incorrect reply type"));
2465 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
2466 		("MPT SCSI I/O Handler called with continuation reply"));
2467 
2468 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
2469 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
2470 	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
2471 	ioc_status &= MPI_IOCSTATUS_MASK;
2472 	sstate = scsi_io_reply->SCSIState;
2473 
2474 	ccb = req->ccb;
2475 	ccb->csio.resid =
2476 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
2477 
2478 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
2479 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
2480 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2481 		ccb->csio.sense_resid =
2482 		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
2483 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
2484 		      min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
2485 	}
2486 
2487 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
2488 		/*
2489 		 * Tag messages rejected, but non-tagged retry
2490 		 * was successful.
2491 XXXX
2492 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
2493 		 */
2494 	}
2495 
2496 	switch(ioc_status) {
2497 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2498 		/*
2499 		 * XXX
2500 		 * Linux driver indicates that a zero
2501 		 * transfer length with this error code
2502 		 * indicates a CRC error.
2503 		 *
2504 		 * No need to swap the bytes for checking
2505 		 * against zero.
2506 		 */
2507 		if (scsi_io_reply->TransferCount == 0) {
2508 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2509 			break;
2510 		}
2511 		/* FALLTHROUGH */
2512 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
2513 	case MPI_IOCSTATUS_SUCCESS:
2514 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
2515 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
2516 			/*
2517 			 * Status was never returned for this transaction.
2518 			 */
2519 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
2520 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
2521 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
2522 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
2523 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
2524 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
2525 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
2526 
2527 			/* XXX Handle SPI-Packet and FCP-2 reponse info. */
2528 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2529 		} else
2530 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2531 		break;
2532 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
2533 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
2534 		break;
2535 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
2536 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
2537 		break;
2538 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2539 		/*
2540 		 * Since selection timeouts and "device really not
2541 		 * there" are grouped into this error code, report
2542 		 * selection timeout.  Selection timeouts are
2543 		 * typically retried before giving up on the device
2544 		 * whereas "device not there" errors are considered
2545 		 * unretryable.
2546 		 */
2547 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
2548 		break;
2549 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2550 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
2551 		break;
2552 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
2553 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
2554 		break;
2555 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
2556 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
2557 		break;
2558 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2559 		ccb->ccb_h.status = CAM_UA_TERMIO;
2560 		break;
2561 	case MPI_IOCSTATUS_INVALID_STATE:
2562 		/*
2563 		 * The IOC has been reset.  Emulate a bus reset.
2564 		 */
2565 		/* FALLTHROUGH */
2566 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
2567 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2568 		break;
2569 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
2570 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
2571 		/*
2572 		 * Don't clobber any timeout status that has
2573 		 * already been set for this transaction.  We
2574 		 * want the SCSI layer to be able to differentiate
2575 		 * between the command we aborted due to timeout
2576 		 * and any innocent bystanders.
2577 		 */
2578 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
2579 			break;
2580 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
2581 		break;
2582 
2583 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
2584 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
2585 		break;
2586 	case MPI_IOCSTATUS_BUSY:
2587 		mpt_set_ccb_status(ccb, CAM_BUSY);
2588 		break;
2589 	case MPI_IOCSTATUS_INVALID_FUNCTION:
2590 	case MPI_IOCSTATUS_INVALID_SGL:
2591 	case MPI_IOCSTATUS_INTERNAL_ERROR:
2592 	case MPI_IOCSTATUS_INVALID_FIELD:
2593 	default:
2594 		/* XXX
2595 		 * Some of the above may need to kick
2596 		 * of a recovery action!!!!
2597 		 */
2598 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2599 		break;
2600 	}
2601 
2602 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2603 		mpt_freeze_ccb(ccb);
2604 	}
2605 
2606 	return (TRUE);
2607 }
2608 
2609 static void
2610 mpt_action(struct cam_sim *sim, union ccb *ccb)
2611 {
2612 	struct	mpt_softc *mpt;
2613 	struct	ccb_trans_settings *cts;
2614 	u_int	tgt;
2615 	int	raid_passthru;
2616 
2617 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
2618 
2619 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
2620 	KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action"));
2621 	raid_passthru = (sim == mpt->phydisk_sim);
2622 
2623 	tgt = ccb->ccb_h.target_id;
2624 	if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ &&
2625 	    ccb->ccb_h.func_code != XPT_RESET_BUS) {
2626 		CAMLOCK_2_MPTLOCK(mpt);
2627 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2628 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2629 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2630 			MPTLOCK_2_CAMLOCK(mpt);
2631 			xpt_done(ccb);
2632 			return;
2633 		}
2634 		MPTLOCK_2_CAMLOCK(mpt);
2635 	}
2636 	ccb->ccb_h.ccb_mpt_ptr = mpt;
2637 
2638 	switch (ccb->ccb_h.func_code) {
2639 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2640 		/*
2641 		 * Do a couple of preliminary checks...
2642 		 */
2643 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2644 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2645 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2646 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2647 				xpt_done(ccb);
2648 				break;
2649 			}
2650 		}
2651 		/* Max supported CDB length is 16 bytes */
2652 		/* XXX Unless we implement the new 32byte message type */
2653 		if (ccb->csio.cdb_len >
2654 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
2655 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2656 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2657 			xpt_done(ccb);
2658 			return;
2659 		}
2660 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2661 		mpt_start(sim, ccb);
2662 		break;
2663 
2664 	case XPT_RESET_BUS:
2665 		mpt_lprt(mpt, MPT_PRT_DEBUG, "XPT_RESET_BUS\n");
2666 		if (!raid_passthru) {
2667 			CAMLOCK_2_MPTLOCK(mpt);
2668 			(void)mpt_bus_reset(mpt, /*sleep_ok*/FALSE);
2669 			MPTLOCK_2_CAMLOCK(mpt);
2670 		}
2671 		/*
2672 		 * mpt_bus_reset is always successful in that it
2673 		 * will fall back to a hard reset should a bus
2674 		 * reset attempt fail.
2675 		 */
2676 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2677 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2678 		xpt_done(ccb);
2679 		break;
2680 
2681 	case XPT_ABORT:
2682 	{
2683 		union ccb *accb = ccb->cab.abort_ccb;
2684 		CAMLOCK_2_MPTLOCK(mpt);
2685 		switch (accb->ccb_h.func_code) {
2686 		case XPT_ACCEPT_TARGET_IO:
2687 		case XPT_IMMED_NOTIFY:
2688 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
2689 			break;
2690 		case XPT_CONT_TARGET_IO:
2691 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
2692 			ccb->ccb_h.status = CAM_UA_ABORT;
2693 			break;
2694 		case XPT_SCSI_IO:
2695 			ccb->ccb_h.status = CAM_UA_ABORT;
2696 			break;
2697 		default:
2698 			ccb->ccb_h.status = CAM_REQ_INVALID;
2699 			break;
2700 		}
2701 		MPTLOCK_2_CAMLOCK(mpt);
2702 		xpt_done(ccb);
2703 		break;
2704 	}
2705 
2706 #ifdef	CAM_NEW_TRAN_CODE
2707 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2708 #else
2709 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2710 #endif
2711 #define	DP_DISC_ENABLE	0x1
2712 #define	DP_DISC_DISABL	0x2
2713 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
2714 
2715 #define	DP_TQING_ENABLE	0x4
2716 #define	DP_TQING_DISABL	0x8
2717 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
2718 
2719 #define	DP_WIDE		0x10
2720 #define	DP_NARROW	0x20
2721 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
2722 
2723 #define	DP_SYNC		0x40
2724 
2725 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2726 	{
2727 #ifdef	CAM_NEW_TRAN_CODE
2728 		struct ccb_trans_settings_scsi *scsi;
2729 		struct ccb_trans_settings_spi *spi;
2730 #endif
2731 		uint8_t dval;
2732 		u_int period;
2733 		u_int offset;
2734 		int m;
2735 
2736 		cts = &ccb->cts;
2737 		if (!IS_CURRENT_SETTINGS(cts)) {
2738 			mpt_prt(mpt, "Attempt to set User settings\n");
2739 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2740 			xpt_done(ccb);
2741 			break;
2742 		}
2743 		if (mpt->is_fc || mpt->is_sas) {
2744 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2745 			xpt_done(ccb);
2746 			break;
2747 		}
2748 
2749 		m = mpt->mpt_port_page2.PortSettings;
2750 		if ((m & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS) ==
2751 		    MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
2752 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2753 			xpt_done(ccb);
2754 			break;
2755 		}
2756 
2757 		dval = 0;
2758 		period = 0;
2759 		offset = 0;
2760 
2761 #ifndef	CAM_NEW_TRAN_CODE
2762 		if (cts->valid & CCB_TRANS_DISC_VALID) {
2763 			dval |= DP_DISC_ENABLE;
2764 		}
2765 		if (cts->valid & CCB_TRANS_TQ_VALID) {
2766 			dval |= DP_TQING_ENABLE;
2767 		}
2768 		if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2769 			if (cts->bus_width)
2770 				dval |= DP_WIDE;
2771 			else
2772 				dval |= DP_NARROW;
2773 		}
2774 		/*
2775 		 * Any SYNC RATE of nonzero and SYNC_OFFSET
2776 		 * of nonzero will cause us to go to the
2777 		 * selected (from NVRAM) maximum value for
2778 		 * this device. At a later point, we'll
2779 		 * allow finer control.
2780 		 */
2781 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2782 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
2783 			dval |= DP_SYNC;
2784 			period = cts->sync_period;
2785 			offset = cts->sync_offset;
2786 		}
2787 #else
2788 		scsi = &cts->proto_specific.scsi;
2789 		spi = &cts->xport_specific.spi;
2790 
2791 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2792 			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2793 				dval |= DP_DISC_ENABLE;
2794 			else
2795 				dval |= DP_DISC_DISABL;
2796 		}
2797 
2798 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2799 			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2800 				dval |= DP_TQING_ENABLE;
2801 			else
2802 				dval |= DP_TQING_DISABL;
2803 		}
2804 
2805 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2806 			if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2807 				dval |= DP_WIDE;
2808 			else
2809 				dval |= DP_NARROW;
2810 		}
2811 
2812 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2813 		    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
2814 		    (spi->sync_period && spi->sync_offset)) {
2815 			dval |= DP_SYNC;
2816 			period = spi->sync_period;
2817 			offset = spi->sync_offset;
2818 		}
2819 #endif
2820 		CAMLOCK_2_MPTLOCK(mpt);
2821 		if (dval & DP_DISC_ENABLE) {
2822 			mpt->mpt_disc_enable |= (1 << tgt);
2823 		} else if (dval & DP_DISC_DISABL) {
2824 			mpt->mpt_disc_enable &= ~(1 << tgt);
2825 		}
2826 		if (dval & DP_TQING_ENABLE) {
2827 			mpt->mpt_tag_enable |= (1 << tgt);
2828 		} else if (dval & DP_TQING_DISABL) {
2829 			mpt->mpt_tag_enable &= ~(1 << tgt);
2830 		}
2831 		if (dval & DP_WIDTH) {
2832 			mpt_setwidth(mpt, tgt, 1);
2833 		}
2834 		if (dval & DP_SYNC) {
2835 			mpt_setsync(mpt, tgt, period, offset);
2836 		}
2837 		MPTLOCK_2_CAMLOCK(mpt);
2838 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2839 		    "SET tgt %d flags %x period %x off %x\n",
2840 		    tgt, dval, period, offset);
2841 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2842 		xpt_done(ccb);
2843 		break;
2844 	}
2845 	case XPT_GET_TRAN_SETTINGS:
2846 		cts = &ccb->cts;
2847 		if (mpt->is_fc) {
2848 #ifndef	CAM_NEW_TRAN_CODE
2849 			/*
2850 			 * a lot of normal SCSI things don't make sense.
2851 			 */
2852 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2853 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2854 			/*
2855 			 * How do you measure the width of a high
2856 			 * speed serial bus? Well, in bytes.
2857 			 *
2858 			 * Offset and period make no sense, though, so we set
2859 			 * (above) a 'base' transfer speed to be gigabit.
2860 			 */
2861 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2862 #else
2863 			struct ccb_trans_settings_fc *fc =
2864 			    &cts->xport_specific.fc;
2865 
2866 			cts->protocol = PROTO_SCSI;
2867 			cts->protocol_version = SCSI_REV_2;
2868 			cts->transport = XPORT_FC;
2869 			cts->transport_version = 0;
2870 
2871 			fc->valid = CTS_FC_VALID_SPEED;
2872 			fc->bitrate = 100000;	/* XXX: Need for 2Gb/s */
2873 			/* XXX: need a port database for each target */
2874 #endif
2875 		} else if (mpt->is_sas) {
2876 #ifndef	CAM_NEW_TRAN_CODE
2877 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2878 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2879 			/*
2880 			 * How do you measure the width of a high
2881 			 * speed serial bus? Well, in bytes.
2882 			 *
2883 			 * Offset and period make no sense, though, so we set
2884 			 * (above) a 'base' transfer speed to be gigabit.
2885 			 */
2886 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2887 #else
2888 			struct ccb_trans_settings_sas *sas =
2889 			    &cts->xport_specific.sas;
2890 
2891 			cts->protocol = PROTO_SCSI;
2892 			cts->protocol_version = SCSI_REV_3;
2893 			cts->transport = XPORT_SAS;
2894 			cts->transport_version = 0;
2895 
2896 			sas->valid = CTS_SAS_VALID_SPEED;
2897 			sas->bitrate = 300000;	/* XXX: Default 3Gbps */
2898 #endif
2899 		} else {
2900 			if (mpt_get_spi_settings(mpt, cts) != 0) {
2901 				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
2902 				xpt_done(ccb);
2903 				return;
2904 			}
2905 		}
2906 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2907 		xpt_done(ccb);
2908 		break;
2909 
2910 	case XPT_CALC_GEOMETRY:
2911 	{
2912 		struct ccb_calc_geometry *ccg;
2913 
2914 		ccg = &ccb->ccg;
2915 		if (ccg->block_size == 0) {
2916 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2917 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
2918 			xpt_done(ccb);
2919 			break;
2920 		}
2921 		mpt_calc_geometry(ccg, /*extended*/1);
2922 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2923 		xpt_done(ccb);
2924 		break;
2925 	}
2926 	case XPT_PATH_INQ:		/* Path routing inquiry */
2927 	{
2928 		struct ccb_pathinq *cpi = &ccb->cpi;
2929 
2930 		cpi->version_num = 1;
2931 		cpi->target_sprt = 0;
2932 		cpi->hba_eng_cnt = 0;
2933 		cpi->max_lun = 7;
2934 		cpi->bus_id = cam_sim_bus(sim);
2935 		/* XXX Report base speed more accurately for FC/SAS, etc.*/
2936 		if (raid_passthru) {
2937 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks;
2938 			cpi->hba_misc = PIM_NOBUSRESET;
2939 			cpi->initiator_id = cpi->max_target + 1;
2940 			cpi->hba_inquiry = PI_TAG_ABLE;
2941 			if (mpt->is_fc) {
2942 				cpi->base_transfer_speed = 100000;
2943 			} else if (mpt->is_sas) {
2944 				cpi->base_transfer_speed = 300000;
2945 			} else {
2946 				cpi->base_transfer_speed = 3300;
2947 				cpi->hba_inquiry |=
2948 				    PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2949 			}
2950 		} else if (mpt->is_fc) {
2951 			/* XXX SHOULD BE BASED UPON IOC FACTS XXX XXX */
2952 			cpi->max_target = 255;
2953 			cpi->hba_misc = PIM_NOBUSRESET;
2954 			cpi->initiator_id = mpt->mpt_ini_id;
2955 			cpi->base_transfer_speed = 100000;
2956 			cpi->hba_inquiry = PI_TAG_ABLE;
2957 		} else if (mpt->is_sas) {
2958 			cpi->max_target = 63;	/* XXX */
2959 			cpi->hba_misc = PIM_NOBUSRESET;
2960 			cpi->initiator_id = mpt->mpt_ini_id;
2961 			cpi->base_transfer_speed = 300000;
2962 			cpi->hba_inquiry = PI_TAG_ABLE;
2963 		} else {
2964 			cpi->initiator_id = mpt->mpt_ini_id;
2965 			cpi->base_transfer_speed = 3300;
2966 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2967 			cpi->hba_misc = 0;
2968 			cpi->max_target = 15;
2969 		}
2970 
2971 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
2972 			cpi->hba_misc |= PIM_NOINITIATOR;
2973 		}
2974 		if ((mpt->role & MPT_ROLE_TARGET) != 0) {
2975 			cpi->target_sprt =
2976 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2977 		} else {
2978 			cpi->target_sprt = 0;
2979 		}
2980 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2981 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
2982 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2983 		cpi->unit_number = cam_sim_unit(sim);
2984 		cpi->ccb_h.status = CAM_REQ_CMP;
2985 		xpt_done(ccb);
2986 		break;
2987 	}
2988 	case XPT_EN_LUN:		/* Enable LUN as a target */
2989 	{
2990 		int result;
2991 
2992 		CAMLOCK_2_MPTLOCK(mpt);
2993 		if (ccb->cel.enable)
2994 			result = mpt_enable_lun(mpt,
2995 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2996 		else
2997 			result = mpt_disable_lun(mpt,
2998 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2999 		MPTLOCK_2_CAMLOCK(mpt);
3000 		if (result == 0) {
3001 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3002 		} else {
3003 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3004 		}
3005 		xpt_done(ccb);
3006 		break;
3007 	}
3008 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3009 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3010 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3011 	{
3012 		tgt_resource_t *trtp;
3013 		lun_id_t lun = ccb->ccb_h.target_lun;
3014 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3015 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3016 		ccb->ccb_h.flags = 0;
3017 
3018 		if (lun == CAM_LUN_WILDCARD) {
3019 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3020 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3021 				xpt_done(ccb);
3022 				break;
3023 			}
3024 			trtp = &mpt->trt_wildcard;
3025 		} else if (lun >= MPT_MAX_LUNS) {
3026 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3027 			xpt_done(ccb);
3028 			break;
3029 		} else {
3030 			trtp = &mpt->trt[lun];
3031 		}
3032 		CAMLOCK_2_MPTLOCK(mpt);
3033 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3034 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3035 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3036 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3037 			    sim_links.stqe);
3038 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3039 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3040 			    "Put FREE INOT lun %d\n", lun);
3041 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3042 			    sim_links.stqe);
3043 		} else {
3044 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3045 		}
3046 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3047 		MPTLOCK_2_CAMLOCK(mpt);
3048 		break;
3049 	}
3050 	case XPT_CONT_TARGET_IO:
3051 		CAMLOCK_2_MPTLOCK(mpt);
3052 		mpt_target_start_io(mpt, ccb);
3053 		MPTLOCK_2_CAMLOCK(mpt);
3054 		break;
3055 	default:
3056 		ccb->ccb_h.status = CAM_REQ_INVALID;
3057 		xpt_done(ccb);
3058 		break;
3059 	}
3060 }
3061 
3062 static int
3063 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3064 {
3065 #ifdef	CAM_NEW_TRAN_CODE
3066 	struct ccb_trans_settings_scsi *scsi =
3067 	    &cts->proto_specific.scsi;
3068 	struct ccb_trans_settings_spi *spi =
3069 	    &cts->xport_specific.spi;
3070 #endif
3071 	int tgt;
3072 	uint8_t dval, pval, oval;
3073 	int rv;
3074 
3075 
3076 	tgt = cts->ccb_h.target_id;
3077 
3078 	/*
3079 	 * We aren't going off of Port PAGE2 params for
3080 	 * tagged queuing or disconnect capabilities
3081 	 * for current settings. For goal settings,
3082 	 * we assert all capabilities- we've had some
3083 	 * problems with reading NVRAM data.
3084 	 */
3085 	if (IS_CURRENT_SETTINGS(cts)) {
3086 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3087 		dval = 0;
3088 
3089 		CAMLOCK_2_MPTLOCK(mpt);
3090 		tmp = mpt->mpt_dev_page0[tgt];
3091 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3092 		    sizeof(tmp), FALSE, 5000);
3093 		if (rv) {
3094 			MPTLOCK_2_CAMLOCK(mpt);
3095 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3096 			return (rv);
3097 		}
3098 		MPTLOCK_2_CAMLOCK(mpt);
3099 
3100 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3101 		    "mpt_get_spi: SPI Tgt %d Page 0: NParms %x Info %x\n",
3102 		    tgt, tmp.NegotiatedParameters, tmp.Information);
3103 		if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) {
3104 			dval |= DP_WIDE;
3105 		}
3106 		if (mpt->mpt_disc_enable & (1 << tgt)) {
3107 			dval |= DP_DISC_ENABLE;
3108 		}
3109 		if (mpt->mpt_tag_enable & (1 << tgt)) {
3110 			dval |= DP_TQING_ENABLE;
3111 		}
3112 		oval = (tmp.NegotiatedParameters >> 16) & 0xff;
3113 		pval = (tmp.NegotiatedParameters >>  8) & 0xff;
3114 	} else {
3115 		/*
3116 		 * XXX: Fix wrt NVRAM someday. Attempts
3117 		 * XXX: to read port page2 device data
3118 		 * XXX: just returns zero in these areas.
3119 		 */
3120 		dval = DP_WIDE|DP_DISC|DP_TQING;
3121 		oval = (mpt->mpt_port_page0.Capabilities >> 16);
3122 		pval = (mpt->mpt_port_page0.Capabilities >>  8);
3123 	}
3124 #ifndef	CAM_NEW_TRAN_CODE
3125 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3126 	if (dval & DP_DISC_ENABLE) {
3127 		cts->flags |= CCB_TRANS_DISC_ENB;
3128 	}
3129 	if (dval & DP_TQING_ENABLE) {
3130 		cts->flags |= CCB_TRANS_TAG_ENB;
3131 	}
3132 	if (dval & DP_WIDE) {
3133 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3134 	} else {
3135 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3136 	}
3137 	cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
3138 	    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3139 	if (oval) {
3140 		cts->sync_period = pval;
3141 		cts->sync_offset = oval;
3142 		cts->valid |=
3143 		    CCB_TRANS_SYNC_RATE_VALID | CCB_TRANS_SYNC_OFFSET_VALID;
3144 	}
3145 #else
3146 	cts->protocol = PROTO_SCSI;
3147 	cts->protocol_version = SCSI_REV_2;
3148 	cts->transport = XPORT_SPI;
3149 	cts->transport_version = 2;
3150 
3151 	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
3152 	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
3153 	if (dval & DP_DISC_ENABLE) {
3154 		spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3155 	}
3156 	if (dval & DP_TQING_ENABLE) {
3157 		scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3158 	}
3159 	if (oval && pval) {
3160 		spi->sync_offset = oval;
3161 		spi->sync_period = pval;
3162 		spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3163 		spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3164 	}
3165 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3166 	if (dval & DP_WIDE) {
3167 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3168 	} else {
3169 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3170 	}
3171 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3172 		scsi->valid = CTS_SCSI_VALID_TQ;
3173 		spi->valid |= CTS_SPI_VALID_DISC;
3174 	} else {
3175 		scsi->valid = 0;
3176 	}
3177 #endif
3178 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3179 	    "mpt_get_spi: tgt %d %s settings flags %x period %x offset %x\n",
3180 	    tgt, IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
3181 	    dval, pval, oval);
3182 	return (0);
3183 }
3184 
3185 static void
3186 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3187 {
3188 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3189 
3190 	tmp = &mpt->mpt_dev_page1[tgt];
3191 	if (onoff) {
3192 		tmp->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3193 	} else {
3194 		tmp->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3195 	}
3196 }
3197 
3198 static void
3199 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3200 {
3201 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3202 
3203 	tmp = &mpt->mpt_dev_page1[tgt];
3204 	tmp->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3205 	tmp->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3206 	tmp->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3207 	tmp->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3208 	tmp->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3209 
3210 	/*
3211 	 * XXX: For now, we're ignoring specific settings
3212 	 */
3213 	if (period && offset) {
3214 		int factor, offset, np;
3215 		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
3216 		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
3217 		np = 0;
3218 		if (factor < 0x9) {
3219 			np |= MPI_SCSIDEVPAGE1_RP_QAS;
3220 			np |= MPI_SCSIDEVPAGE1_RP_IU;
3221 		}
3222 		if (factor < 0xa) {
3223 			np |= MPI_SCSIDEVPAGE1_RP_DT;
3224 		}
3225 		np |= (factor << 8) | (offset << 16);
3226 		tmp->RequestedParameters |= np;
3227 	}
3228 }
3229 
3230 static int
3231 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3232 {
3233 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3234 	int rv;
3235 
3236 	tmp = mpt->mpt_dev_page1[tgt];
3237 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3238 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3239 	if (rv) {
3240 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3241 		return (-1);
3242 	}
3243 	rv = mpt_read_cur_cfg_page(mpt, tgt,
3244 	    &tmp.Header, sizeof(tmp), FALSE, 500);
3245 	if (rv) {
3246 		mpt_prt(mpt, "mpt_update_spi_config: read cur page failed\n");
3247 		return (-1);
3248 	}
3249 	mpt->mpt_dev_page1[tgt] = tmp;
3250 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3251 	    "mpt_update_spi_config[%d]: Page 1: RParams %x Config %x\n", tgt,
3252 	    mpt->mpt_dev_page1[tgt].RequestedParameters,
3253 	    mpt->mpt_dev_page1[tgt].Configuration);
3254 	return (0);
3255 }
3256 
3257 static void
3258 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3259 {
3260 #if __FreeBSD_version >= 500000
3261 	cam_calc_geometry(ccg, extended);
3262 #else
3263 	uint32_t size_mb;
3264 	uint32_t secs_per_cylinder;
3265 
3266 	if (ccg->block_size == 0) {
3267 		ccg->ccb_h.status = CAM_REQ_INVALID;
3268 		return;
3269 	}
3270 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3271 	if (size_mb > 1024 && extended) {
3272 		ccg->heads = 255;
3273 		ccg->secs_per_track = 63;
3274 	} else {
3275 		ccg->heads = 64;
3276 		ccg->secs_per_track = 32;
3277 	}
3278 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3279 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3280 	ccg->ccb_h.status = CAM_REQ_CMP;
3281 #endif
3282 }
3283 
3284 /****************************** Timeout Recovery ******************************/
3285 static int
3286 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3287 {
3288 	int error;
3289 
3290 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3291 	    &mpt->recovery_thread, /*flags*/0,
3292 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3293 	return (error);
3294 }
3295 
3296 static void
3297 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3298 {
3299 	if (mpt->recovery_thread == NULL) {
3300 		return;
3301 	}
3302 	mpt->shutdwn_recovery = 1;
3303 	wakeup(mpt);
3304 	/*
3305 	 * Sleep on a slightly different location
3306 	 * for this interlock just for added safety.
3307 	 */
3308 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3309 }
3310 
3311 static void
3312 mpt_recovery_thread(void *arg)
3313 {
3314 	struct mpt_softc *mpt;
3315 
3316 #if __FreeBSD_version >= 500000
3317 	mtx_lock(&Giant);
3318 #endif
3319 	mpt = (struct mpt_softc *)arg;
3320 	MPT_LOCK(mpt);
3321 	for (;;) {
3322 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3323 			if (mpt->shutdwn_recovery == 0) {
3324 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3325 			}
3326 		}
3327 		if (mpt->shutdwn_recovery != 0) {
3328 			break;
3329 		}
3330 		mpt_recover_commands(mpt);
3331 	}
3332 	mpt->recovery_thread = NULL;
3333 	wakeup(&mpt->recovery_thread);
3334 	MPT_UNLOCK(mpt);
3335 #if __FreeBSD_version >= 500000
3336 	mtx_unlock(&Giant);
3337 #endif
3338 	kthread_exit(0);
3339 }
3340 
3341 static int
3342 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3343     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3344 {
3345 	MSG_SCSI_TASK_MGMT *tmf_req;
3346 	int		    error;
3347 
3348 	/*
3349 	 * Wait for any current TMF request to complete.
3350 	 * We're only allowed to issue one TMF at a time.
3351 	 */
3352 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3353 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3354 	if (error != 0) {
3355 		mpt_reset(mpt, TRUE);
3356 		return (ETIMEDOUT);
3357 	}
3358 
3359 	mpt_assign_serno(mpt, mpt->tmf_req);
3360 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3361 
3362 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3363 	memset(tmf_req, 0, sizeof(*tmf_req));
3364 	tmf_req->TargetID = target;
3365 	tmf_req->Bus = channel;
3366 	tmf_req->ChainOffset = 0;
3367 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3368 	tmf_req->Reserved = 0;
3369 	tmf_req->TaskType = type;
3370 	tmf_req->Reserved1 = 0;
3371 	tmf_req->MsgFlags = flags;
3372 	tmf_req->MsgContext =
3373 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3374 	memset(&tmf_req->LUN, 0,
3375 	    sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
3376 	if (lun > 256) {
3377 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3378 		tmf_req->LUN[1] = lun & 0xff;
3379 	} else {
3380 		tmf_req->LUN[1] = lun;
3381 	}
3382 	tmf_req->TaskMsgContext = abort_ctx;
3383 
3384 	mpt_lprt(mpt, MPT_PRT_INFO,
3385 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3386 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3387 	if (mpt->verbose > MPT_PRT_DEBUG) {
3388 		mpt_print_request(tmf_req);
3389 	}
3390 
3391 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3392 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3393 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3394 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3395 	if (error != MPT_OK) {
3396 		mpt_reset(mpt, TRUE);
3397 	}
3398 	return (error);
3399 }
3400 
3401 /*
3402  * When a command times out, it is placed on the requeust_timeout_list
3403  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3404  * only a single TMF operation at a time, so we serially abort/bdr, etc,
3405  * the timedout transactions.  The next TMF is issued either by the
3406  * completion handler of the current TMF waking our recovery thread,
3407  * or the TMF timeout handler causing a hard reset sequence.
3408  */
3409 static void
3410 mpt_recover_commands(struct mpt_softc *mpt)
3411 {
3412 	request_t	   *req;
3413 	union ccb	   *ccb;
3414 	int		    error;
3415 
3416 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3417 		/*
3418 		 * No work to do- leave.
3419 		 */
3420 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3421 		return;
3422 	}
3423 
3424 	/*
3425 	 * Flush any commands whose completion coincides with their timeout.
3426 	 */
3427 	mpt_intr(mpt);
3428 
3429 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3430 		/*
3431 		 * The timedout commands have already
3432 		 * completed.  This typically means
3433 		 * that either the timeout value was on
3434 		 * the hairy edge of what the device
3435 		 * requires or - more likely - interrupts
3436 		 * are not happening.
3437 		 */
3438 		mpt_prt(mpt, "Timedout requests already complete. "
3439 		    "Interrupts may not be functioning.\n");
3440 		mpt_enable_ints(mpt);
3441 		return;
3442 	}
3443 
3444 	/*
3445 	 * We have no visibility into the current state of the
3446 	 * controller, so attempt to abort the commands in the
3447 	 * order they timed-out. For initiator commands, we
3448 	 * depend on the reply handler pulling requests off
3449 	 * the timeout list.
3450 	 */
3451 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3452 		uint16_t status;
3453 		uint8_t response;
3454 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3455 
3456 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
3457 		    req, req->serno, hdrp->Function);
3458 		ccb = req->ccb;
3459 		if (ccb == NULL) {
3460 			mpt_prt(mpt, "null ccb in timed out request. "
3461 			    "Resetting Controller.\n");
3462 			mpt_reset(mpt, TRUE);
3463 			continue;
3464 		}
3465 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
3466 
3467 		/*
3468 		 * Check to see if this is not an initiator command and
3469 		 * deal with it differently if it is.
3470 		 */
3471 		switch (hdrp->Function) {
3472 		case MPI_FUNCTION_SCSI_IO_REQUEST:
3473 			break;
3474 		default:
3475 			/*
3476 			 * XXX: FIX ME: need to abort target assists...
3477 			 */
3478 			mpt_prt(mpt, "just putting it back on the pend q\n");
3479 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
3480 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
3481 			    links);
3482 			continue;
3483 		}
3484 
3485 		error = mpt_scsi_send_tmf(mpt,
3486 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3487 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
3488 		    htole32(req->index | scsi_io_handler_id), TRUE);
3489 
3490 		if (error != 0) {
3491 			/*
3492 			 * mpt_scsi_send_tmf hard resets on failure, so no
3493 			 * need to do so here.  Our queue should be emptied
3494 			 * by the hard reset.
3495 			 */
3496 			continue;
3497 		}
3498 
3499 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
3500 		    REQ_STATE_DONE, TRUE, 500);
3501 
3502 		status = mpt->tmf_req->IOCStatus;
3503 		response = mpt->tmf_req->ResponseCode;
3504 		mpt->tmf_req->state = REQ_STATE_FREE;
3505 
3506 		if (error != 0) {
3507 			/*
3508 			 * If we've errored out,, reset the controller.
3509 			 */
3510 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
3511 			    "Resetting controller\n");
3512 			mpt_reset(mpt, TRUE);
3513 			continue;
3514 		}
3515 
3516 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
3517 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
3518 			    "Resetting controller.\n", status);
3519 			mpt_reset(mpt, TRUE);
3520 			continue;
3521 		}
3522 
3523 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
3524 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
3525 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
3526 			    "Resetting controller.\n", response);
3527 			mpt_reset(mpt, TRUE);
3528 			continue;
3529 		}
3530 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
3531 	}
3532 }
3533 
3534 /************************ Target Mode Support ****************************/
3535 static void
3536 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
3537 {
3538 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
3539 	PTR_SGE_TRANSACTION32 tep;
3540 	PTR_SGE_SIMPLE32 se;
3541 	bus_addr_t paddr;
3542 
3543 	paddr = req->req_pbuf;
3544 	paddr += MPT_RQSL(mpt);
3545 
3546 	fc = req->req_vbuf;
3547 	memset(fc, 0, MPT_REQUEST_AREA);
3548 	fc->BufferCount = 1;
3549 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
3550 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
3551 
3552 	/*
3553 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
3554 	 * consist of a TE SGL element (with details length of zero)
3555 	 * followe by a SIMPLE SGL element which holds the address
3556 	 * of the buffer.
3557 	 */
3558 
3559 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
3560 
3561 	tep->ContextSize = 4;
3562 	tep->Flags = 0;
3563 	tep->TransactionContext[0] = htole32(ioindex);
3564 
3565 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
3566 	se->FlagsLength =
3567 		MPI_SGE_FLAGS_HOST_TO_IOC	|
3568 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
3569 		MPI_SGE_FLAGS_LAST_ELEMENT	|
3570 		MPI_SGE_FLAGS_END_OF_LIST	|
3571 		MPI_SGE_FLAGS_END_OF_BUFFER;
3572 	se->FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
3573 	se->FlagsLength |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
3574 	se->Address = (uint32_t) paddr;
3575 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3576 	    "add ELS index %d ioindex %d for %p:%u\n",
3577 	    req->index, ioindex, req, req->serno);
3578 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
3579 	    ("mpt_fc_post_els: request not locked"));
3580 	mpt_send_cmd(mpt, req);
3581 }
3582 
3583 static void
3584 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
3585 {
3586 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
3587 	PTR_CMD_BUFFER_DESCRIPTOR cb;
3588 	bus_addr_t paddr;
3589 
3590 	paddr = req->req_pbuf;
3591 	paddr += MPT_RQSL(mpt);
3592 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
3593 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
3594 
3595 	fc = req->req_vbuf;
3596 	fc->BufferCount = 1;
3597 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
3598 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3599 
3600 	cb = &fc->Buffer[0];
3601 	cb->IoIndex = htole16(ioindex);
3602 	cb->u.PhysicalAddress32 = (U32) paddr;
3603 
3604 	mpt_check_doorbell(mpt);
3605 	mpt_send_cmd(mpt, req);
3606 }
3607 
3608 static int
3609 mpt_add_els_buffers(struct mpt_softc *mpt)
3610 {
3611 	int i;
3612 
3613 	if (mpt->is_fc == 0) {
3614 		return (TRUE);
3615 	}
3616 
3617 	if (mpt->els_cmds_allocated) {
3618 		return (TRUE);
3619 	}
3620 
3621 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
3622 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3623 
3624 	if (mpt->els_cmd_ptrs == NULL) {
3625 		return (FALSE);
3626 	}
3627 
3628 	/*
3629 	 * Feed the chip some ELS buffer resources
3630 	 */
3631 	for (i = 0; i < MPT_MAX_ELS; i++) {
3632 		request_t *req = mpt_get_request(mpt, FALSE);
3633 		if (req == NULL) {
3634 			break;
3635 		}
3636 		req->state |= REQ_STATE_LOCKED;
3637 		mpt->els_cmd_ptrs[i] = req;
3638 		mpt_fc_post_els(mpt, req, i);
3639 	}
3640 
3641 	if (i == 0) {
3642 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
3643 		free(mpt->els_cmd_ptrs, M_DEVBUF);
3644 		mpt->els_cmd_ptrs = NULL;
3645 		return (FALSE);
3646 	}
3647 	if (i != MPT_MAX_ELS) {
3648 		mpt_lprt(mpt, MPT_PRT_INFO,
3649 		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
3650 	}
3651 	mpt->els_cmds_allocated = i;
3652 	return(TRUE);
3653 }
3654 
3655 static int
3656 mpt_add_target_commands(struct mpt_softc *mpt)
3657 {
3658 	int i, max;
3659 
3660 	if (mpt->tgt_cmd_ptrs) {
3661 		return (TRUE);
3662 	}
3663 
3664 	max = MPT_MAX_REQUESTS(mpt) >> 1;
3665 	if (max > mpt->mpt_max_tgtcmds) {
3666 		max = mpt->mpt_max_tgtcmds;
3667 	}
3668 	mpt->tgt_cmd_ptrs =
3669 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
3670 	if (mpt->tgt_cmd_ptrs == NULL) {
3671 		mpt_prt(mpt,
3672 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
3673 		return (FALSE);
3674 	}
3675 
3676 	for (i = 0; i < max; i++) {
3677 		request_t *req;
3678 
3679 		req = mpt_get_request(mpt, FALSE);
3680 		if (req == NULL) {
3681 			break;
3682 		}
3683 		req->state |= REQ_STATE_LOCKED;
3684 		mpt->tgt_cmd_ptrs[i] = req;
3685 		mpt_post_target_command(mpt, req, i);
3686 	}
3687 
3688 
3689 	if (i == 0) {
3690 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
3691 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
3692 		mpt->tgt_cmd_ptrs = NULL;
3693 		return (FALSE);
3694 	}
3695 
3696 	mpt->tgt_cmds_allocated = i;
3697 
3698 	if (i < max) {
3699 		mpt_lprt(mpt, MPT_PRT_INFO,
3700 		    "added %d of %d target bufs\n", i, max);
3701 	}
3702 	return (i);
3703 }
3704 
3705 static void
3706 mpt_free_els_buffers(struct mpt_softc *mpt)
3707 {
3708 	mpt_prt(mpt, "fix me! need to implement mpt_free_els_buffers");
3709 }
3710 
3711 static void
3712 mpt_free_target_commands(struct mpt_softc *mpt)
3713 {
3714 	mpt_prt(mpt, "fix me! need to implement mpt_free_target_commands");
3715 }
3716 
3717 
3718 static int
3719 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3720 {
3721 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3722 		mpt->twildcard = 1;
3723 	} else if (lun >= MPT_MAX_LUNS) {
3724 		return (EINVAL);
3725 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3726 		return (EINVAL);
3727 	}
3728 	if (mpt->tenabled == 0) {
3729 		/*
3730 		 * Try to add some target command resources
3731 		 */
3732 		if (mpt_add_target_commands(mpt) == FALSE) {
3733 			mpt_free_els_buffers(mpt);
3734 			return (ENOMEM);
3735 		}
3736 		if (mpt->is_fc) {
3737 			(void) mpt_fc_reset_link(mpt, 0);
3738 		}
3739 		mpt->tenabled = 1;
3740 	}
3741 	if (lun == CAM_LUN_WILDCARD) {
3742 		mpt->trt_wildcard.enabled = 1;
3743 	} else {
3744 		mpt->trt[lun].enabled = 1;
3745 	}
3746 	return (0);
3747 }
3748 
3749 static int
3750 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
3751 {
3752 	int i;
3753 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
3754 		mpt->twildcard = 0;
3755 	} else if (lun >= MPT_MAX_LUNS) {
3756 		return (EINVAL);
3757 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
3758 		return (EINVAL);
3759 	}
3760 	if (lun == CAM_LUN_WILDCARD) {
3761 		mpt->trt_wildcard.enabled = 0;
3762 	} else {
3763 		mpt->trt[lun].enabled = 0;
3764 	}
3765 	for (i = 0; i < MPT_MAX_LUNS; i++) {
3766 		if (mpt->trt[lun].enabled) {
3767 			break;
3768 		}
3769 	}
3770 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
3771 		mpt_free_els_buffers(mpt);
3772 		mpt_free_target_commands(mpt);
3773 		if (mpt->is_fc) {
3774 			(void) mpt_fc_reset_link(mpt, 0);
3775 		}
3776 		mpt->tenabled = 0;
3777 	}
3778 	return (0);
3779 }
3780 
3781 /*
3782  * Called with MPT lock held
3783  */
3784 static void
3785 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
3786 {
3787 	struct ccb_scsiio *csio = &ccb->csio;
3788 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
3789 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
3790 
3791 	switch (tgt->state) {
3792 	case TGT_STATE_IN_CAM:
3793 		break;
3794 	case TGT_STATE_MOVING_DATA:
3795 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3796 		xpt_freeze_simq(mpt->sim, 1);
3797 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3798 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3799 		MPTLOCK_2_CAMLOCK(mpt);
3800 		xpt_done(ccb);
3801 		CAMLOCK_2_MPTLOCK(mpt);
3802 		return;
3803 	default:
3804 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
3805 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
3806 		mpt_tgt_dump_req_state(mpt, cmd_req);
3807 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3808 		MPTLOCK_2_CAMLOCK(mpt);
3809 		xpt_done(ccb);
3810 		CAMLOCK_2_MPTLOCK(mpt);
3811 		return;
3812 	}
3813 
3814 	if (csio->dxfer_len) {
3815 		bus_dmamap_callback_t *cb;
3816 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
3817 		request_t *req;
3818 
3819 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
3820 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
3821 
3822 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
3823 			if (mpt->outofbeer == 0) {
3824 				mpt->outofbeer = 1;
3825 				xpt_freeze_simq(mpt->sim, 1);
3826 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
3827 			}
3828 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3829 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
3830 			MPTLOCK_2_CAMLOCK(mpt);
3831 			xpt_done(ccb);
3832 			CAMLOCK_2_MPTLOCK(mpt);
3833 			return;
3834 		}
3835 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
3836 		if (sizeof (bus_addr_t) > 4) {
3837 			cb = mpt_execute_req_a64;
3838 		} else {
3839 			cb = mpt_execute_req;
3840 		}
3841 
3842 		req->ccb = ccb;
3843 		ccb->ccb_h.ccb_req_ptr = req;
3844 
3845 		/*
3846 		 * Record the currently active ccb and the
3847 		 * request for it in our target state area.
3848 		 */
3849 		tgt->ccb = ccb;
3850 		tgt->req = req;
3851 
3852 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
3853 		ta = req->req_vbuf;
3854 
3855 		if (mpt->is_fc) {
3856 			;
3857 		} else if (mpt->is_sas == 0) {
3858 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
3859 			     cmd_req->req_vbuf;
3860 			ta->QueueTag = ssp->InitiatorTag;
3861 		} else {
3862 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
3863 			     cmd_req->req_vbuf;
3864 			ta->QueueTag = sp->Tag;
3865 		}
3866 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
3867 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
3868 		ta->ReplyWord = htole32(tgt->reply_desc);
3869 		if (csio->ccb_h.target_lun > 256) {
3870 			ta->LUN[0] =
3871 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
3872 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
3873 		} else {
3874 			ta->LUN[1] = csio->ccb_h.target_lun;
3875 		}
3876 
3877 		ta->RelativeOffset = tgt->bytes_xfered;
3878 		ta->DataLength = ccb->csio.dxfer_len;
3879 		if (ta->DataLength > tgt->resid) {
3880 			ta->DataLength = tgt->resid;
3881 		}
3882 
3883 		/*
3884 		 * XXX Should be done after data transfer completes?
3885 		 */
3886 		tgt->resid -= csio->dxfer_len;
3887 		tgt->bytes_xfered += csio->dxfer_len;
3888 
3889 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
3890 			ta->TargetAssistFlags |=
3891 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
3892 		}
3893 
3894 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
3895 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
3896 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
3897 			ta->TargetAssistFlags |=
3898 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
3899 		}
3900 #endif
3901 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
3902 
3903 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3904 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
3905 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
3906 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
3907 
3908 		MPTLOCK_2_CAMLOCK(mpt);
3909 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
3910 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
3911 				int error;
3912 				int s = splsoftvm();
3913 				error = bus_dmamap_load(mpt->buffer_dmat,
3914 				    req->dmap, csio->data_ptr, csio->dxfer_len,
3915 				    cb, req, 0);
3916 				splx(s);
3917 				if (error == EINPROGRESS) {
3918 					xpt_freeze_simq(mpt->sim, 1);
3919 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
3920 				}
3921 			} else {
3922 				/*
3923 				 * We have been given a pointer to single
3924 				 * physical buffer.
3925 				 */
3926 				struct bus_dma_segment seg;
3927 				seg.ds_addr = (bus_addr_t)
3928 				    (vm_offset_t)csio->data_ptr;
3929 				seg.ds_len = csio->dxfer_len;
3930 				(*cb)(req, &seg, 1, 0);
3931 			}
3932 		} else {
3933 			/*
3934 			 * We have been given a list of addresses.
3935 			 * This case could be easily supported but they are not
3936 			 * currently generated by the CAM subsystem so there
3937 			 * is no point in wasting the time right now.
3938 			 */
3939 			struct bus_dma_segment *sgs;
3940 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
3941 				(*cb)(req, NULL, 0, EFAULT);
3942 			} else {
3943 				/* Just use the segments provided */
3944 				sgs = (struct bus_dma_segment *)csio->data_ptr;
3945 				(*cb)(req, sgs, csio->sglist_cnt, 0);
3946 			}
3947 		}
3948 		CAMLOCK_2_MPTLOCK(mpt);
3949 	} else {
3950 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
3951 
3952 		/*
3953 		 * XXX: I don't know why this seems to happen, but
3954 		 * XXX: completing the CCB seems to make things happy.
3955 		 * XXX: This seems to happen if the initiator requests
3956 		 * XXX: enough data that we have to do multiple CTIOs.
3957 		 */
3958 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
3959 			mpt_lprt(mpt, MPT_PRT_DEBUG,
3960 			    "Meaningless STATUS CCB (%p): flags %x status %x "
3961 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
3962 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
3963 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3964 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3965 			MPTLOCK_2_CAMLOCK(mpt);
3966 			xpt_done(ccb);
3967 			CAMLOCK_2_MPTLOCK(mpt);
3968 			return;
3969 		}
3970 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
3971 			sp = sense;
3972 			memcpy(sp, &csio->sense_data,
3973 			   min(csio->sense_len, MPT_SENSE_SIZE));
3974 		}
3975 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
3976 	}
3977 }
3978 
3979 /*
3980  * Abort queued up CCBs
3981  */
3982 static cam_status
3983 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
3984 {
3985 	struct mpt_hdr_stailq *lp;
3986 	struct ccb_hdr *srch;
3987 	int found = 0;
3988 	union ccb *accb = ccb->cab.abort_ccb;
3989 	tgt_resource_t *trtp;
3990 
3991 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
3992 
3993 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
3994 		trtp = &mpt->trt_wildcard;
3995 	} else {
3996 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
3997 	}
3998 
3999 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4000 		lp = &trtp->atios;
4001 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4002 		lp = &trtp->inots;
4003 	} else {
4004 		return (CAM_REQ_INVALID);
4005 	}
4006 
4007 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4008 		if (srch == &accb->ccb_h) {
4009 			found = 1;
4010 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4011 			break;
4012 		}
4013 	}
4014 	if (found) {
4015 		accb->ccb_h.status = CAM_REQ_ABORTED;
4016 		xpt_done(accb);
4017 		return (CAM_REQ_CMP);
4018 	}
4019 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
4020 	return (CAM_PATH_INVALID);
4021 }
4022 
4023 /*
4024  * Ask the MPT to abort the current target command
4025  */
4026 static int
4027 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4028 {
4029 	int error;
4030 	request_t *req;
4031 	PTR_MSG_TARGET_MODE_ABORT abtp;
4032 
4033 	req = mpt_get_request(mpt, FALSE);
4034 	if (req == NULL) {
4035 		return (-1);
4036 	}
4037 	abtp = req->req_vbuf;
4038 	memset(abtp, 0, sizeof (*abtp));
4039 
4040 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4041 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4042 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4043 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4044 	error = 0;
4045 	if (mpt->is_fc || mpt->is_sas) {
4046 		mpt_send_cmd(mpt, req);
4047 	} else {
4048 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4049 	}
4050 	return (error);
4051 }
4052 
4053 /*
4054  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4055  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4056  * FC929 to set bogus FC_RSP fields (nonzero residuals
4057  * but w/o RESID fields set). This causes QLogic initiators
4058  * to think maybe that a frame was lost.
4059  *
4060  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4061  * we use allocated requests to do TARGET_ASSIST and we
4062  * need to know when to release them.
4063  */
4064 
4065 static void
4066 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4067     uint8_t status, uint8_t const *sense_data)
4068 {
4069 	uint8_t *cmd_vbuf;
4070 	mpt_tgt_state_t *tgt;
4071 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4072 	request_t *req;
4073 	bus_addr_t paddr;
4074 	int resplen = 0;
4075 
4076 	cmd_vbuf = cmd_req->req_vbuf;
4077 	cmd_vbuf += MPT_RQSL(mpt);
4078 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4079 
4080 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4081 		if (mpt->outofbeer == 0) {
4082 			mpt->outofbeer = 1;
4083 			xpt_freeze_simq(mpt->sim, 1);
4084 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4085 		}
4086 		if (ccb) {
4087 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4088 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4089 			MPTLOCK_2_CAMLOCK(mpt);
4090 			xpt_done(ccb);
4091 			CAMLOCK_2_MPTLOCK(mpt);
4092 		} else {
4093 			mpt_prt(mpt,
4094 			    "XXXX could not allocate status req- dropping\n");
4095 		}
4096 		return;
4097 	}
4098 	req->ccb = ccb;
4099 	if (ccb) {
4100 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4101 		ccb->ccb_h.ccb_req_ptr = req;
4102 	}
4103 
4104 	/*
4105 	 * Record the currently active ccb, if any, and the
4106 	 * request for it in our target state area.
4107 	 */
4108 	tgt->ccb = ccb;
4109 	tgt->req = req;
4110 	tgt->state = TGT_STATE_SENDING_STATUS;
4111 
4112 	tp = req->req_vbuf;
4113 	paddr = req->req_pbuf;
4114 	paddr += MPT_RQSL(mpt);
4115 
4116 	memset(tp, 0, sizeof (*tp));
4117 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4118 	if (mpt->is_fc) {
4119 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4120 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4121 		uint8_t *sts_vbuf;
4122 		uint32_t *rsp;
4123 
4124 		sts_vbuf = req->req_vbuf;
4125 		sts_vbuf += MPT_RQSL(mpt);
4126 		rsp = (uint32_t *) sts_vbuf;
4127 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4128 
4129 		/*
4130 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4131 		 * It has to be big-endian in memory and is organized
4132 		 * in 32 bit words, which are much easier to deal with
4133 		 * as words which are swizzled as needed.
4134 		 *
4135 		 * All we're filling here is the FC_RSP payload.
4136 		 * We may just have the chip synthesize it if
4137 		 * we have no residual and an OK status.
4138 		 *
4139 		 */
4140 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4141 
4142 		rsp[2] = status;
4143 		if (tgt->resid) {
4144 			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
4145 			rsp[3] = htobe32(tgt->resid);
4146 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4147 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4148 #endif
4149 		}
4150 		if (status == SCSI_STATUS_CHECK_COND) {
4151 			int i;
4152 
4153 			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
4154 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4155 			if (sense_data) {
4156 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4157 			} else {
4158 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4159 				    "TION but no sense data?\n");
4160 				memset(&rsp, 0, MPT_SENSE_SIZE);
4161 			}
4162 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4163 				rsp[i] = htobe32(rsp[i]);
4164 			}
4165 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4166 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4167 #endif
4168 		}
4169 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4170 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4171 #endif
4172 		rsp[2] = htobe32(rsp[2]);
4173 	} else if (mpt->is_sas) {
4174 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4175 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4176 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4177 	} else {
4178 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4179 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4180 		tp->StatusCode = status;
4181 		tp->QueueTag = htole16(sp->Tag);
4182 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4183 	}
4184 
4185 	tp->ReplyWord = htole32(tgt->reply_desc);
4186 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4187 
4188 #ifdef	WE_CAN_USE_AUTO_REPOST
4189 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4190 #endif
4191 	if (status == SCSI_STATUS_OK && resplen == 0) {
4192 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4193 	} else {
4194 		tp->StatusDataSGE.u.Address32 = (uint32_t) paddr;
4195 		tp->StatusDataSGE.FlagsLength =
4196 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4197 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4198 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4199 			MPI_SGE_FLAGS_END_OF_LIST	|
4200 			MPI_SGE_FLAGS_END_OF_BUFFER;
4201 		tp->StatusDataSGE.FlagsLength <<= MPI_SGE_FLAGS_SHIFT;
4202 		tp->StatusDataSGE.FlagsLength |= resplen;
4203 	}
4204 
4205 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4206 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4207 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4208 	    req->serno, tgt->resid);
4209 	if (ccb) {
4210 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4211 		ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz);
4212 	}
4213 	mpt_send_cmd(mpt, req);
4214 }
4215 
4216 static void
4217 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4218     tgt_resource_t *trtp, int init_id)
4219 {
4220 	struct ccb_immed_notify *inot;
4221 	mpt_tgt_state_t *tgt;
4222 
4223 	tgt = MPT_TGT_STATE(mpt, req);
4224 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4225 	if (inot == NULL) {
4226 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4227 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4228 		return;
4229 	}
4230 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4231 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4232 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4233 
4234 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4235 	inot->sense_len = 0;
4236 	memset(inot->message_args, 0, sizeof (inot->message_args));
4237 	inot->initiator_id = init_id;	/* XXX */
4238 
4239 	/*
4240 	 * This is a somewhat grotesque attempt to map from task management
4241 	 * to old style SCSI messages. God help us all.
4242 	 */
4243 	switch (fc) {
4244 	case MPT_ABORT_TASK_SET:
4245 		inot->message_args[0] = MSG_ABORT_TAG;
4246 		break;
4247 	case MPT_CLEAR_TASK_SET:
4248 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4249 		break;
4250 	case MPT_TARGET_RESET:
4251 		inot->message_args[0] = MSG_TARGET_RESET;
4252 		break;
4253 	case MPT_CLEAR_ACA:
4254 		inot->message_args[0] = MSG_CLEAR_ACA;
4255 		break;
4256 	case MPT_TERMINATE_TASK:
4257 		inot->message_args[0] = MSG_ABORT_TAG;
4258 		break;
4259 	default:
4260 		inot->message_args[0] = MSG_NOOP;
4261 		break;
4262 	}
4263 	tgt->ccb = (union ccb *) inot;
4264 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4265 	MPTLOCK_2_CAMLOCK(mpt);
4266 	xpt_done((union ccb *)inot);
4267 	CAMLOCK_2_MPTLOCK(mpt);
4268 }
4269 
4270 static void
4271 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4272 {
4273 	struct ccb_accept_tio *atiop;
4274 	lun_id_t lun;
4275 	int tag_action = 0;
4276 	mpt_tgt_state_t *tgt;
4277 	tgt_resource_t *trtp = NULL;
4278 	U8 *lunptr;
4279 	U8 *vbuf;
4280 	U16 itag;
4281 	U16 ioindex;
4282 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4283 	uint8_t *cdbp;
4284 
4285 	/*
4286 	 * First, DMA sync the received command- which is in the *request*
4287 	 * phys area.
4288 	 * XXX: We could optimize this for a range
4289 	 */
4290 	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
4291 	    BUS_DMASYNC_POSTREAD);
4292 
4293 	/*
4294 	 * Stash info for the current command where we can get at it later.
4295 	 */
4296 	vbuf = req->req_vbuf;
4297 	vbuf += MPT_RQSL(mpt);
4298 
4299 	/*
4300 	 * Get our state pointer set up.
4301 	 */
4302 	tgt = MPT_TGT_STATE(mpt, req);
4303 	if (tgt->state != TGT_STATE_LOADED) {
4304 		mpt_tgt_dump_req_state(mpt, req);
4305 		panic("bad target state in mpt_scsi_tgt_atio");
4306 	}
4307 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4308 	tgt->state = TGT_STATE_IN_CAM;
4309 	tgt->reply_desc = reply_desc;
4310 	ioindex = GET_IO_INDEX(reply_desc);
4311 
4312 	if (mpt->is_fc) {
4313 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4314 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4315 		if (fc->FcpCntl[2]) {
4316 			/*
4317 			 * Task Management Request
4318 			 */
4319 			switch (fc->FcpCntl[2]) {
4320 			case 0x2:
4321 				fct = MPT_ABORT_TASK_SET;
4322 				break;
4323 			case 0x4:
4324 				fct = MPT_CLEAR_TASK_SET;
4325 				break;
4326 			case 0x20:
4327 				fct = MPT_TARGET_RESET;
4328 				break;
4329 			case 0x40:
4330 				fct = MPT_CLEAR_ACA;
4331 				break;
4332 			case 0x80:
4333 				fct = MPT_TERMINATE_TASK;
4334 				break;
4335 			default:
4336 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4337 				    fc->FcpCntl[2]);
4338 				mpt_scsi_tgt_status(mpt, 0, req,
4339 				    SCSI_STATUS_OK, 0);
4340 				return;
4341 			}
4342 		} else {
4343 			switch (fc->FcpCntl[1]) {
4344 			case 0:
4345 				tag_action = MSG_SIMPLE_Q_TAG;
4346 				break;
4347 			case 1:
4348 				tag_action = MSG_HEAD_OF_Q_TAG;
4349 				break;
4350 			case 2:
4351 				tag_action = MSG_ORDERED_Q_TAG;
4352 				break;
4353 			default:
4354 				/*
4355 				 * Bah. Ignore Untagged Queing and ACA
4356 				 */
4357 				tag_action = MSG_SIMPLE_Q_TAG;
4358 				break;
4359 			}
4360 		}
4361 		tgt->resid = be32toh(fc->FcpDl);
4362 		cdbp = fc->FcpCdb;
4363 		lunptr = fc->FcpLun;
4364 		itag = be16toh(fc->OptionalOxid);
4365 	} else if (mpt->is_sas) {
4366 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4367 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4368 		cdbp = ssp->CDB;
4369 		lunptr = ssp->LogicalUnitNumber;
4370 		itag = ssp->InitiatorTag;
4371 	} else {
4372 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4373 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4374 		cdbp = sp->CDB;
4375 		lunptr = sp->LogicalUnitNumber;
4376 		itag = sp->Tag;
4377 	}
4378 
4379 	/*
4380 	 * Generate a simple lun
4381 	 */
4382 	switch (lunptr[0] & 0xc0) {
4383 	case 0x40:
4384 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
4385 		break;
4386 	case 0:
4387 		lun = lunptr[1];
4388 		break;
4389 	default:
4390 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
4391 		lun = 0xffff;
4392 		break;
4393 	}
4394 
4395 	/*
4396 	 * Deal with non-enabled or bad luns here.
4397 	 */
4398 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4399 	    mpt->trt[lun].enabled == 0) {
4400 		if (mpt->twildcard) {
4401 			trtp = &mpt->trt_wildcard;
4402 		} else if (fct != MPT_NIL_TMT_VALUE) {
4403 			const uint8_t sp[MPT_SENSE_SIZE] = {
4404 				0xf0, 0, 0x5, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0x25
4405 			};
4406 			mpt_scsi_tgt_status(mpt, NULL, req,
4407 			    SCSI_STATUS_CHECK_COND, sp);
4408 			return;
4409 		}
4410 	} else {
4411 		trtp = &mpt->trt[lun];
4412 	}
4413 
4414 	/*
4415 	 * Deal with any task management
4416 	 */
4417 	if (fct != MPT_NIL_TMT_VALUE) {
4418 		if (trtp == NULL) {
4419 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
4420 			    fct);
4421 			mpt_scsi_tgt_status(mpt, 0, req,
4422 			    SCSI_STATUS_OK, 0);
4423 		} else {
4424 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
4425 			    GET_INITIATOR_INDEX(reply_desc));
4426 		}
4427 		return;
4428 	}
4429 
4430 
4431 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
4432 	if (atiop == NULL) {
4433 		mpt_lprt(mpt, MPT_PRT_WARN,
4434 		    "no ATIOs for lun %u- sending back %s\n", lun,
4435 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
4436 		mpt_scsi_tgt_status(mpt, NULL, req,
4437 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
4438 		    NULL);
4439 		return;
4440 	}
4441 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
4442 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4443 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
4444 	atiop->ccb_h.ccb_mpt_ptr = mpt;
4445 	atiop->ccb_h.status = CAM_CDB_RECVD;
4446 	atiop->ccb_h.target_lun = lun;
4447 	atiop->sense_len = 0;
4448 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
4449 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
4450 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
4451 
4452 	/*
4453 	 * The tag we construct here allows us to find the
4454 	 * original request that the command came in with.
4455 	 *
4456 	 * This way we don't have to depend on anything but the
4457 	 * tag to find things when CCBs show back up from CAM.
4458 	 */
4459 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4460 	tgt->tag_id = atiop->tag_id;
4461 	if (tag_action) {
4462 		atiop->tag_action = tag_action;
4463 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
4464 	}
4465 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4466 		int i;
4467 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
4468 		    atiop->ccb_h.target_lun);
4469 		for (i = 0; i < atiop->cdb_len; i++) {
4470 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
4471 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
4472 		}
4473 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
4474 	    	    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
4475 	}
4476 
4477 	MPTLOCK_2_CAMLOCK(mpt);
4478 	xpt_done((union ccb *)atiop);
4479 	CAMLOCK_2_MPTLOCK(mpt);
4480 }
4481 
4482 static void
4483 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
4484 {
4485 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4486 
4487 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
4488 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
4489 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
4490 	    tgt->tag_id, tgt->state);
4491 }
4492 
4493 static void
4494 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
4495 {
4496 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
4497 	    req->index, req->index, req->state);
4498 	mpt_tgt_dump_tgt_state(mpt, req);
4499 }
4500 
4501 static int
4502 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
4503     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
4504 {
4505 	int dbg;
4506 	union ccb *ccb;
4507 	U16 status;
4508 
4509 	if (reply_frame == NULL) {
4510 		/*
4511 		 * Figure out what the state of the command is.
4512 		 */
4513 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
4514 
4515 #ifdef	INVARIANTS
4516 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
4517 		if (tgt->req) {
4518 			mpt_req_not_spcl(mpt, tgt->req,
4519 			    "turbo scsi_tgt_reply associated req", __LINE__);
4520 		}
4521 #endif
4522 		switch(tgt->state) {
4523 		case TGT_STATE_LOADED:
4524 			/*
4525 			 * This is a new command starting.
4526 			 */
4527 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
4528 			break;
4529 		case TGT_STATE_MOVING_DATA:
4530 		{
4531 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4532 
4533 			ccb = tgt->ccb;
4534 			if (tgt->req == NULL) {
4535 				panic("mpt: turbo target reply with null "
4536 				    "associated request moving data");
4537 				/* NOTREACHED */
4538 			}
4539 			if (ccb == NULL) {
4540 				panic("mpt: turbo target reply with null "
4541 				    "associated ccb moving data");
4542 				/* NOTREACHED */
4543 			}
4544 			tgt->ccb = NULL;
4545 			tgt->nxfers++;
4546 			untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch);
4547 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4548 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
4549 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
4550 			/*
4551 			 * Free the Target Assist Request
4552 			 */
4553 			KASSERT(tgt->req->ccb == ccb,
4554 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
4555 			    tgt->req->serno, tgt->req->ccb));
4556 			TAILQ_REMOVE(&mpt->request_pending_list,
4557 			    tgt->req, links);
4558 			mpt_free_request(mpt, tgt->req);
4559 			tgt->req = NULL;
4560 
4561 			/*
4562 			 * Do we need to send status now? That is, are
4563 			 * we done with all our data transfers?
4564 			 */
4565 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4566 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4567 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4568 				KASSERT(ccb->ccb_h.status,
4569 				    ("zero ccb sts at %d\n", __LINE__));
4570 				tgt->state = TGT_STATE_IN_CAM;
4571 				if (mpt->outofbeer) {
4572 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4573 					mpt->outofbeer = 0;
4574 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4575 				}
4576 				MPTLOCK_2_CAMLOCK(mpt);
4577 				xpt_done(ccb);
4578 				CAMLOCK_2_MPTLOCK(mpt);
4579 				break;
4580 			}
4581 			/*
4582 			 * Otherwise, send status (and sense)
4583 			 */
4584 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4585 				sp = sense;
4586 				memcpy(sp, &ccb->csio.sense_data,
4587 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
4588 			}
4589 			mpt_scsi_tgt_status(mpt, ccb, req,
4590 			    ccb->csio.scsi_status, sp);
4591 			break;
4592 		}
4593 		case TGT_STATE_SENDING_STATUS:
4594 		case TGT_STATE_MOVING_DATA_AND_STATUS:
4595 		{
4596 			int ioindex;
4597 			ccb = tgt->ccb;
4598 
4599 			if (tgt->req == NULL) {
4600 				panic("mpt: turbo target reply with null "
4601 				    "associated request sending status");
4602 				/* NOTREACHED */
4603 			}
4604 
4605 			if (ccb) {
4606 				tgt->ccb = NULL;
4607 				if (tgt->state ==
4608 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
4609 					tgt->nxfers++;
4610 				}
4611 				untimeout(mpt_timeout, ccb,
4612 				    ccb->ccb_h.timeout_ch);
4613 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4614 					ccb->ccb_h.status |= CAM_SENT_SENSE;
4615 				}
4616 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4617 				    "TARGET_STATUS tag %x sts %x flgs %x req "
4618 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
4619 				    ccb->ccb_h.flags, tgt->req);
4620 				/*
4621 				 * Free the Target Send Status Request
4622 				 */
4623 				KASSERT(tgt->req->ccb == ccb,
4624 				    ("tgt->req %p:%u tgt->req->ccb %p",
4625 				    tgt->req, tgt->req->serno, tgt->req->ccb));
4626 				/*
4627 				 * Notify CAM that we're done
4628 				 */
4629 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4630 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4631 				KASSERT(ccb->ccb_h.status,
4632 				    ("ZERO ccb sts at %d\n", __LINE__));
4633 				tgt->ccb = NULL;
4634 			} else {
4635 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4636 				    "TARGET_STATUS non-CAM for  req %p:%u\n",
4637 				    tgt->req, tgt->req->serno);
4638 			}
4639 			TAILQ_REMOVE(&mpt->request_pending_list,
4640 			    tgt->req, links);
4641 			mpt_free_request(mpt, tgt->req);
4642 			tgt->req = NULL;
4643 
4644 			/*
4645 			 * And re-post the Command Buffer.
4646 			 * This wil reset the state.
4647 			 */
4648 			ioindex = GET_IO_INDEX(reply_desc);
4649 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4650 			mpt_post_target_command(mpt, req, ioindex);
4651 
4652 			/*
4653 			 * And post a done for anyone who cares
4654 			 */
4655 			if (ccb) {
4656 				if (mpt->outofbeer) {
4657 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4658 					mpt->outofbeer = 0;
4659 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
4660 				}
4661 				MPTLOCK_2_CAMLOCK(mpt);
4662 				xpt_done(ccb);
4663 				CAMLOCK_2_MPTLOCK(mpt);
4664 			}
4665 			break;
4666 		}
4667 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
4668 			tgt->state = TGT_STATE_LOADED;
4669 			break;
4670 		default:
4671 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
4672 			    "Reply Function\n", tgt->state);
4673 		}
4674 		return (TRUE);
4675 	}
4676 
4677 	status = le16toh(reply_frame->IOCStatus);
4678 	if (status != MPI_IOCSTATUS_SUCCESS) {
4679 		dbg = MPT_PRT_ERROR;
4680 	} else {
4681 		dbg = MPT_PRT_DEBUG1;
4682 	}
4683 
4684 	mpt_lprt(mpt, dbg,
4685 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
4686 	     req, req->serno, reply_frame, reply_frame->Function, status);
4687 
4688 	switch (reply_frame->Function) {
4689 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
4690 	{
4691 		mpt_tgt_state_t *tgt;
4692 #ifdef	INVARIANTS
4693 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
4694 #endif
4695 		if (status != MPI_IOCSTATUS_SUCCESS) {
4696 			/*
4697 			 * XXX What to do?
4698 			 */
4699 			break;
4700 		}
4701 		tgt = MPT_TGT_STATE(mpt, req);
4702 		KASSERT(tgt->state == TGT_STATE_LOADING,
4703 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
4704 		mpt_assign_serno(mpt, req);
4705 		tgt->state = TGT_STATE_LOADED;
4706 		break;
4707 	}
4708 	case MPI_FUNCTION_TARGET_ASSIST:
4709 #ifdef	INVARIANTS
4710 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
4711 #endif
4712 		mpt_prt(mpt, "target assist completion\n");
4713 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4714 		mpt_free_request(mpt, req);
4715 		break;
4716 	case MPI_FUNCTION_TARGET_STATUS_SEND:
4717 #ifdef	INVARIANTS
4718 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
4719 #endif
4720 		mpt_prt(mpt, "status send completion\n");
4721 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4722 		mpt_free_request(mpt, req);
4723 		break;
4724 	case MPI_FUNCTION_TARGET_MODE_ABORT:
4725 	{
4726 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
4727 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
4728 		PTR_MSG_TARGET_MODE_ABORT abtp =
4729 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
4730 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
4731 #ifdef	INVARIANTS
4732 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
4733 #endif
4734 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
4735 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
4736 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
4737 		mpt_free_request(mpt, req);
4738 		break;
4739 	}
4740 	default:
4741 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
4742 		    "0x%x\n", reply_frame->Function);
4743 		break;
4744 	}
4745 	return (TRUE);
4746 }
4747