1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC adapters.
3  * FreeBSD Version.
4  *
5  * SPDX-License-Identifier: BSD-2-Clause AND BSD-3-Clause
6  *
7  * Copyright (c)  2000, 2001 by Greg Ansley
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice immediately at the beginning of the file, without modification,
14  *    this list of conditions, and the following disclaimer.
15  * 2. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 /*-
31  * Copyright (c) 2002, 2006 by Matthew Jacob
32  * All rights reserved.
33  *
34  * Redistribution and use in source and binary forms, with or without
35  * modification, are permitted provided that the following conditions are
36  * met:
37  * 1. Redistributions of source code must retain the above copyright
38  *    notice, this list of conditions and the following disclaimer.
39  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
40  *    substantially similar to the "NO WARRANTY" disclaimer below
41  *    ("Disclaimer") and any redistribution must be conditioned upon including
42  *    a substantially similar Disclaimer requirement for further binary
43  *    redistribution.
44  * 3. Neither the names of the above listed copyright holders nor the names
45  *    of any contributors may be used to endorse or promote products derived
46  *    from this software without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
49  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
52  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
53  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
54  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
55  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
56  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
57  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
58  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  *
60  * Support from Chris Ellsworth in order to make SAS adapters work
61  * is gratefully acknowledged.
62  *
63  * Support from LSI-Logic has also gone a long way toward making this a
64  * workable subsystem and is gratefully acknowledged.
65  */
66 /*-
67  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
68  * Copyright (c) 2005, WHEEL Sp. z o.o.
69  * Copyright (c) 2004, 2005 Justin T. Gibbs
70  * All rights reserved.
71  *
72  * Redistribution and use in source and binary forms, with or without
73  * modification, are permitted provided that the following conditions are
74  * met:
75  * 1. Redistributions of source code must retain the above copyright
76  *    notice, this list of conditions and the following disclaimer.
77  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
78  *    substantially similar to the "NO WARRANTY" disclaimer below
79  *    ("Disclaimer") and any redistribution must be conditioned upon including
80  *    a substantially similar Disclaimer requirement for further binary
81  *    redistribution.
82  * 3. Neither the names of the above listed copyright holders nor the names
83  *    of any contributors may be used to endorse or promote products derived
84  *    from this software without specific prior written permission.
85  *
86  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
87  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
88  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
89  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
90  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
91  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
92  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
93  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
94  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
95  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
96  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97  */
98 #include <sys/cdefs.h>
99 #include <dev/mpt/mpt.h>
100 #include <dev/mpt/mpt_cam.h>
101 #include <dev/mpt/mpt_raid.h>
102 
103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/mpt/mpilib/mpi_init.h"
105 #include "dev/mpt/mpilib/mpi_targ.h"
106 #include "dev/mpt/mpilib/mpi_fc.h"
107 #include "dev/mpt/mpilib/mpi_sas.h"
108 
109 #include <sys/callout.h>
110 #include <sys/kthread.h>
111 #include <sys/sysctl.h>
112 
113 static void mpt_poll(struct cam_sim *);
114 static callout_func_t mpt_timeout;
115 static void mpt_action(struct cam_sim *, union ccb *);
116 static int
117 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
118 static void mpt_setwidth(struct mpt_softc *, int, int);
119 static void mpt_setsync(struct mpt_softc *, int, int, int);
120 static int mpt_update_spi_config(struct mpt_softc *, int);
121 
122 static mpt_reply_handler_t mpt_scsi_reply_handler;
123 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
124 static mpt_reply_handler_t mpt_fc_els_reply_handler;
125 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
126 					MSG_DEFAULT_REPLY *);
127 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
128 static int mpt_fc_reset_link(struct mpt_softc *, int);
129 
130 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
131 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
132 static void mpt_recovery_thread(void *arg);
133 static void mpt_recover_commands(struct mpt_softc *mpt);
134 
135 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
136     target_id_t, lun_id_t, u_int, int);
137 
138 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
139 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
140 static int mpt_add_els_buffers(struct mpt_softc *mpt);
141 static int mpt_add_target_commands(struct mpt_softc *mpt);
142 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
143 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
144 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
145 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
146 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
147 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
148     uint8_t, uint8_t const *, u_int);
149 static void
150 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
151     tgt_resource_t *, int);
152 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
153 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
154 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
155 static mpt_reply_handler_t mpt_sata_pass_reply_handler;
156 
157 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
158 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
159 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
160 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
161 
162 static mpt_probe_handler_t	mpt_cam_probe;
163 static mpt_attach_handler_t	mpt_cam_attach;
164 static mpt_enable_handler_t	mpt_cam_enable;
165 static mpt_ready_handler_t	mpt_cam_ready;
166 static mpt_event_handler_t	mpt_cam_event;
167 static mpt_reset_handler_t	mpt_cam_ioc_reset;
168 static mpt_detach_handler_t	mpt_cam_detach;
169 
170 static struct mpt_personality mpt_cam_personality =
171 {
172 	.name		= "mpt_cam",
173 	.probe		= mpt_cam_probe,
174 	.attach		= mpt_cam_attach,
175 	.enable		= mpt_cam_enable,
176 	.ready		= mpt_cam_ready,
177 	.event		= mpt_cam_event,
178 	.reset		= mpt_cam_ioc_reset,
179 	.detach		= mpt_cam_detach,
180 };
181 
182 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
183 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
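/*
 * DECLARE_MPT_PERSONALITY() hooks the table above into the mpt(4) core's
 * list of personalities; SI_ORDER_SECOND orders it after anything
 * registered at SI_ORDER_FIRST (presumably the core itself), and the
 * MODULE_DEPEND() line ensures cam(4) is loaded before this module.
 */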
184 
185 int mpt_enable_sata_wc = -1;
186 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
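/*
 * TUNABLE_INT() fetches the value from the kernel environment at module
 * load, so this tunable is normally set from the loader, e.g. in
 * /boot/loader.conf:
 *
 *	hw.mpt.enable_sata_wc="1"	# force SATA write caching on
 *	hw.mpt.enable_sata_wc="0"	# force SATA write caching off
 *
 * The default of -1 leaves each drive's write cache setting alone.
 */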
187 
188 static int
189 mpt_cam_probe(struct mpt_softc *mpt)
190 {
191 	int role;
192 
193 	/*
194 	 * Only attach to nodes that support the initiator or target role
195 	 * (or want to) or have RAID physical devices that need CAM pass-thru
196 	 * support.
197 	 */
198 	if (mpt->do_cfg_role) {
199 		role = mpt->cfg_role;
200 	} else {
201 		role = mpt->role;
202 	}
203 	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
204 	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
205 		return (0);
206 	}
207 	return (ENODEV);
208 }
209 
210 static int
211 mpt_cam_attach(struct mpt_softc *mpt)
212 {
213 	struct cam_devq *devq;
214 	mpt_handler_t	 handler;
215 	int		 maxq;
216 	int		 error;
217 
218 	MPT_LOCK(mpt);
219 	TAILQ_INIT(&mpt->request_timeout_list);
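	/*
	 * maxq is the number of requests we can actually keep in flight:
	 * the smaller of what the IOC advertises (GlobalCredits) and what
	 * we allocated locally (MPT_MAX_REQUESTS).  ELS buffers and the
	 * reserved TMF request are deducted from it below.
	 */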
220 	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
221 	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
222 
223 	handler.reply_handler = mpt_scsi_reply_handler;
224 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
225 				     &scsi_io_handler_id);
226 	if (error != 0) {
227 		MPT_UNLOCK(mpt);
228 		goto cleanup;
229 	}
230 
231 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
232 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
233 				     &scsi_tmf_handler_id);
234 	if (error != 0) {
235 		MPT_UNLOCK(mpt);
236 		goto cleanup;
237 	}
238 
239 	/*
240 	 * If we're fibre channel and could support target mode, we register
241 	 * an ELS reply handler and give it resources.
242 	 */
243 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
244 		handler.reply_handler = mpt_fc_els_reply_handler;
245 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
246 		    &fc_els_handler_id);
247 		if (error != 0) {
248 			MPT_UNLOCK(mpt);
249 			goto cleanup;
250 		}
251 		if (mpt_add_els_buffers(mpt) == FALSE) {
252 			error = ENOMEM;
253 			MPT_UNLOCK(mpt);
254 			goto cleanup;
255 		}
256 		maxq -= mpt->els_cmds_allocated;
257 	}
258 
259 	/*
260 	 * If we support target mode, we register a reply handler for it,
261 	 * but don't add command resources until we actually enable target
262 	 * mode.
263 	 */
264 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
265 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
266 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
267 		    &mpt->scsi_tgt_handler_id);
268 		if (error != 0) {
269 			MPT_UNLOCK(mpt);
270 			goto cleanup;
271 		}
272 	}
273 
274 	if (mpt->is_sas) {
275 		handler.reply_handler = mpt_sata_pass_reply_handler;
276 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
277 		    &sata_pass_handler_id);
278 		if (error != 0) {
279 			MPT_UNLOCK(mpt);
280 			goto cleanup;
281 		}
282 	}
283 
284 	/*
285 	 * We keep one request reserved for timeout TMF requests.
286 	 */
287 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
288 	if (mpt->tmf_req == NULL) {
289 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
290 		error = ENOMEM;
291 		MPT_UNLOCK(mpt);
292 		goto cleanup;
293 	}
294 
295 	/*
296 	 * Mark the request as free even though not on the free list.
297 	 * There is only one TMF request allowed to be outstanding at
298 	 * a time and the TMF routines perform their own allocation
299 	 * tracking using the standard state flags.
300 	 */
301 	mpt->tmf_req->state = REQ_STATE_FREE;
302 	maxq--;
303 
304 	/*
305 	 * The rest of this is CAM foo, for which we need to drop our lock
306 	 */
307 	MPT_UNLOCK(mpt);
308 
309 	if (mpt_spawn_recovery_thread(mpt) != 0) {
310 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
311 		error = ENOMEM;
312 		goto cleanup;
313 	}
314 
315 	/*
316 	 * Create the device queue for our SIM(s).
317 	 */
318 	devq = cam_simq_alloc(maxq);
319 	if (devq == NULL) {
320 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
321 		error = ENOMEM;
322 		goto cleanup;
323 	}
324 
325 	/*
326 	 * Construct our SIM entry.
327 	 */
328 	mpt->sim =
329 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
330 	if (mpt->sim == NULL) {
331 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
332 		cam_simq_free(devq);
333 		error = ENOMEM;
334 		goto cleanup;
335 	}
336 
337 	/*
338 	 * Register exactly this bus.
339 	 */
340 	MPT_LOCK(mpt);
341 	if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
342 		mpt_prt(mpt, "Bus registration Failed!\n");
343 		error = ENOMEM;
344 		MPT_UNLOCK(mpt);
345 		goto cleanup;
346 	}
347 
348 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
349 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
350 		mpt_prt(mpt, "Unable to allocate Path!\n");
351 		error = ENOMEM;
352 		MPT_UNLOCK(mpt);
353 		goto cleanup;
354 	}
355 	MPT_UNLOCK(mpt);
356 
357 	/*
358 	 * Only register a second bus for RAID physical
359 	 * devices if the controller supports RAID.
360 	 */
361 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
362 		return (0);
363 	}
364 
365 	/*
366 	 * Create a "bus" to export all hidden disks to CAM.
367 	 */
368 	mpt->phydisk_sim =
369 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
370 	if (mpt->phydisk_sim == NULL) {
371 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
372 		error = ENOMEM;
373 		goto cleanup;
374 	}
375 
376 	/*
377 	 * Register this bus.
378 	 */
379 	MPT_LOCK(mpt);
380 	if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
381 	    CAM_SUCCESS) {
382 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
383 		error = ENOMEM;
384 		MPT_UNLOCK(mpt);
385 		goto cleanup;
386 	}
387 
388 	if (xpt_create_path(&mpt->phydisk_path, NULL,
389 	    cam_sim_path(mpt->phydisk_sim),
390 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
391 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
392 		error = ENOMEM;
393 		MPT_UNLOCK(mpt);
394 		goto cleanup;
395 	}
396 	MPT_UNLOCK(mpt);
397 	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
398 	return (0);
399 
400 cleanup:
401 	mpt_cam_detach(mpt);
402 	return (error);
403 }
404 
405 /*
406  * Read FC configuration information
407  */
408 static int
409 mpt_read_config_info_fc(struct mpt_softc *mpt)
410 {
411 	struct sysctl_ctx_list *ctx;
412 	struct sysctl_oid *tree;
413 	char *topology = NULL;
414 	int rv;
415 
416 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
417 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
418 	if (rv) {
419 		return (-1);
420 	}
421 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
422 		 mpt->mpt_fcport_page0.Header.PageVersion,
423 		 mpt->mpt_fcport_page0.Header.PageLength,
424 		 mpt->mpt_fcport_page0.Header.PageNumber,
425 		 mpt->mpt_fcport_page0.Header.PageType);
426 
427 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
428 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
429 	if (rv) {
430 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
431 		return (-1);
432 	}
433 	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
434 
435 	switch (mpt->mpt_fcport_page0.CurrentSpeed) {
436 	case MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT:
437 		mpt->mpt_fcport_speed = 1;
438 		break;
439 	case MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT:
440 		mpt->mpt_fcport_speed = 2;
441 		break;
442 	case MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT:
443 		mpt->mpt_fcport_speed = 10;
444 		break;
445 	case MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT:
446 		mpt->mpt_fcport_speed = 4;
447 		break;
448 	default:
449 		mpt->mpt_fcport_speed = 0;
450 		break;
451 	}
452 
453 	switch (mpt->mpt_fcport_page0.Flags &
454 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
455 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
456 		mpt->mpt_fcport_speed = 0;
457 		topology = "<NO LOOP>";
458 		break;
459 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
460 		topology = "N-Port";
461 		break;
462 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
463 		topology = "NL-Port";
464 		break;
465 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
466 		topology = "F-Port";
467 		break;
468 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
469 		topology = "FL-Port";
470 		break;
471 	default:
472 		mpt->mpt_fcport_speed = 0;
473 		topology = "?";
474 		break;
475 	}
476 
477 	mpt->scinfo.fc.wwnn = ((uint64_t)mpt->mpt_fcport_page0.WWNN.High << 32)
478 	    | mpt->mpt_fcport_page0.WWNN.Low;
479 	mpt->scinfo.fc.wwpn = ((uint64_t)mpt->mpt_fcport_page0.WWPN.High << 32)
480 	    | mpt->mpt_fcport_page0.WWPN.Low;
481 	mpt->scinfo.fc.portid = mpt->mpt_fcport_page0.PortIdentifier;
482 
483 	mpt_lprt(mpt, MPT_PRT_INFO,
484 	    "FC Port Page 0: Topology <%s> WWNN 0x%16jx WWPN 0x%16jx "
485 	    "Speed %u-Gbit\n", topology,
486 	    (uintmax_t)mpt->scinfo.fc.wwnn, (uintmax_t)mpt->scinfo.fc.wwpn,
487 	    mpt->mpt_fcport_speed);
488 	MPT_UNLOCK(mpt);
489 	ctx = device_get_sysctl_ctx(mpt->dev);
490 	tree = device_get_sysctl_tree(mpt->dev);
491 
492 	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
493 	    "wwnn", CTLFLAG_RD, &mpt->scinfo.fc.wwnn,
494 	    "World Wide Node Name");
495 
496 	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
497 	     "wwpn", CTLFLAG_RD, &mpt->scinfo.fc.wwpn,
498 	     "World Wide Port Name");
499 
500 	MPT_LOCK(mpt);
501 	return (0);
502 }
503 
504 /*
505  * Set FC configuration information.
506  */
507 static int
508 mpt_set_initial_config_fc(struct mpt_softc *mpt)
509 {
510 	CONFIG_PAGE_FC_PORT_1 fc;
511 	U32 fl;
512 	int r, doit = 0;
513 	int role;
514 
515 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
516 	    &fc.Header, FALSE, 5000);
517 	if (r) {
518 		mpt_prt(mpt, "failed to read FC page 1 header\n");
519 		return (mpt_fc_reset_link(mpt, 1));
520 	}
521 
522 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
523 	    &fc.Header, sizeof (fc), FALSE, 5000);
524 	if (r) {
525 		mpt_prt(mpt, "failed to read FC page 1\n");
526 		return (mpt_fc_reset_link(mpt, 1));
527 	}
528 	mpt2host_config_page_fc_port_1(&fc);
529 
530 	/*
531 	 * Check our flags to make sure we support the role we want.
532 	 */
533 	doit = 0;
534 	role = 0;
535 	fl = fc.Flags;
536 
537 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
538 		role |= MPT_ROLE_INITIATOR;
539 	}
540 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
541 		role |= MPT_ROLE_TARGET;
542 	}
543 
544 	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
545 
546 	if (mpt->do_cfg_role == 0) {
547 		role = mpt->cfg_role;
548 	} else {
549 		mpt->do_cfg_role = 0;
550 	}
551 
552 	if (role != mpt->cfg_role) {
553 		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
554 			if ((role & MPT_ROLE_INITIATOR) == 0) {
555 				mpt_prt(mpt, "adding initiator role\n");
556 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
557 				doit++;
558 			} else {
559 				mpt_prt(mpt, "keeping initiator role\n");
560 			}
561 		} else if (role & MPT_ROLE_INITIATOR) {
562 			mpt_prt(mpt, "removing initiator role\n");
563 			doit++;
564 		}
565 		if (mpt->cfg_role & MPT_ROLE_TARGET) {
566 			if ((role & MPT_ROLE_TARGET) == 0) {
567 				mpt_prt(mpt, "adding target role\n");
568 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
569 				doit++;
570 			} else {
571 				mpt_prt(mpt, "keeping target role\n");
572 			}
573 		} else if (role & MPT_ROLE_TARGET) {
574 			mpt_prt(mpt, "removing target role\n");
575 			doit++;
576 		}
577 		mpt->role = mpt->cfg_role;
578 	}
579 
580 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
581 		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
582 			mpt_prt(mpt, "adding OXID option\n");
583 			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
584 			doit++;
585 		}
586 	}
587 
588 	if (doit) {
589 		fc.Flags = fl;
590 		host2mpt_config_page_fc_port_1(&fc);
591 		r = mpt_write_cfg_page(mpt,
592 		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
593 		    sizeof(fc), FALSE, 5000);
594 		if (r != 0) {
595 			mpt_prt(mpt, "failed to update NVRAM with changes\n");
596 			return (0);
597 		}
598 		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
599 		    "effect until next reboot or IOC reset\n");
600 	}
601 	return (0);
602 }
603 
604 static int
605 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
606 {
607 	ConfigExtendedPageHeader_t hdr;
608 	struct mptsas_phyinfo *phyinfo;
609 	SasIOUnitPage0_t *buffer;
610 	int error, len, i;
611 
612 	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
613 				       0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
614 				       &hdr, 0, 10000);
615 	if (error)
616 		goto out;
617 	if (hdr.ExtPageLength == 0) {
618 		error = ENXIO;
619 		goto out;
620 	}
621 
622 	len = hdr.ExtPageLength * 4;
623 	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
624 	if (buffer == NULL) {
625 		error = ENOMEM;
626 		goto out;
627 	}
628 
629 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
630 				     0, &hdr, buffer, len, 0, 10000);
631 	if (error) {
632 		free(buffer, M_DEVBUF);
633 		goto out;
634 	}
635 
636 	portinfo->num_phys = buffer->NumPhys;
637 	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
638 	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
639 	if (portinfo->phy_info == NULL) {
640 		free(buffer, M_DEVBUF);
641 		error = ENOMEM;
642 		goto out;
643 	}
644 
645 	for (i = 0; i < portinfo->num_phys; i++) {
646 		phyinfo = &portinfo->phy_info[i];
647 		phyinfo->phy_num = i;
648 		phyinfo->port_id = buffer->PhyData[i].Port;
649 		phyinfo->negotiated_link_rate =
650 		    buffer->PhyData[i].NegotiatedLinkRate;
651 		phyinfo->handle =
652 		    le16toh(buffer->PhyData[i].ControllerDevHandle);
653 	}
654 
655 	free(buffer, M_DEVBUF);
656 out:
657 	return (error);
658 }
659 
660 static int
661 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
662 	uint32_t form, uint32_t form_specific)
663 {
664 	ConfigExtendedPageHeader_t hdr;
665 	SasPhyPage0_t *buffer;
666 	int error;
667 
668 	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
669 				       MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
670 				       0, 10000);
671 	if (error)
672 		goto out;
673 	if (hdr.ExtPageLength == 0) {
674 		error = ENXIO;
675 		goto out;
676 	}
677 
678 	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
679 	if (buffer == NULL) {
680 		error = ENOMEM;
681 		goto out;
682 	}
683 
684 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
685 				     form + form_specific, &hdr, buffer,
686 				     sizeof(SasPhyPage0_t), 0, 10000);
687 	if (error) {
688 		free(buffer, M_DEVBUF);
689 		goto out;
690 	}
691 
692 	phy_info->hw_link_rate = buffer->HwLinkRate;
693 	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
694 	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
695 	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
696 
697 	free(buffer, M_DEVBUF);
698 out:
699 	return (error);
700 }
701 
702 static int
703 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
704 	uint32_t form, uint32_t form_specific)
705 {
706 	ConfigExtendedPageHeader_t hdr;
707 	SasDevicePage0_t *buffer;
708 	uint64_t sas_address;
709 	int error = 0;
710 
711 	bzero(device_info, sizeof(*device_info));
712 	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
713 				       MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
714 				       &hdr, 0, 10000);
715 	if (error)
716 		goto out;
717 	if (hdr.ExtPageLength == 0) {
718 		error = ENXIO;
719 		goto out;
720 	}
721 
722 	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
723 	if (buffer == NULL) {
724 		error = ENOMEM;
725 		goto out;
726 	}
727 
728 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
729 				     form + form_specific, &hdr, buffer,
730 				     sizeof(SasDevicePage0_t), 0, 10000);
731 	if (error) {
732 		free(buffer, M_DEVBUF);
733 		goto out;
734 	}
735 
736 	device_info->dev_handle = le16toh(buffer->DevHandle);
737 	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
738 	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
739 	device_info->slot = le16toh(buffer->Slot);
740 	device_info->phy_num = buffer->PhyNum;
741 	device_info->physical_port = buffer->PhysicalPort;
742 	device_info->target_id = buffer->TargetID;
743 	device_info->bus = buffer->Bus;
744 	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
745 	device_info->sas_address = le64toh(sas_address);
746 	device_info->device_info = le32toh(buffer->DeviceInfo);
747 
748 	free(buffer, M_DEVBUF);
749 out:
750 	return (error);
751 }
752 
753 /*
754  * Read SAS configuration information: the SAS IO unit, PHY and device pages.
755  */
756 static int
757 mpt_read_config_info_sas(struct mpt_softc *mpt)
758 {
759 	struct mptsas_portinfo *portinfo;
760 	struct mptsas_phyinfo *phyinfo;
761 	int error, i;
762 
763 	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
764 	if (portinfo == NULL)
765 		return (ENOMEM);
766 
767 	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
768 	if (error) {
769 		free(portinfo, M_DEVBUF);
770 		return (0);
771 	}
772 
773 	for (i = 0; i < portinfo->num_phys; i++) {
774 		phyinfo = &portinfo->phy_info[i];
775 		error = mptsas_sas_phy_pg0(mpt, phyinfo,
776 		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
777 		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
778 		if (error)
779 			break;
780 		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
781 		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
782 		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
783 		    phyinfo->handle);
784 		if (error)
785 			break;
786 		phyinfo->identify.phy_num = phyinfo->phy_num = i;
787 		if (phyinfo->attached.dev_handle)
788 			error = mptsas_sas_device_pg0(mpt,
789 			    &phyinfo->attached,
790 			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
791 			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
792 			    phyinfo->attached.dev_handle);
793 		if (error)
794 			break;
795 	}
796 	mpt->sas_portinfo = portinfo;
797 	return (0);
798 }
799 
800 static void
801 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
802 	int enabled)
803 {
804 	SataPassthroughRequest_t	*pass;
805 	request_t *req;
806 	int error, status;
807 
808 	req = mpt_get_request(mpt, 0);
809 	if (req == NULL)
810 		return;
811 
812 	pass = req->req_vbuf;
813 	bzero(pass, sizeof(SataPassthroughRequest_t));
814 	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
815 	pass->TargetID = devinfo->target_id;
816 	pass->Bus = devinfo->bus;
817 	pass->PassthroughFlags = 0;
818 	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
819 	pass->DataLength = 0;
820 	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
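	/*
	 * Hand-rolled SET FEATURES host-to-device register FIS: byte 0 is
	 * the FIS type (0x27, Register - Host to Device), byte 1 sets the
	 * C bit (0x80) to mark a command register update, byte 2 is the
	 * ATA SET FEATURES opcode (0xEF), and byte 3 is its subcommand --
	 * 0x02 enables the volatile write cache, 0x82 disables it.  The
	 * remaining bytes fill in the device and control fields of the
	 * standard register FIS.
	 */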
821 	pass->CommandFIS[0] = 0x27;
822 	pass->CommandFIS[1] = 0x80;
823 	pass->CommandFIS[2] = 0xef;
824 	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
825 	pass->CommandFIS[7] = 0x40;
826 	pass->CommandFIS[15] = 0x08;
827 
828 	mpt_check_doorbell(mpt);
829 	mpt_send_cmd(mpt, req);
830 	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
831 			     10 * 1000);
832 	if (error) {
833 		mpt_free_request(mpt, req);
834 		printf("error %d sending passthrough\n", error);
835 		return;
836 	}
837 
838 	status = le16toh(req->IOCStatus);
839 	if (status != MPI_IOCSTATUS_SUCCESS) {
840 		mpt_free_request(mpt, req);
841 		printf("IOCSTATUS %d\n", status);
842 		return;
843 	}
844 
845 	mpt_free_request(mpt, req);
846 }
847 
848 /*
849  * Set SAS configuration information: adjust SATA write caching if requested.
850  */
851 static int
852 mpt_set_initial_config_sas(struct mpt_softc *mpt)
853 {
854 	struct mptsas_phyinfo *phyinfo;
855 	int i;
856 
857 	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
858 		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
859 			phyinfo = &mpt->sas_portinfo->phy_info[i];
860 			if (phyinfo->attached.dev_handle == 0)
861 				continue;
862 			if ((phyinfo->attached.device_info &
863 			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
864 				continue;
865 			if (bootverbose)
866 				device_printf(mpt->dev,
867 				    "%sabling SATA WC on phy %d\n",
868 				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
869 			mptsas_set_sata_wc(mpt, &phyinfo->attached,
870 					   mpt_enable_sata_wc);
871 		}
872 	}
873 
874 	return (0);
875 }
876 
877 static int
878 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
879  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
880 {
881 
882 	if (req != NULL) {
883 		if (reply_frame != NULL) {
884 			req->IOCStatus = le16toh(reply_frame->IOCStatus);
885 		}
886 		req->state &= ~REQ_STATE_QUEUED;
887 		req->state |= REQ_STATE_DONE;
888 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
889 		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
890 			wakeup(req);
891 		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
892 			/*
893 			 * Whew- we can free this request (late completion)
894 			 */
895 			mpt_free_request(mpt, req);
896 		}
897 	}
898 
899 	return (TRUE);
900 }
901 
902 /*
903  * Read SCSI configuration information
904  */
905 static int
906 mpt_read_config_info_spi(struct mpt_softc *mpt)
907 {
908 	int rv, i;
909 
910 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
911 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
912 	if (rv) {
913 		return (-1);
914 	}
915 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
916 	    mpt->mpt_port_page0.Header.PageVersion,
917 	    mpt->mpt_port_page0.Header.PageLength,
918 	    mpt->mpt_port_page0.Header.PageNumber,
919 	    mpt->mpt_port_page0.Header.PageType);
920 
921 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
922 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
923 	if (rv) {
924 		return (-1);
925 	}
926 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
927 	    mpt->mpt_port_page1.Header.PageVersion,
928 	    mpt->mpt_port_page1.Header.PageLength,
929 	    mpt->mpt_port_page1.Header.PageNumber,
930 	    mpt->mpt_port_page1.Header.PageType);
931 
932 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
933 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
934 	if (rv) {
935 		return (-1);
936 	}
937 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
938 	    mpt->mpt_port_page2.Header.PageVersion,
939 	    mpt->mpt_port_page2.Header.PageLength,
940 	    mpt->mpt_port_page2.Header.PageNumber,
941 	    mpt->mpt_port_page2.Header.PageType);
942 
943 	for (i = 0; i < 16; i++) {
944 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
945 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
946 		if (rv) {
947 			return (-1);
948 		}
949 		mpt_lprt(mpt, MPT_PRT_DEBUG,
950 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
951 		    mpt->mpt_dev_page0[i].Header.PageVersion,
952 		    mpt->mpt_dev_page0[i].Header.PageLength,
953 		    mpt->mpt_dev_page0[i].Header.PageNumber,
954 		    mpt->mpt_dev_page0[i].Header.PageType);
955 
956 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
957 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
958 		if (rv) {
959 			return (-1);
960 		}
961 		mpt_lprt(mpt, MPT_PRT_DEBUG,
962 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
963 		    mpt->mpt_dev_page1[i].Header.PageVersion,
964 		    mpt->mpt_dev_page1[i].Header.PageLength,
965 		    mpt->mpt_dev_page1[i].Header.PageNumber,
966 		    mpt->mpt_dev_page1[i].Header.PageType);
967 	}
968 
969 	/*
970 	 * At this point, we don't *have* to fail. As long as we have
971 	 * valid config header information, we can (barely) lurch
972 	 * along.
973 	 */
974 
975 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
976 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
977 	if (rv) {
978 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
979 	} else {
980 		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
981 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
982 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
983 		    mpt->mpt_port_page0.Capabilities,
984 		    mpt->mpt_port_page0.PhysicalInterface);
985 	}
986 
987 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
988 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
989 	if (rv) {
990 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
991 	} else {
992 		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
993 		mpt_lprt(mpt, MPT_PRT_DEBUG,
994 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
995 		    mpt->mpt_port_page1.Configuration,
996 		    mpt->mpt_port_page1.OnBusTimerValue);
997 	}
998 
999 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
1000 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
1001 	if (rv) {
1002 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
1003 	} else {
1004 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1005 		    "Port Page 2: Flags %x Settings %x\n",
1006 		    mpt->mpt_port_page2.PortFlags,
1007 		    mpt->mpt_port_page2.PortSettings);
1008 		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
1009 		for (i = 0; i < 16; i++) {
1010 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1011 		  	    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1012 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1013 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1014 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1015 		}
1016 	}
1017 
1018 	for (i = 0; i < 16; i++) {
1019 		rv = mpt_read_cur_cfg_page(mpt, i,
1020 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1021 		    FALSE, 5000);
1022 		if (rv) {
1023 			mpt_prt(mpt,
1024 			    "cannot read SPI Target %d Device Page 0\n", i);
1025 			continue;
1026 		}
1027 		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
1028 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1029 		    "target %d page 0: Negotiated Params %x Information %x\n",
1030 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1031 		    mpt->mpt_dev_page0[i].Information);
1032 
1033 		rv = mpt_read_cur_cfg_page(mpt, i,
1034 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1035 		    FALSE, 5000);
1036 		if (rv) {
1037 			mpt_prt(mpt,
1038 			    "cannot read SPI Target %d Device Page 1\n", i);
1039 			continue;
1040 		}
1041 		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
1042 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1043 		    "target %d page 1: Requested Params %x Configuration %x\n",
1044 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
1045 		    mpt->mpt_dev_page1[i].Configuration);
1046 	}
1047 	return (0);
1048 }
1049 
1050 /*
1051  * Validate SPI configuration information.
1052  *
1053  * In particular, validate SPI Port Page 1.
1054  */
1055 static int
1056 mpt_set_initial_config_spi(struct mpt_softc *mpt)
1057 {
1058 	int error, i, pp1val;
1059 
1060 	mpt->mpt_disc_enable = 0xff;
1061 	mpt->mpt_tag_enable = 0;
1062 
1063 	pp1val = ((1 << mpt->mpt_ini_id) <<
1064 	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
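	/*
	 * As a worked example (assuming the shift is 16, its usual MPI
	 * value, and the common initiator ID of 7): pp1val becomes
	 * ((1 << 7) << 16) | 7 == 0x00800007 -- our own ID in the low
	 * byte and a one-hot mask of IDs we respond to in the high word.
	 */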
1065 	if (mpt->mpt_port_page1.Configuration != pp1val) {
1066 		CONFIG_PAGE_SCSI_PORT_1 tmp;
1067 
1068 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1069 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1070 		tmp = mpt->mpt_port_page1;
1071 		tmp.Configuration = pp1val;
1072 		host2mpt_config_page_scsi_port_1(&tmp);
1073 		error = mpt_write_cur_cfg_page(mpt, 0,
1074 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1075 		if (error) {
1076 			return (-1);
1077 		}
1078 		error = mpt_read_cur_cfg_page(mpt, 0,
1079 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1080 		if (error) {
1081 			return (-1);
1082 		}
1083 		mpt2host_config_page_scsi_port_1(&tmp);
1084 		if (tmp.Configuration != pp1val) {
1085 			mpt_prt(mpt,
1086 			    "failed to reset SPI Port Page 1 Config value\n");
1087 			return (-1);
1088 		}
1089 		mpt->mpt_port_page1 = tmp;
1090 	}
1091 
1092 	/*
1093 	 * The purpose of this exercise is to get
1094 	 * all targets back to async/narrow.
1095 	 *
1096 	 * We skip this step if the BIOS has already negotiated
1097 	 * speeds with the targets.
1098 	 */
1099 	i = mpt->mpt_port_page2.PortSettings &
1100 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1101 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
1102 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1103 		    "honoring BIOS transfer negotiations\n");
1104 	} else {
1105 		for (i = 0; i < 16; i++) {
1106 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
1107 			mpt->mpt_dev_page1[i].Configuration = 0;
1108 			(void) mpt_update_spi_config(mpt, i);
1109 		}
1110 	}
1111 	return (0);
1112 }
1113 
1114 static int
1115 mpt_cam_enable(struct mpt_softc *mpt)
1116 {
1117 	int error;
1118 
1119 	MPT_LOCK(mpt);
1120 
1121 	error = EIO;
1122 	if (mpt->is_fc) {
1123 		if (mpt_read_config_info_fc(mpt)) {
1124 			goto out;
1125 		}
1126 		if (mpt_set_initial_config_fc(mpt)) {
1127 			goto out;
1128 		}
1129 	} else if (mpt->is_sas) {
1130 		if (mpt_read_config_info_sas(mpt)) {
1131 			goto out;
1132 		}
1133 		if (mpt_set_initial_config_sas(mpt)) {
1134 			goto out;
1135 		}
1136 	} else if (mpt->is_spi) {
1137 		if (mpt_read_config_info_spi(mpt)) {
1138 			goto out;
1139 		}
1140 		if (mpt_set_initial_config_spi(mpt)) {
1141 			goto out;
1142 		}
1143 	}
1144 	error = 0;
1145 
1146 out:
1147 	MPT_UNLOCK(mpt);
1148 	return (error);
1149 }
1150 
1151 static void
1152 mpt_cam_ready(struct mpt_softc *mpt)
1153 {
1154 
1155 	/*
1156 	 * If we're in target mode, hang out resources now
1157 	 * so we don't cause the world to hang talking to us.
1158 	 */
1159 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1160 		/*
1161 		 * Try to add some target command resources
1162 		 */
1163 		MPT_LOCK(mpt);
1164 		if (mpt_add_target_commands(mpt) == FALSE) {
1165 			mpt_prt(mpt, "failed to add target commands\n");
1166 		}
1167 		MPT_UNLOCK(mpt);
1168 	}
1169 	mpt->ready = 1;
1170 }
1171 
1172 static void
1173 mpt_cam_detach(struct mpt_softc *mpt)
1174 {
1175 	mpt_handler_t handler;
1176 
1177 	MPT_LOCK(mpt);
1178 	mpt->ready = 0;
1179 	mpt_terminate_recovery_thread(mpt);
1180 
1181 	handler.reply_handler = mpt_scsi_reply_handler;
1182 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1183 			       scsi_io_handler_id);
1184 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
1185 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1186 			       scsi_tmf_handler_id);
1187 	handler.reply_handler = mpt_fc_els_reply_handler;
1188 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1189 			       fc_els_handler_id);
1190 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
1191 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1192 			       mpt->scsi_tgt_handler_id);
1193 	handler.reply_handler = mpt_sata_pass_reply_handler;
1194 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1195 			       sata_pass_handler_id);
1196 
1197 	if (mpt->tmf_req != NULL) {
1198 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1199 		mpt_free_request(mpt, mpt->tmf_req);
1200 		mpt->tmf_req = NULL;
1201 	}
1202 	if (mpt->sas_portinfo != NULL) {
1203 		free(mpt->sas_portinfo, M_DEVBUF);
1204 		mpt->sas_portinfo = NULL;
1205 	}
1206 
1207 	if (mpt->sim != NULL) {
1208 		xpt_free_path(mpt->path);
1209 		xpt_bus_deregister(cam_sim_path(mpt->sim));
1210 		cam_sim_free(mpt->sim, TRUE);
1211 		mpt->sim = NULL;
1212 	}
1213 
1214 	if (mpt->phydisk_sim != NULL) {
1215 		xpt_free_path(mpt->phydisk_path);
1216 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1217 		cam_sim_free(mpt->phydisk_sim, TRUE);
1218 		mpt->phydisk_sim = NULL;
1219 	}
1220 	MPT_UNLOCK(mpt);
1221 }
1222 
1223 /* This routine is used after a system crash to dump core onto the swap device.
1224  */
1225 static void
1226 mpt_poll(struct cam_sim *sim)
1227 {
1228 	struct mpt_softc *mpt;
1229 
1230 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
1231 	mpt_intr(mpt);
1232 }
1233 
1234 /*
1235  * Watchdog timeout routine for SCSI requests.
1236  */
1237 static void
1238 mpt_timeout(void *arg)
1239 {
1240 	union ccb	 *ccb;
1241 	struct mpt_softc *mpt;
1242 	request_t	 *req;
1243 
1244 	ccb = (union ccb *)arg;
1245 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1246 
1247 	MPT_LOCK_ASSERT(mpt);
1248 	req = ccb->ccb_h.ccb_req_ptr;
1249 	mpt_lprt(mpt, MPT_PRT_DEBUG,
1250 	    "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1251 	    req->serno, ccb, req->ccb);
1252 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
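	/*
	 * Move the request to the timeout list and poke the recovery
	 * thread; mpt_recover_commands() performs the actual abort
	 * handling in thread context, where it is safe to wait.
	 */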
1253 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1254 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1255 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1256 		req->state |= REQ_STATE_TIMEDOUT;
1257 		mpt_wakeup_recovery_thread(mpt);
1258 	}
1259 }
1260 
1261 /*
1262  * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called
1263  * directly.
1264  *
1265  * Takes a list of physical segments and builds the SGL for SCSI IO command
1266  * and forwards the command to the IOC after one last check that CAM has not
1267  * aborted the transaction.
1268  */
1269 static void
1270 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1271 {
1272 	request_t *req, *trq;
1273 	char *mpt_off;
1274 	union ccb *ccb;
1275 	struct mpt_softc *mpt;
1276 	bus_addr_t chain_list_addr;
1277 	int first_lim, seg, this_seg_lim;
1278 	uint32_t addr, cur_off, flags, nxt_off, tf;
1279 	void *sglp = NULL;
1280 	MSG_REQUEST_HEADER *hdrp;
1281 	SGE_SIMPLE64 *se;
1282 	SGE_CHAIN64 *ce;
1283 	int istgt = 0;
1284 
1285 	req = (request_t *)arg;
1286 	ccb = req->ccb;
1287 
1288 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1289 	req = ccb->ccb_h.ccb_req_ptr;
1290 
1291 	hdrp = req->req_vbuf;
1292 	mpt_off = req->req_vbuf;
1293 
1294 	if (error == 0) {
1295 		switch (hdrp->Function) {
1296 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1297 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1298 			istgt = 0;
1299 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1300 			break;
1301 		case MPI_FUNCTION_TARGET_ASSIST:
1302 			istgt = 1;
1303 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1304 			break;
1305 		default:
1306 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1307 			    hdrp->Function);
1308 			error = EINVAL;
1309 			break;
1310 		}
1311 	}
1312 
1313 bad:
1314 	if (error != 0) {
1315 		if (error != EFBIG && error != ENOMEM) {
1316 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1317 		}
1318 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1319 			cam_status status;
1320 			mpt_freeze_ccb(ccb);
1321 			if (error == EFBIG) {
1322 				status = CAM_REQ_TOO_BIG;
1323 			} else if (error == ENOMEM) {
1324 				if (mpt->outofbeer == 0) {
1325 					mpt->outofbeer = 1;
1326 					xpt_freeze_simq(mpt->sim, 1);
1327 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1328 					    "FREEZEQ\n");
1329 				}
1330 				status = CAM_REQUEUE_REQ;
1331 			} else {
1332 				status = CAM_REQ_CMP_ERR;
1333 			}
1334 			mpt_set_ccb_status(ccb, status);
1335 		}
1336 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1337 			request_t *cmd_req =
1338 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1339 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1340 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1341 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1342 		}
1343 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1344 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1345 		xpt_done(ccb);
1346 		mpt_free_request(mpt, req);
1347 		return;
1348 	}
1349 
1350 	/*
1351 	 * No data to transfer?
1352 	 * Just make a single simple SGL with zero length.
1353 	 */
1354 
1355 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1356 		int tidx = ((char *)sglp) - mpt_off;
1357 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1358 	}
1359 
1360 	if (nseg == 0) {
1361 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1362 		MPI_pSGE_SET_FLAGS(se1,
1363 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1364 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1365 		se1->FlagsLength = htole32(se1->FlagsLength);
1366 		goto out;
1367 	}
1368 
1369 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1370 	if (istgt == 0) {
1371 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1372 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1373 		}
1374 	} else {
1375 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1376 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1377 		}
1378 	}
1379 
1380 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1381 		bus_dmasync_op_t op;
1382 		if (istgt == 0) {
1383 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1384 				op = BUS_DMASYNC_PREREAD;
1385 			} else {
1386 				op = BUS_DMASYNC_PREWRITE;
1387 			}
1388 		} else {
1389 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1390 				op = BUS_DMASYNC_PREWRITE;
1391 			} else {
1392 				op = BUS_DMASYNC_PREREAD;
1393 			}
1394 		}
1395 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1396 	}
1397 
1398 	/*
1399 	 * Okay, fill in what we can at the end of the command frame.
1400 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1401 	 * the command frame.
1402 	 *
1403 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1404 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
1405 	 * that.
1406 	 */
1407 
1408 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1409 		first_lim = nseg;
1410 	} else {
1411 		/*
1412 		 * Leave room for CHAIN element
1413 		 */
1414 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1415 	}
1416 
1417 	se = (SGE_SIMPLE64 *) sglp;
1418 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1419 		tf = flags;
1420 		memset(se, 0, sizeof (*se));
1421 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1422 		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1423 		if (sizeof(bus_addr_t) > 4) {
1424 			addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1425 			/* SAS1078 36GB limitation WAR */
1426 			if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
1427 			    MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
1428 				addr |= (1U << 31);
1429 				tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1430 			}
1431 			se->Address.High = htole32(addr);
1432 		}
1433 		if (seg == first_lim - 1) {
1434 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1435 		}
1436 		if (seg == nseg - 1) {
1437 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1438 				MPI_SGE_FLAGS_END_OF_BUFFER;
1439 		}
1440 		MPI_pSGE_SET_FLAGS(se, tf);
1441 		se->FlagsLength = htole32(se->FlagsLength);
1442 	}
1443 
1444 	if (seg == nseg) {
1445 		goto out;
1446 	}
1447 
1448 	/*
1449 	 * Tell the IOC where to find the first chain element.
1450 	 */
1451 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
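	/*
	 * ChainOffset is expressed in 32-bit words, hence the shift by
	 * two of the chain element's byte offset within the frame.
	 */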
1452 	nxt_off = MPT_RQSL(mpt);
1453 	trq = req;
1454 
1455 	/*
1456 	 * Make up the rest of the data segments out of a chain element
1457 	 * (contained in the current request frame) which points to
1458 	 * SIMPLE64 elements in the next request frame, possibly ending
1459 	 * with *another* chain element (if there's more).
1460 	 */
1461 	while (seg < nseg) {
1462 		/*
1463 		 * Point to the chain descriptor. Note that the chain
1464 		 * descriptor is at the end of the *previous* list (whether
1465 		 * chain or simple).
1466 		 */
1467 		ce = (SGE_CHAIN64 *) se;
1468 
1469 		/*
1470 		 * Before we change our current pointer, make sure we won't
1471 		 * overflow the request area with this frame. Note that we
1472 		 * test against 'greater than' here as it's okay in this case
1473 		 * to have next offset be just outside the request area.
1474 		 */
1475 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1476 			nxt_off = MPT_REQUEST_AREA;
1477 			goto next_chain;
1478 		}
1479 
1480 		/*
1481 		 * Set our SGE element pointer to the beginning of the chain
1482 		 * list and update our next chain list offset.
1483 		 */
1484 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1485 		cur_off = nxt_off;
1486 		nxt_off += MPT_RQSL(mpt);
1487 
1488 		/*
1489 		 * Now initialize the chain descriptor.
1490 		 */
1491 		memset(ce, 0, sizeof (*ce));
1492 
1493 		/*
1494 		 * Get the physical address of the chain list.
1495 		 */
1496 		chain_list_addr = trq->req_pbuf;
1497 		chain_list_addr += cur_off;
1498 		if (sizeof (bus_addr_t) > 4) {
1499 			ce->Address.High =
1500 			    htole32(((uint64_t)chain_list_addr) >> 32);
1501 		}
1502 		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
1503 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1504 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1505 
1506 		/*
1507 		 * If we have more than a frame's worth of segments left,
1508 		 * set up the chain list to have the last element be another
1509 		 * chain descriptor.
1510 		 */
1511 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1512 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1513 			/*
1514 			 * The length of the chain is the size in bytes of
1515 			 * the segment entries plus the trailing chain element.
1516 			 *
1517 			 * The next chain descriptor offset is the length,
1518 			 * in words, of the number of segments.
1519 			 */
1520 			ce->Length = (this_seg_lim - seg) *
1521 			    sizeof (SGE_SIMPLE64);
1522 			ce->NextChainOffset = ce->Length >> 2;
1523 			ce->Length += sizeof (SGE_CHAIN64);
1524 		} else {
1525 			this_seg_lim = nseg;
1526 			ce->Length = (this_seg_lim - seg) *
1527 			    sizeof (SGE_SIMPLE64);
1528 		}
1529 		ce->Length = htole16(ce->Length);
1530 
1531 		/*
1532 		 * Fill in the chain list SGE elements with our segment data.
1533 		 *
1534 		 * If we're the last element in this chain list, set the last
1535 		 * element flag. If we're the completely last element period,
1536 		 * set the end of list and end of buffer flags.
1537 		 */
1538 		while (seg < this_seg_lim) {
1539 			tf = flags;
1540 			memset(se, 0, sizeof (*se));
1541 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1542 			se->Address.Low = htole32(dm_segs->ds_addr &
1543 			    0xffffffff);
1544 			if (sizeof (bus_addr_t) > 4) {
1545 				addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1546 				/* SAS1078 36GB limitation WAR */
1547 				if (mpt->is_1078 &&
1548 				    (((uint64_t)dm_segs->ds_addr +
1549 				    MPI_SGE_LENGTH(se->FlagsLength)) >>
1550 				    32) == 9) {
1551 					addr |= (1U << 31);
1552 					tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1553 				}
1554 				se->Address.High = htole32(addr);
1555 			}
1556 			if (seg == this_seg_lim - 1) {
1557 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1558 			}
1559 			if (seg == nseg - 1) {
1560 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1561 					MPI_SGE_FLAGS_END_OF_BUFFER;
1562 			}
1563 			MPI_pSGE_SET_FLAGS(se, tf);
1564 			se->FlagsLength = htole32(se->FlagsLength);
1565 			se++;
1566 			seg++;
1567 			dm_segs++;
1568 		}
1569 
1570     next_chain:
1571 		/*
1572 		 * If we have more segments to do and we've used up all of
1573 		 * the space in a request area, go allocate another one
1574 		 * and chain to that.
1575 		 */
1576 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1577 			request_t *nrq;
1578 
1579 			nrq = mpt_get_request(mpt, FALSE);
1580 
1581 			if (nrq == NULL) {
1582 				error = ENOMEM;
1583 				goto bad;
1584 			}
1585 
1586 			/*
1587 			 * Append the new request area on the tail of our list.
1588 			 */
1589 			if ((trq = req->chain) == NULL) {
1590 				req->chain = nrq;
1591 			} else {
1592 				while (trq->chain != NULL) {
1593 					trq = trq->chain;
1594 				}
1595 				trq->chain = nrq;
1596 			}
1597 			trq = nrq;
1598 			mpt_off = trq->req_vbuf;
1599 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1600 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1601 			}
1602 			nxt_off = 0;
1603 		}
1604 	}
1605 out:
1606 
1607 	/*
1608 	 * Last time we need to check if this CCB needs to be aborted.
1609 	 */
1610 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1611 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1612 			request_t *cmd_req =
1613 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1614 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1615 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1616 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1617 		}
1618 		mpt_prt(mpt,
1619 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1620 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1621 		if (nseg) {
1622 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1623 		}
1624 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1625 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1626 		xpt_done(ccb);
1627 		mpt_free_request(mpt, req);
1628 		return;
1629 	}
1630 
1631 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1632 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1633 		mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
1634 		    mpt_timeout, ccb);
1635 	}
1636 	if (mpt->verbose > MPT_PRT_DEBUG) {
1637 		int nc = 0;
1638 		mpt_print_request(req->req_vbuf);
1639 		for (trq = req->chain; trq; trq = trq->chain) {
1640 			printf("  Additional Chain Area %d\n", nc++);
1641 			mpt_dump_sgl(trq->req_vbuf, 0);
1642 		}
1643 	}
1644 
1645 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1646 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1647 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1648 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1649 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1650 		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1651 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1652 		} else {
1653 			tgt->state = TGT_STATE_MOVING_DATA;
1654 		}
1655 #else
1656 		tgt->state = TGT_STATE_MOVING_DATA;
1657 #endif
1658 	}
1659 	mpt_send_cmd(mpt, req);
1660 }
1661 
1662 static void
1663 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1664 {
1665 	request_t *req, *trq;
1666 	char *mpt_off;
1667 	union ccb *ccb;
1668 	struct mpt_softc *mpt;
1669 	int seg, first_lim;
1670 	uint32_t flags, nxt_off;
1671 	void *sglp = NULL;
1672 	MSG_REQUEST_HEADER *hdrp;
1673 	SGE_SIMPLE32 *se;
1674 	SGE_CHAIN32 *ce;
1675 	int istgt = 0;
1676 
1677 	req = (request_t *)arg;
1678 	ccb = req->ccb;
1679 
1680 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1681 	req = ccb->ccb_h.ccb_req_ptr;
1682 
1683 	hdrp = req->req_vbuf;
1684 	mpt_off = req->req_vbuf;
1685 
1686 	if (error == 0) {
1687 		switch (hdrp->Function) {
1688 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1689 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1690 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1691 			break;
1692 		case MPI_FUNCTION_TARGET_ASSIST:
1693 			istgt = 1;
1694 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1695 			break;
1696 		default:
1697 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1698 			    hdrp->Function);
1699 			error = EINVAL;
1700 			break;
1701 		}
1702 	}
1703 
1704 bad:
1705 	if (error != 0) {
1706 		if (error != EFBIG && error != ENOMEM) {
1707 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1708 		}
1709 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1710 			cam_status status;
1711 			mpt_freeze_ccb(ccb);
1712 			if (error == EFBIG) {
1713 				status = CAM_REQ_TOO_BIG;
1714 			} else if (error == ENOMEM) {
1715 				if (mpt->outofbeer == 0) {
1716 					mpt->outofbeer = 1;
1717 					xpt_freeze_simq(mpt->sim, 1);
1718 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1719 					    "FREEZEQ\n");
1720 				}
1721 				status = CAM_REQUEUE_REQ;
1722 			} else {
1723 				status = CAM_REQ_CMP_ERR;
1724 			}
1725 			mpt_set_ccb_status(ccb, status);
1726 		}
1727 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1728 			request_t *cmd_req =
1729 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1730 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1731 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1732 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1733 		}
1734 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1735 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1736 		xpt_done(ccb);
1737 		mpt_free_request(mpt, req);
1738 		return;
1739 	}
1740 
1741 	/*
1742 	 * No data to transfer?
1743 	 * Just make a single simple SGL with zero length.
1744 	 */
1745 
1746 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1747 		int tidx = ((char *)sglp) - mpt_off;
1748 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1749 	}
1750 
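	/*
	 * Note (illustrative annotation): the IOC expects at least one SGE
	 * even when no data moves, so a single zero-length SIMPLE element
	 * flagged END_OF_LIST/END_OF_BUFFER is enough to terminate the
	 * empty list built below.
	 */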
1751 	if (nseg == 0) {
1752 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1753 		MPI_pSGE_SET_FLAGS(se1,
1754 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1755 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1756 		se1->FlagsLength = htole32(se1->FlagsLength);
1757 		goto out;
1758 	}
1759 
1760 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1761 	if (istgt == 0) {
1762 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1763 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1764 		}
1765 	} else {
1766 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1767 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1768 		}
1769 	}
1770 
1771 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1772 		bus_dmasync_op_t op;
1773 		if (istgt) {
1774 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1775 				op = BUS_DMASYNC_PREREAD;
1776 			} else {
1777 				op = BUS_DMASYNC_PREWRITE;
1778 			}
1779 		} else {
1780 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1781 				op = BUS_DMASYNC_PREWRITE;
1782 			} else {
1783 				op = BUS_DMASYNC_PREREAD;
1784 			}
1785 		}
1786 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1787 	}
1788 
1789 	/*
1790 	 * Okay, fill in what we can at the end of the command frame.
1791 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1792 	 * the command frame.
1793 	 *
1794 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1795 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1796 	 * that.
1797 	 */
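	/*
	 * Worked example (illustrative only; assumes the 8-byte
	 * SGE_SIMPLE32/SGE_CHAIN32 sizes from the MPI spec): if
	 * MPT_NSGL_FIRST() were 10 and nseg were 40, the first 9 segments
	 * would go inline as SIMPLE32 elements and the 10th slot would
	 * become a CHAIN32 element pointing at the remaining 31 segments
	 * in follow-on request frames.
	 */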
1798 
1799 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1800 		first_lim = nseg;
1801 	} else {
1802 		/*
1803 		 * Leave room for CHAIN element
1804 		 */
1805 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1806 	}
1807 
1808 	se = (SGE_SIMPLE32 *) sglp;
1809 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1810 		uint32_t tf;
1811 
1812 		memset(se, 0, sizeof (*se));
1813 		se->Address = htole32(dm_segs->ds_addr);
1814 
1815 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1816 		tf = flags;
1817 		if (seg == first_lim - 1) {
1818 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1819 		}
1820 		if (seg == nseg - 1) {
1821 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1822 				MPI_SGE_FLAGS_END_OF_BUFFER;
1823 		}
1824 		MPI_pSGE_SET_FLAGS(se, tf);
1825 		se->FlagsLength = htole32(se->FlagsLength);
1826 	}
1827 
1828 	if (seg == nseg) {
1829 		goto out;
1830 	}
1831 
1832 	/*
1833 	 * Tell the IOC where to find the first chain element.
1834 	 */
1835 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1836 	nxt_off = MPT_RQSL(mpt);
1837 	trq = req;
1838 
1839 	/*
1840 	 * Make up the rest of the data segments out of a chain element
1841 	 * (contained in the current request frame) which points to
1842 	 * SIMPLE32 elements in the next request frame, possibly ending
1843 	 * with *another* chain element (if there's more).
1844 	 */
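	/*
	 * Rough picture (illustrative): S = SGE_SIMPLE32, C = SGE_CHAIN32.
	 *
	 *	request frame:	[hdr][S][S]...[S][C] --> next list
	 *	next frame:	[S][S].........[S][C] --> next list (etc.)
	 */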
1845 	while (seg < nseg) {
1846 		int this_seg_lim;
1847 		uint32_t tf, cur_off;
1848 		bus_addr_t chain_list_addr;
1849 
1850 		/*
1851 		 * Point to the chain descriptor. Note that the chain
1852 		 * descriptor is at the end of the *previous* list (whether
1853 		 * chain or simple).
1854 		 */
1855 		ce = (SGE_CHAIN32 *) se;
1856 
1857 		/*
1858 		 * Before we change our current pointer, make sure we won't
1859 		 * overflow the request area with this frame. Note that we
1860 		 * test against 'greater than' here as it's okay in this case
1861 		 * to have next offset be just outside the request area.
1862 		 */
1863 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1864 			nxt_off = MPT_REQUEST_AREA;
1865 			goto next_chain;
1866 		}
1867 
1868 		/*
1869 		 * Set our SGE element pointer to the beginning of the chain
1870 		 * list and update our next chain list offset.
1871 		 */
1872 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1873 		cur_off = nxt_off;
1874 		nxt_off += MPT_RQSL(mpt);
1875 
1876 		/*
1877 		 * Now initialize the chain descriptor.
1878 		 */
1879 		memset(ce, 0, sizeof (*ce));
1880 
1881 		/*
1882 		 * Get the physical address of the chain list.
1883 		 */
1884 		chain_list_addr = trq->req_pbuf;
1885 		chain_list_addr += cur_off;
1886 
1887 		ce->Address = htole32(chain_list_addr);
1888 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1889 
1890 		/*
1891 		 * If we have more than a frame's worth of segments left,
1892 		 * set up the chain list to have the last element be another
1893 		 * chain descriptor.
1894 		 */
1895 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1896 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1897 			/*
1898 			 * The length of the chain is the length in bytes of the
1899 			 * number of segments plus the next chain element.
1900 			 *
1901 			 * The next chain descriptor offset is the length,
1902 			 * in words, of the number of segments.
1903 			 */
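			/*
			 * Example (illustrative, taking MPT_NSGL(mpt) == 16
			 * purely for the arithmetic): this chain list holds
			 * 15 SIMPLE32 elements, so Length starts at
			 * 15 * 8 == 120 bytes, NextChainOffset is
			 * 120 >> 2 == 30 32-bit words, and Length then grows
			 * by sizeof (SGE_CHAIN32) to cover the trailing
			 * chain element itself.
			 */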
1904 			ce->Length = (this_seg_lim - seg) *
1905 			    sizeof (SGE_SIMPLE32);
1906 			ce->NextChainOffset = ce->Length >> 2;
1907 			ce->Length += sizeof (SGE_CHAIN32);
1908 		} else {
1909 			this_seg_lim = nseg;
1910 			ce->Length = (this_seg_lim - seg) *
1911 			    sizeof (SGE_SIMPLE32);
1912 		}
1913 		ce->Length = htole16(ce->Length);
1914 
1915 		/*
1916 		 * Fill in the chain list SGE elements with our segment data.
1917 		 *
1918 		 * If we're the last element in this chain list, set the last
1919 		 * element flag. If we're the completely last element period,
1920 		 * set the end of list and end of buffer flags.
1921 		 */
1922 		while (seg < this_seg_lim) {
1923 			memset(se, 0, sizeof (*se));
1924 			se->Address = htole32(dm_segs->ds_addr);
1925 
1926 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1927 			tf = flags;
1928 			if (seg == this_seg_lim - 1) {
1929 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1930 			}
1931 			if (seg == nseg - 1) {
1932 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1933 					MPI_SGE_FLAGS_END_OF_BUFFER;
1934 			}
1935 			MPI_pSGE_SET_FLAGS(se, tf);
1936 			se->FlagsLength = htole32(se->FlagsLength);
1937 			se++;
1938 			seg++;
1939 			dm_segs++;
1940 		}
1941 
1942     next_chain:
1943 		/*
1944 		 * If we have more segments to do and we've used up all of
1945 		 * the space in a request area, go allocate another one
1946 		 * and chain to that.
1947 		 */
1948 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1949 			request_t *nrq;
1950 
1951 			nrq = mpt_get_request(mpt, FALSE);
1952 
1953 			if (nrq == NULL) {
1954 				error = ENOMEM;
1955 				goto bad;
1956 			}
1957 
1958 			/*
1959 			 * Append the new request area on the tail of our list.
1960 			 */
1961 			if ((trq = req->chain) == NULL) {
1962 				req->chain = nrq;
1963 			} else {
1964 				while (trq->chain != NULL) {
1965 					trq = trq->chain;
1966 				}
1967 				trq->chain = nrq;
1968 			}
1969 			trq = nrq;
1970 			mpt_off = trq->req_vbuf;
1971 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1972 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1973 			}
1974 			nxt_off = 0;
1975 		}
1976 	}
1977 out:
1978 
1979 	/*
1980 	 * One last time, check whether this CCB needs to be aborted.
1981 	 */
1982 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1983 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1984 			request_t *cmd_req =
1985 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1986 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1987 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1988 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1989 		}
1990 		mpt_prt(mpt,
1991 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1992 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1993 		if (nseg) {
1994 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1995 		}
1996 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1997 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1998 		xpt_done(ccb);
1999 		mpt_free_request(mpt, req);
2000 		return;
2001 	}
2002 
2003 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2004 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2005 		mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
2006 		    mpt_timeout, ccb);
2007 	}
2008 	if (mpt->verbose > MPT_PRT_DEBUG) {
2009 		int nc = 0;
2010 		mpt_print_request(req->req_vbuf);
2011 		for (trq = req->chain; trq; trq = trq->chain) {
2012 			printf("  Additional Chain Area %d\n", nc++);
2013 			mpt_dump_sgl(trq->req_vbuf, 0);
2014 		}
2015 	}
2016 
2017 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2018 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2019 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2020 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
2021 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2022 		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2023 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2024 		} else {
2025 			tgt->state = TGT_STATE_MOVING_DATA;
2026 		}
2027 #else
2028 		tgt->state = TGT_STATE_MOVING_DATA;
2029 #endif
2030 	}
2031 	mpt_send_cmd(mpt, req);
2032 }
2033 
2034 static void
2035 mpt_start(struct cam_sim *sim, union ccb *ccb)
2036 {
2037 	request_t *req;
2038 	struct mpt_softc *mpt;
2039 	MSG_SCSI_IO_REQUEST *mpt_req;
2040 	struct ccb_scsiio *csio = &ccb->csio;
2041 	struct ccb_hdr *ccbh = &ccb->ccb_h;
2042 	bus_dmamap_callback_t *cb;
2043 	target_id_t tgt;
2044 	int raid_passthru;
2045 	int error;
2046 
2047 	/* Get the pointer for the physical adapter */
2048 	mpt = ccb->ccb_h.ccb_mpt_ptr;
2049 	raid_passthru = (sim == mpt->phydisk_sim);
2050 
2051 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2052 		if (mpt->outofbeer == 0) {
2053 			mpt->outofbeer = 1;
2054 			xpt_freeze_simq(mpt->sim, 1);
2055 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2056 		}
2057 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2058 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2059 		xpt_done(ccb);
2060 		return;
2061 	}
2062 #ifdef	INVARIANTS
2063 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2064 #endif
2065 
2066 	if (sizeof (bus_addr_t) > 4) {
2067 		cb = mpt_execute_req_a64;
2068 	} else {
2069 		cb = mpt_execute_req;
2070 	}
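	/*
	 * Note (annotation): on machines whose bus addresses are wider than
	 * 32 bits, DMA segments may land above 4GB, so the a64 callback
	 * that builds 64-bit SGEs is used; otherwise the SGE_SIMPLE32
	 * variant suffices.
	 */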
2071 
2072 	/*
2073 	 * Link the ccb and the request structure so we can find
2074 	 * the other knowing either the request or the ccb
2075 	 */
2076 	req->ccb = ccb;
2077 	ccb->ccb_h.ccb_req_ptr = req;
2078 
2079 	/* Now we build the command for the IOC */
2080 	mpt_req = req->req_vbuf;
2081 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2082 
2083 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2084 	if (raid_passthru) {
2085 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2086 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2087 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2088 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2089 			xpt_done(ccb);
2090 			return;
2091 		}
2092 		mpt_req->Bus = 0;	/* we never set bus here */
2093 	} else {
2094 		tgt = ccb->ccb_h.target_id;
2095 		mpt_req->Bus = 0;	/* XXX */
2096 
2097 	}
2098 	mpt_req->SenseBufferLength =
2099 		(csio->sense_len < MPT_SENSE_SIZE) ?
2100 		 csio->sense_len : MPT_SENSE_SIZE;
2101 
2102 	/*
2103 	 * We use the message context to find the request structure when we
2104 	 * get the command completion interrupt from the IOC.
2105 	 */
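	/*
	 * Illustrative sketch only (HANDLER_ID_MASK, REQ_INDEX_MASK and
	 * request_pool are made-up names): the reply path recovers both
	 * halves from the 32-bit context along these lines:
	 *
	 *	ctx = le32toh(reply->MsgContext);
	 *	handler = ctx & HANDLER_ID_MASK;
	 *	req = &request_pool[ctx & REQ_INDEX_MASK];
	 */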
2106 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2107 
2108 	/* Which physical device to do the I/O on */
2109 	mpt_req->TargetID = tgt;
2110 
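	/*
	 * Note (best-effort annotation): CAM_EXTLUN_BYTE_SWIZZLE reorders
	 * CAM's 64-bit extended LUN into SCSI wire order before be64enc()
	 * stores it into the request's LUN field.
	 */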
2111 	be64enc(mpt_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
2112 
2113 	/* Set the direction of the transfer */
2114 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2115 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2116 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2117 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2118 	} else {
2119 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2120 	}
2121 
2122 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2123 		switch(ccb->csio.tag_action) {
2124 		case MSG_HEAD_OF_Q_TAG:
2125 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2126 			break;
2127 		case MSG_ACA_TASK:
2128 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2129 			break;
2130 		case MSG_ORDERED_Q_TAG:
2131 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2132 			break;
2133 		case MSG_SIMPLE_Q_TAG:
2134 		default:
2135 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2136 			break;
2137 		}
2138 	} else {
2139 		if (mpt->is_fc || mpt->is_sas) {
2140 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2141 		} else {
2142 			/* XXX No such thing for a target doing packetized. */
2143 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2144 		}
2145 	}
2146 
2147 	if (mpt->is_spi) {
2148 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2149 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2150 		}
2151 	}
2152 	mpt_req->Control = htole32(mpt_req->Control);
2153 
2154 	/* Copy the scsi command block into place */
2155 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2156 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2157 	} else {
2158 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2159 	}
2160 
2161 	mpt_req->CDBLength = csio->cdb_len;
2162 	mpt_req->DataLength = htole32(csio->dxfer_len);
2163 	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2164 
2165 	/*
2166 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2167 	 */
2168 	if (mpt->verbose == MPT_PRT_DEBUG) {
2169 		U32 df;
2170 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2171 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2172 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2173 		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2174 		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2175 			mpt_prtc(mpt, "(%s %u byte%s ",
2176 			    (df == MPI_SCSIIO_CONTROL_READ)?
2177 			    "read" : "write",  csio->dxfer_len,
2178 			    (csio->dxfer_len == 1)? ")" : "s)");
2179 		}
2180 		mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt,
2181 		    (uintmax_t)ccb->ccb_h.target_lun, req, req->serno);
2182 	}
2183 
2184 	error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb,
2185 	    req, 0);
2186 	if (error == EINPROGRESS) {
2187 		/*
2188 		 * So as to maintain ordering, freeze the controller queue
2189 		 * until our mapping is returned.
2190 		 */
2191 		xpt_freeze_simq(mpt->sim, 1);
2192 		ccbh->status |= CAM_RELEASE_SIMQ;
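		/*
		 * Note (annotation): without BUS_DMA_NOWAIT,
		 * bus_dmamap_load_ccb() may defer the mapping and return
		 * EINPROGRESS; the callback still runs later, and this
		 * freeze keeps subsequent CCBs from overtaking this one.
		 */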
2193 	}
2194 }
2195 
2196 static int
2197 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2198     int sleep_ok)
2199 {
2200 	int   error;
2201 	uint16_t status;
2202 	uint8_t response;
2203 
2204 	error = mpt_scsi_send_tmf(mpt,
2205 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2206 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2207 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2208 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2209 	    0,	/* XXX How do I get the channel ID? */
2210 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2211 	    lun != CAM_LUN_WILDCARD ? lun : 0,
2212 	    0, sleep_ok);
2213 
2214 	if (error != 0) {
2215 		/*
2216 		 * mpt_scsi_send_tmf hard resets on failure, so no
2217 		 * need to do so here.
2218 		 */
2219 		mpt_prt(mpt,
2220 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2221 		return (EIO);
2222 	}
2223 
2224 	/* Wait for bus reset to be processed by the IOC. */
2225 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2226 	    REQ_STATE_DONE, sleep_ok, 5000);
2227 
2228 	status = le16toh(mpt->tmf_req->IOCStatus);
2229 	response = mpt->tmf_req->ResponseCode;
2230 	mpt->tmf_req->state = REQ_STATE_FREE;
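	/*
	 * Note (annotation): IOCStatus and ResponseCode are snapshotted
	 * above before the shared tmf_req is marked free, since it can be
	 * reused as soon as its state changes.
	 */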
2231 
2232 	if (error) {
2233 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2234 		    "Resetting controller.\n");
2235 		mpt_reset(mpt, TRUE);
2236 		return (ETIMEDOUT);
2237 	}
2238 
2239 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2240 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2241 		    "Resetting controller.\n", status);
2242 		mpt_reset(mpt, TRUE);
2243 		return (EIO);
2244 	}
2245 
2246 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2247 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2248 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2249 		    "Resetting controller.\n", response);
2250 		mpt_reset(mpt, TRUE);
2251 		return (EIO);
2252 	}
2253 	return (0);
2254 }
2255 
2256 static int
2257 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2258 {
2259 	int r = 0;
2260 	request_t *req;
2261 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2262 
2263 	req = mpt_get_request(mpt, FALSE);
2264 	if (req == NULL) {
2265 		return (ENOMEM);
2266 	}
2267 	fc = req->req_vbuf;
2268 	memset(fc, 0, sizeof(*fc));
2269 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2270 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2271 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2272 	mpt_send_cmd(mpt, req);
2273 	if (dowait) {
2274 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2275 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2276 		if (r == 0) {
2277 			mpt_free_request(mpt, req);
2278 		}
2279 	}
2280 	return (r);
2281 }
2282 
2283 static int
2284 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2285 	      MSG_EVENT_NOTIFY_REPLY *msg)
2286 {
2287 	uint32_t data0, data1;
2288 
2289 	data0 = le32toh(msg->Data[0]);
2290 	data1 = le32toh(msg->Data[1]);
2291 	switch(msg->Event & 0xFF) {
2292 	case MPI_EVENT_UNIT_ATTENTION:
2293 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2294 		    (data0 >> 8) & 0xff, data0 & 0xff);
2295 		break;
2296 
2297 	case MPI_EVENT_IOC_BUS_RESET:
2298 		/* We generated a bus reset */
2299 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2300 		    (data0 >> 8) & 0xff);
2301 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2302 		break;
2303 
2304 	case MPI_EVENT_EXT_BUS_RESET:
2305 		/* Someone else generated a bus reset */
2306 		mpt_prt(mpt, "External Bus Reset Detected\n");
2307 		/*
2308 		 * These replies don't return EventData like the MPI
2309 		 * spec says they do
2310 		 */
2311 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2312 		break;
2313 
2314 	case MPI_EVENT_RESCAN:
2315 	{
2316 		union ccb *ccb;
2317 		uint32_t pathid;
2318 		/*
2319 		 * In general this means a device has been added to the loop.
2320 		 */
2321 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2322 		if (mpt->ready == 0) {
2323 			break;
2324 		}
2325 		if (mpt->phydisk_sim) {
2326 			pathid = cam_sim_path(mpt->phydisk_sim);
2327 		} else {
2328 			pathid = cam_sim_path(mpt->sim);
2329 		}
2330 		/*
2331 		 * Allocate a CCB, create a wildcard path for this bus,
2332 		 * and schedule a rescan.
2333 		 */
2334 		ccb = xpt_alloc_ccb_nowait();
2335 		if (ccb == NULL) {
2336 			mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2337 			break;
2338 		}
2339 
2340 		if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
2341 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2342 			mpt_prt(mpt, "unable to create path for rescan\n");
2343 			xpt_free_ccb(ccb);
2344 			break;
2345 		}
2346 		xpt_rescan(ccb);
2347 		break;
2348 	}
2349 
2350 	case MPI_EVENT_LINK_STATUS_CHANGE:
2351 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2352 		    (data1 >> 8) & 0xff,
2353 		    ((data0 & 0xff) == 0)?  "Failed" : "Active");
2354 		break;
2355 
2356 	case MPI_EVENT_LOOP_STATE_CHANGE:
2357 		switch ((data0 >> 16) & 0xff) {
2358 		case 0x01:
2359 			mpt_prt(mpt,
2360 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2361 			    "(Loop Initialization)\n",
2362 			    (data1 >> 8) & 0xff,
2363 			    (data0 >> 8) & 0xff,
2364 			    (data0     ) & 0xff);
2365 			switch ((data0 >> 8) & 0xff) {
2366 			case 0xF7:
2367 				if ((data0 & 0xff) == 0xF7) {
2368 					mpt_prt(mpt, "Device needs AL_PA\n");
2369 				} else {
2370 					mpt_prt(mpt, "Device %02x doesn't like "
2371 					    "FC performance\n",
2372 					    data0 & 0xFF);
2373 				}
2374 				break;
2375 			case 0xF8:
2376 				if ((data0 & 0xff) == 0xF7) {
2377 					mpt_prt(mpt, "Device had loop failure "
2378 					    "at its receiver prior to acquiring"
2379 					    " AL_PA\n");
2380 				} else {
2381 					mpt_prt(mpt, "Device %02x detected loop"
2382 					    " failure at its receiver\n",
2383 					    data0 & 0xFF);
2384 				}
2385 				break;
2386 			default:
2387 				mpt_prt(mpt, "Device %02x requests that device "
2388 				    "%02x reset itself\n",
2389 				    data0 & 0xFF,
2390 				    (data0 >> 8) & 0xFF);
2391 				break;
2392 			}
2393 			break;
2394 		case 0x02:
2395 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2396 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2397 			    (data1 >> 8) & 0xff, /* Port */
2398 			    (data0 >>  8) & 0xff, /* Character 3 */
2399 			    (data0      ) & 0xff  /* Character 4 */);
2400 			break;
2401 		case 0x03:
2402 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2403 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2404 			    (data1 >> 8) & 0xff, /* Port */
2405 			    (data0 >> 8) & 0xff, /* Character 3 */
2406 			    (data0     ) & 0xff  /* Character 4 */);
2407 			break;
2408 		default:
2409 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2410 			    "FC event (%02x %02x %02x)\n",
2411 			    (data1 >> 8) & 0xff, /* Port */
2412 			    (data0 >> 16) & 0xff, /* Event */
2413 			    (data0 >>  8) & 0xff, /* Character 3 */
2414 			    (data0      ) & 0xff  /* Character 4 */);
2415 		}
2416 		break;
2417 
2418 	case MPI_EVENT_LOGOUT:
2419 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2420 		    (data1 >> 8) & 0xff, data0);
2421 		break;
2422 	case MPI_EVENT_QUEUE_FULL:
2423 	{
2424 		struct cam_sim *sim;
2425 		struct cam_path *tmppath;
2426 		struct ccb_relsim crs;
2427 		PTR_EVENT_DATA_QUEUE_FULL pqf;
2428 		lun_id_t lun_id;
2429 
2430 		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2431 		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2432 		if (bootverbose) {
2433 		    mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x "
2434 			"Depth %d\n",
2435 			pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2436 		}
2437 		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2438 		    pqf->TargetID) != 0) {
2439 			sim = mpt->phydisk_sim;
2440 		} else {
2441 			sim = mpt->sim;
2442 		}
2443 		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2444 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2445 			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2446 				mpt_prt(mpt, "unable to create a path to send "
2447 				    "XPT_REL_SIMQ\n");
2448 				break;
2449 			}
2450 			memset(&crs, 0, sizeof(crs));
2451 			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2452 			crs.ccb_h.func_code = XPT_REL_SIMQ;
2453 			crs.ccb_h.flags = CAM_DEV_QFREEZE;
2454 			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2455 			crs.openings = pqf->CurrentDepth - 1;
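			/*
			 * Note (annotation): throttle to one opening less
			 * than the depth at which the queue-full occurred.
			 */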
2456 			xpt_action((union ccb *)&crs);
2457 			if (crs.ccb_h.status != CAM_REQ_CMP) {
2458 				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2459 			}
2460 			xpt_free_path(tmppath);
2461 		}
2462 		break;
2463 	}
2464 	case MPI_EVENT_IR_RESYNC_UPDATE:
2465 		mpt_prt(mpt, "IR resync update %d completed\n",
2466 		    (data0 >> 16) & 0xff);
2467 		break;
2468 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2469 	{
2470 		union ccb *ccb;
2471 		struct cam_sim *sim;
2472 		struct cam_path *tmppath;
2473 		PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2474 
2475 		psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2476 		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2477 		    psdsc->TargetID) != 0)
2478 			sim = mpt->phydisk_sim;
2479 		else
2480 			sim = mpt->sim;
2481 		switch(psdsc->ReasonCode) {
2482 		case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2483 			ccb = xpt_alloc_ccb_nowait();
2484 			if (ccb == NULL) {
2485 				mpt_prt(mpt,
2486 				    "unable to alloc CCB for rescan\n");
2487 				break;
2488 			}
2489 			if (xpt_create_path(&ccb->ccb_h.path, NULL,
2490 			    cam_sim_path(sim), psdsc->TargetID,
2491 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2492 				mpt_prt(mpt,
2493 				    "unable to create path for rescan\n");
2494 				xpt_free_ccb(ccb);
2495 				break;
2496 			}
2497 			xpt_rescan(ccb);
2498 			break;
2499 		case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2500 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2501 			    psdsc->TargetID, CAM_LUN_WILDCARD) !=
2502 			    CAM_REQ_CMP) {
2503 				mpt_prt(mpt,
2504 				    "unable to create path for async event\n");
2505 				break;
2506 			}
2507 			xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2508 			xpt_free_path(tmppath);
2509 			break;
2510 		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2511 		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2512 		case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2513 			break;
2514 		default:
2515 			mpt_lprt(mpt, MPT_PRT_WARN,
2516 			    "SAS device status change: Bus: 0x%02x TargetID: "
2517 			    "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2518 			    psdsc->TargetID, psdsc->ReasonCode);
2519 			break;
2520 		}
2521 		break;
2522 	}
2523 	case MPI_EVENT_SAS_DISCOVERY_ERROR:
2524 	{
2525 		PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2526 
2527 		pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2528 		pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2529 		mpt_lprt(mpt, MPT_PRT_WARN,
2530 		    "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2531 		    pde->Port, pde->DiscoveryStatus);
2532 		break;
2533 	}
2534 	case MPI_EVENT_EVENT_CHANGE:
2535 	case MPI_EVENT_INTEGRATED_RAID:
2536 	case MPI_EVENT_IR2:
2537 	case MPI_EVENT_LOG_ENTRY_ADDED:
2538 	case MPI_EVENT_SAS_DISCOVERY:
2539 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2540 	case MPI_EVENT_SAS_SES:
2541 		break;
2542 	default:
2543 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2544 		    msg->Event & 0xFF);
2545 		return (0);
2546 	}
2547 	return (1);
2548 }
2549 
2550 /*
2551  * Reply path for all SCSI I/O requests, called from our
2552  * interrupt handler by extracting our handler index from
2553  * the MsgContext field of the reply from the IOC.
2554  *
2555  * This routine is optimized for the common case of a
2556  * completion without error.  All exception handling is
2557  * offloaded to non-inlined helper routines to minimize
2558  * cache footprint.
2559  */
2560 static int
2561 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2562     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2563 {
2564 	MSG_SCSI_IO_REQUEST *scsi_req;
2565 	union ccb *ccb;
2566 
2567 	if (req->state == REQ_STATE_FREE) {
2568 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2569 		return (TRUE);
2570 	}
2571 
2572 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2573 	ccb = req->ccb;
2574 	if (ccb == NULL) {
2575 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2576 		"mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2577 		    req, req->serno);
2578 		return (TRUE);
2579 	}
2580 
2581 	mpt_req_untimeout(req, mpt_timeout, ccb);
2582 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2583 
2584 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2585 		bus_dmasync_op_t op;
2586 
2587 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2588 			op = BUS_DMASYNC_POSTREAD;
2589 		else
2590 			op = BUS_DMASYNC_POSTWRITE;
2591 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2592 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2593 	}
2594 
2595 	if (reply_frame == NULL) {
2596 		/*
2597 		 * Context only reply, completion without error status.
2598 		 */
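		/*
		 * Note (annotation): the IOC posts a context-only "turbo"
		 * reply descriptor only for fully successful completions;
		 * anything exceptional arrives as an address reply carrying
		 * a full reply frame, handled in the else branch below.
		 */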
2599 		ccb->csio.resid = 0;
2600 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2601 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2602 	} else {
2603 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2604 	}
2605 
2606 	if (mpt->outofbeer) {
2607 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2608 		mpt->outofbeer = 0;
2609 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2610 	}
2611 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2612 		struct scsi_inquiry_data *iq =
2613 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2614 		if (scsi_req->Function ==
2615 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2616 			/*
2617 			 * Fake out the device type so that only the
2618 			 * pass-thru device will attach.
2619 			 */
2620 			iq->device &= ~0x1F;
2621 			iq->device |= T_NODEVICE;
2622 		}
2623 	}
2624 	if (mpt->verbose == MPT_PRT_DEBUG) {
2625 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2626 		    req, req->serno);
2627 	}
2628 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2629 	xpt_done(ccb);
2630 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2631 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2632 	} else {
2633 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2634 		"completing timedout/aborted req %p:%u\n",
2635 		    req, req->serno);
2636 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2637 	}
2638 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2639 	    ("CCB req needed wakeup"));
2640 #ifdef	INVARIANTS
2641 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2642 #endif
2643 	mpt_free_request(mpt, req);
2644 	return (TRUE);
2645 }
2646 
2647 static int
2648 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2649     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2650 {
2651 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2652 
2653 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2654 #ifdef	INVARIANTS
2655 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2656 #endif
2657 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2658 	/* Record IOC Status and Response Code of TMF for any waiters. */
2659 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2660 	req->ResponseCode = tmf_reply->ResponseCode;
2661 
2662 	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2663 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2664 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2665 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2666 		req->state |= REQ_STATE_DONE;
2667 		wakeup(req);
2668 	} else {
2669 		mpt->tmf_req->state = REQ_STATE_FREE;
2670 	}
2671 	return (TRUE);
2672 }
2673 
2674 /*
2675  * XXX: Move to definitions file
2676  */
2677 #define	ELS	0x22
2678 #define	FC4LS	0x32
2679 #define	ABTS	0x81
2680 #define	BA_ACC	0x84
2681 
2682 #define	LS_RJT	0x01
2683 #define	LS_ACC	0x02
2684 #define	PLOGI	0x03
2685 #define	LOGO	0x05
2686 #define SRR	0x14
2687 #define PRLI	0x20
2688 #define PRLO	0x21
2689 #define ADISC	0x52
2690 #define RSCN	0x61
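/*
 * Note (annotation): ELS, FC4LS, ABTS and BA_ACC are Fibre Channel R_CTL
 * values; LS_RJT through RSCN are ELS command codes carried in the first
 * byte of the ELS payload.
 */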
2691 
2692 static void
2693 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2694     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2695 {
2696 	uint32_t fl;
2697 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2698 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2699 
2700 	/*
2701 	 * We are going to reuse the ELS request to send this response back.
2702 	 */
2703 	rsp = &tmp;
2704 	memset(rsp, 0, sizeof(*rsp));
2705 
2706 #ifdef	USE_IMMEDIATE_LINK_DATA
2707 	/*
2708 	 * Apparently the IMMEDIATE stuff doesn't work.
2709 	 */
2710 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2711 #endif
2712 	rsp->RspLength = length;
2713 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2714 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2715 
2716 	/*
2717 	 * Copy over information from the original reply frame to
2718 	 * its correct place in the response.
2719 	 */
2720 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2721 
2722 	/*
2723 	 * And now copy back the temporary area to the original frame.
2724 	 */
2725 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2726 	rsp = req->req_vbuf;
2727 
2728 #ifdef	USE_IMMEDIATE_LINK_DATA
2729 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2730 #else
2731 {
2732 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2733 	bus_addr_t paddr = req->req_pbuf;
2734 	paddr += MPT_RQSL(mpt);
2735 
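	/*
	 * Note (annotation): per the MPI SGE format, flags occupy bits
	 * 31:24 of FlagsLength, hence the MPI_SGE_FLAGS_SHIFT below; the
	 * low 24 bits carry the byte count.
	 */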
2736 	fl =
2737 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2738 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2739 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2740 		MPI_SGE_FLAGS_END_OF_LIST	|
2741 		MPI_SGE_FLAGS_END_OF_BUFFER;
2742 	fl <<= MPI_SGE_FLAGS_SHIFT;
2743 	fl |= (length);
2744 	se->FlagsLength = htole32(fl);
2745 	se->Address = htole32((uint32_t) paddr);
2746 }
2747 #endif
2748 
2749 	/*
2750 	 * Send it on...
2751 	 */
2752 	mpt_send_cmd(mpt, req);
2753 }
2754 
2755 static int
2756 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2757     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2758 {
2759 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2760 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2761 	U8 rctl;
2762 	U8 type;
2763 	U8 cmd;
2764 	U16 status = le16toh(reply_frame->IOCStatus);
2765 	U32 *elsbuf;
2766 	int ioindex;
2767 	int do_refresh = TRUE;
2768 
2769 #ifdef	INVARIANTS
2770 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2771 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2772 	    req, req->serno, rp->Function));
2773 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2774 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2775 	} else {
2776 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2777 	}
2778 #endif
2779 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2780 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2781 	    req, req->serno, reply_frame, reply_frame->Function);
2782 
2783 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2784 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2785 		    status, reply_frame->Function);
2786 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2787 			/*
2788 			 * XXX: to get around shutdown issue
2789 			 */
2790 			mpt->disabled = 1;
2791 			return (TRUE);
2792 		}
2793 		return (TRUE);
2794 	}
2795 
2796 	/*
2797 	 * If the function is a link service response, we recycle the
2798 	 * response to be a refresh for a new link service request.
2799 	 *
2800 	 * The request pointer is bogus in this case and we have to fetch
2801 	 * it based upon the TransactionContext.
2802 	 */
2803 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2804 		/* Freddie Uncle Charlie Katie */
2805 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2806 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2807 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2808 				break;
2809 			}
2810 
2811 		KASSERT(ioindex < mpt->els_cmds_allocated,
2812 		    ("can't find my mommie!"));
2813 
2814 		/* remove from active list as we're going to re-post it */
2815 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2816 		req->state &= ~REQ_STATE_QUEUED;
2817 		req->state |= REQ_STATE_DONE;
2818 		mpt_fc_post_els(mpt, req, ioindex);
2819 		return (TRUE);
2820 	}
2821 
2822 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2823 		/* remove from active list as we're done */
2824 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2825 		req->state &= ~REQ_STATE_QUEUED;
2826 		req->state |= REQ_STATE_DONE;
2827 		if (req->state & REQ_STATE_TIMEDOUT) {
2828 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2829 			    "Sync Primitive Send Completed After Timeout\n");
2830 			mpt_free_request(mpt, req);
2831 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2832 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2833 			    "Async Primitive Send Complete\n");
2834 			mpt_free_request(mpt, req);
2835 		} else {
2836 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2837 			    "Sync Primitive Send Complete- Waking Waiter\n");
2838 			wakeup(req);
2839 		}
2840 		return (TRUE);
2841 	}
2842 
2843 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2844 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2845 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2846 		    rp->MsgLength, rp->MsgFlags);
2847 		return (TRUE);
2848 	}
2849 
2850 	if (rp->MsgLength <= 5) {
2851 		/*
2852 		 * This is just an ack of an original ELS buffer post
2853 		 */
2854 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2855 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2856 		return (TRUE);
2857 	}
2858 
2859 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2860 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2861 
2862 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2863 	cmd = be32toh(elsbuf[0]) >> 24;
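	/*
	 * Note (annotation): the ELS command code is the first byte of the
	 * big-endian payload word extracted just above.
	 */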
2864 
2865 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2866 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2867 		return (TRUE);
2868 	}
2869 
2870 	ioindex = le32toh(rp->TransactionContext);
2871 	req = mpt->els_cmd_ptrs[ioindex];
2872 
2873 	if (rctl == ELS && type == 1) {
2874 		switch (cmd) {
2875 		case PRLI:
2876 			/*
2877 			 * Send back a PRLI ACC
2878 			 */
2879 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2880 			    le32toh(rp->Wwn.PortNameHigh),
2881 			    le32toh(rp->Wwn.PortNameLow));
2882 			elsbuf[0] = htobe32(0x02100014);
2883 			elsbuf[1] |= htobe32(0x00000100);
2884 			elsbuf[4] = htobe32(0x00000002);
2885 			if (mpt->role & MPT_ROLE_TARGET)
2886 				elsbuf[4] |= htobe32(0x00000010);
2887 			if (mpt->role & MPT_ROLE_INITIATOR)
2888 				elsbuf[4] |= htobe32(0x00000020);
2889 			/* remove from active list as we're done */
2890 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2891 			req->state &= ~REQ_STATE_QUEUED;
2892 			req->state |= REQ_STATE_DONE;
2893 			mpt_fc_els_send_response(mpt, req, rp, 20);
2894 			do_refresh = FALSE;
2895 			break;
2896 		case PRLO:
2897 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2898 			elsbuf[0] = htobe32(0x02100014);
2899 			elsbuf[1] = htobe32(0x08000100);
2900 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2901 			    le32toh(rp->Wwn.PortNameHigh),
2902 			    le32toh(rp->Wwn.PortNameLow));
2903 			/* remove from active list as we're done */
2904 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2905 			req->state &= ~REQ_STATE_QUEUED;
2906 			req->state |= REQ_STATE_DONE;
2907 			mpt_fc_els_send_response(mpt, req, rp, 20);
2908 			do_refresh = FALSE;
2909 			break;
2910 		default:
2911 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2912 			break;
2913 		}
2914 	} else if (rctl == ABTS && type == 0) {
2915 		uint16_t rx_id = le16toh(rp->Rxid);
2916 		uint16_t ox_id = le16toh(rp->Oxid);
2917 		mpt_tgt_state_t *tgt;
2918 		request_t *tgt_req = NULL;
2919 		union ccb *ccb;
2920 		uint32_t ct_id;
2921 
2922 		mpt_prt(mpt,
2923 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2924 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2925 		    le32toh(rp->Wwn.PortNameLow));
2926 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2927 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2928 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2929 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2930 		} else {
2931 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2932 		}
2933 		if (tgt_req == NULL) {
2934 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2935 			goto skip;
2936 		}
2937 		tgt = MPT_TGT_STATE(mpt, tgt_req);
2938 
2939 		/* Check to make sure we have the correct command. */
2940 		ct_id = GET_IO_INDEX(tgt->reply_desc);
2941 		if (ct_id != rx_id) {
2942 			mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2943 			    "RX_ID received=0x%x, in cmd=0x%x\n", rx_id, ct_id);
2944 			goto skip;
2945 		}
2946 		if (tgt->itag != ox_id) {
2947 			mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2948 			    "OX_ID received=0x%x, in cmd=0x%x\n", ox_id, tgt->itag);
2949 			goto skip;
2950 		}
2951 
2952 		if ((ccb = tgt->ccb) != NULL) {
2953 			mpt_prt(mpt, "CCB (%p): lun %jx flags %x status %x\n",
2954 			    ccb, (uintmax_t)ccb->ccb_h.target_lun,
2955 			    ccb->ccb_h.flags, ccb->ccb_h.status);
2956 		}
2957 		mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2958 		    "%x nxfers %x\n", tgt->state, tgt->resid,
2959 		    tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers);
2960 		if (mpt_abort_target_cmd(mpt, tgt_req))
2961 			mpt_prt(mpt, "unable to start TargetAbort\n");
2962 
2963 skip:
2964 		memset(elsbuf, 0, 5 * (sizeof (U32)));
2965 		elsbuf[0] = htobe32(0);
2966 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2967 		elsbuf[2] = htobe32(0x000ffff);
2968 		/*
2969 		 * Dork with the reply frame so that the response to it
2970 		 * will be correct: R_CTL is bumped from ABTS to BA_ACC.
2971 		 */
2972 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2973 		/* remove from active list as we're done */
2974 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2975 		req->state &= ~REQ_STATE_QUEUED;
2976 		req->state |= REQ_STATE_DONE;
2977 		mpt_fc_els_send_response(mpt, req, rp, 12);
2978 		do_refresh = FALSE;
2979 	} else {
2980 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2981 	}
2982 	if (do_refresh == TRUE) {
2983 		/* remove from active list as we're done */
2984 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2985 		req->state &= ~REQ_STATE_QUEUED;
2986 		req->state |= REQ_STATE_DONE;
2987 		mpt_fc_post_els(mpt, req, ioindex);
2988 	}
2989 	return (TRUE);
2990 }
2991 
2992 /*
2993  * Clean up all SCSI Initiator personality state in response
2994  * to a controller reset.
2995  */
2996 static void
2997 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2998 {
2999 
3000 	/*
3001 	 * The pending list is already run down by
3002 	 * the generic handler.  Perform the same
3003 	 * operation on the timed out request list.
3004 	 */
3005 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3006 				   MPI_IOCSTATUS_INVALID_STATE);
3007 
3008 	/*
3009 	 * XXX: Do we need to repost ELS and Target Command Buffers?
3010 	 */
3011 
3012 	/*
3013 	 * Inform the XPT that a bus reset has occurred.
3014 	 */
3015 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
3016 }
3017 
3018 /*
3019  * Parse additional completion information in the reply
3020  * frame for SCSI I/O requests.
3021  */
3022 static int
3023 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3024 			     MSG_DEFAULT_REPLY *reply_frame)
3025 {
3026 	union ccb *ccb;
3027 	MSG_SCSI_IO_REPLY *scsi_io_reply;
3028 	u_int ioc_status;
3029 	u_int sstate;
3030 
3031 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3032 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3033 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3034 		("MPT SCSI I/O Handler called with incorrect reply type"));
3035 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3036 		("MPT SCSI I/O Handler called with continuation reply"));
3037 
3038 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3039 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
3040 	ioc_status &= MPI_IOCSTATUS_MASK;
3041 	sstate = scsi_io_reply->SCSIState;
3042 
3043 	ccb = req->ccb;
3044 	ccb->csio.resid =
3045 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3046 
3047 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3048 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3049 		uint32_t sense_returned;
3050 
3051 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3052 
3053 		sense_returned = le32toh(scsi_io_reply->SenseCount);
3054 		if (sense_returned < ccb->csio.sense_len)
3055 			ccb->csio.sense_resid = ccb->csio.sense_len -
3056 						sense_returned;
3057 		else
3058 			ccb->csio.sense_resid = 0;
3059 
3060 		bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3061 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3062 		    min(ccb->csio.sense_len, sense_returned));
3063 	}
3064 
3065 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3066 		/*
3067 		 * Tag messages rejected, but non-tagged retry
3068 		 * was successful.
3069 XXXX
3070 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3071 		 */
3072 	}
3073 
3074 	switch(ioc_status) {
3075 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3076 		/*
3077 		 * XXX
3078 		 * Linux driver indicates that a zero
3079 		 * transfer length with this error code
3080 		 * indicates a CRC error.
3081 		 *
3082 		 * No need to swap the bytes for checking
3083 		 * against zero.
3084 		 */
3085 		if (scsi_io_reply->TransferCount == 0) {
3086 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3087 			break;
3088 		}
3089 		/* FALLTHROUGH */
3090 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3091 	case MPI_IOCSTATUS_SUCCESS:
3092 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3093 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3094 			/*
3095 			 * Status was never returned for this transaction.
3096 			 */
3097 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3098 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3099 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3100 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3101 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3102 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3103 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3104 			/* XXX Handle SPI-Packet and FCP-2 response info. */
3105 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3106 		} else
3107 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3108 		break;
3109 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3110 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3111 		break;
3112 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3113 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3114 		break;
3115 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3116 		/*
3117 		 * Since selection timeouts and "device really not
3118 		 * there" are grouped into this error code, report
3119 		 * selection timeout.  Selection timeouts are
3120 		 * typically retried before giving up on the device
3121 		 * whereas "device not there" errors are considered
3122 		 * unretryable.
3123 		 */
3124 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3125 		break;
3126 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3127 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3128 		break;
3129 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3130 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3131 		break;
3132 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3133 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3134 		break;
3135 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3136 		ccb->ccb_h.status = CAM_UA_TERMIO;
3137 		break;
3138 	case MPI_IOCSTATUS_INVALID_STATE:
3139 		/*
3140 		 * The IOC has been reset.  Emulate a bus reset.
3141 		 */
3142 		/* FALLTHROUGH */
3143 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3144 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3145 		break;
3146 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3147 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3148 		/*
3149 		 * Don't clobber any timeout status that has
3150 		 * already been set for this transaction.  We
3151 		 * want the SCSI layer to be able to differentiate
3152 		 * between the command we aborted due to timeout
3153 		 * and any innocent bystanders.
3154 		 */
3155 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3156 			break;
3157 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3158 		break;
3159 
3160 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3161 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3162 		break;
3163 	case MPI_IOCSTATUS_BUSY:
3164 		mpt_set_ccb_status(ccb, CAM_BUSY);
3165 		break;
3166 	case MPI_IOCSTATUS_INVALID_FUNCTION:
3167 	case MPI_IOCSTATUS_INVALID_SGL:
3168 	case MPI_IOCSTATUS_INTERNAL_ERROR:
3169 	case MPI_IOCSTATUS_INVALID_FIELD:
3170 	default:
3171 		/* XXX
3172 		 * Some of the above may need to kick
3173 		 * off a recovery action!!!!
3174 		 */
3175 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3176 		break;
3177 	}
3178 
3179 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3180 		mpt_freeze_ccb(ccb);
3181 	}
3182 
3183 	return (TRUE);
3184 }
3185 
3186 static void
3187 mpt_action(struct cam_sim *sim, union ccb *ccb)
3188 {
3189 	struct mpt_softc *mpt;
3190 	struct ccb_trans_settings *cts;
3191 	target_id_t tgt;
3192 	lun_id_t lun;
3193 	int raid_passthru;
3194 
3195 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3196 
3197 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
3198 	raid_passthru = (sim == mpt->phydisk_sim);
3199 	MPT_LOCK_ASSERT(mpt);
3200 
3201 	tgt = ccb->ccb_h.target_id;
3202 	lun = ccb->ccb_h.target_lun;
3203 	if (raid_passthru &&
3204 	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
3205 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
3206 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
3207 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3208 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3209 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3210 			xpt_done(ccb);
3211 			return;
3212 		}
3213 	}
3214 	ccb->ccb_h.ccb_mpt_ptr = mpt;
3215 
3216 	switch (ccb->ccb_h.func_code) {
3217 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
3218 		/*
3219 		 * Do a couple of preliminary checks...
3220 		 */
3221 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3222 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3223 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3224 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3225 				break;
3226 			}
3227 		}
3228 		/* Max supported CDB length is 16 bytes */
3229 		/* XXX Unless we implement the new 32byte message type */
3230 		/* XXX Unless we implement the new 32-byte message type */
3231 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3232 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3233 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3234 			break;
3235 		}
3236 #ifdef	MPT_TEST_MULTIPATH
3237 		if (mpt->failure_id == ccb->ccb_h.target_id) {
3238 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3239 			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3240 			break;
3241 		}
3242 #endif
3243 		ccb->csio.scsi_status = SCSI_STATUS_OK;
3244 		mpt_start(sim, ccb);
3245 		return;
3246 
3247 	case XPT_RESET_BUS:
3248 		if (raid_passthru) {
3249 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3250 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3251 			break;
3252 		}
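		/* FALLTHROUGH */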
3253 	case XPT_RESET_DEV:
3254 		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3255 			if (bootverbose) {
3256 				xpt_print(ccb->ccb_h.path, "reset bus\n");
3257 			}
3258 		} else {
3259 			xpt_print(ccb->ccb_h.path, "reset device\n");
3260 		}
3261 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3262 
3263 		/*
3264 		 * mpt_bus_reset is always successful in that it
3265 		 * will fall back to a hard reset should a bus
3266 		 * reset attempt fail.
3267 		 */
3268 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3269 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3270 		break;
3271 
3272 	case XPT_ABORT:
3273 	{
3274 		union ccb *accb = ccb->cab.abort_ccb;
3275 		switch (accb->ccb_h.func_code) {
3276 		case XPT_ACCEPT_TARGET_IO:
3277 		case XPT_IMMEDIATE_NOTIFY:
3278 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3279 			break;
3280 		case XPT_CONT_TARGET_IO:
3281 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3282 			ccb->ccb_h.status = CAM_UA_ABORT;
3283 			break;
3284 		case XPT_SCSI_IO:
3285 			ccb->ccb_h.status = CAM_UA_ABORT;
3286 			break;
3287 		default:
3288 			ccb->ccb_h.status = CAM_REQ_INVALID;
3289 			break;
3290 		}
3291 		break;
3292 	}
3293 
3294 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3295 
3296 #define	DP_DISC_ENABLE	0x1
3297 #define	DP_DISC_DISABL	0x2
3298 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
3299 
3300 #define	DP_TQING_ENABLE	0x4
3301 #define	DP_TQING_DISABL	0x8
3302 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
3303 
3304 #define	DP_WIDE		0x10
3305 #define	DP_NARROW	0x20
3306 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
3307 
3308 #define	DP_SYNC		0x40
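/*
 * Note (annotation): the DP_* bits accumulate in 'dval' below to record
 * which nexus settings an XPT_SET_TRAN_SETTINGS CCB asked us to change.
 */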
3309 
3310 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
3311 	{
3312 		struct ccb_trans_settings_scsi *scsi;
3313 		struct ccb_trans_settings_spi *spi;
3314 		uint8_t dval;
3315 		u_int period;
3316 		u_int offset;
3317 		int i, j;
3318 
3319 		cts = &ccb->cts;
3320 
3321 		if (mpt->is_fc || mpt->is_sas) {
3322 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3323 			break;
3324 		}
3325 
3326 		scsi = &cts->proto_specific.scsi;
3327 		spi = &cts->xport_specific.spi;
3328 
3329 		/*
3330 		 * We can be called just to validate transport and proto versions
3331 		 */
3332 		if (scsi->valid == 0 && spi->valid == 0) {
3333 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3334 			break;
3335 		}
3336 
3337 		/*
3338 		 * Skip attempting settings on RAID volume disks.
3339 		 * Other devices on the bus get the normal treatment.
3340 		 */
3341 		if (mpt->phydisk_sim && raid_passthru == 0 &&
3342 		    mpt_is_raid_volume(mpt, tgt) != 0) {
3343 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3344 			    "no transfer settings for RAID vols\n");
3345 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3346 			break;
3347 		}
3348 
3349 		i = mpt->mpt_port_page2.PortSettings &
3350 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3351 		j = mpt->mpt_port_page2.PortFlags &
3352 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3353 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3354 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3355 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
3356 			    "honoring BIOS transfer negotiations\n");
3357 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3358 			break;
3359 		}
3360 
3361 		dval = 0;
3362 		period = 0;
3363 		offset = 0;
3364 
3365 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3366 			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3367 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3368 		}
3369 
3370 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3371 			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3372 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3373 		}
3374 
3375 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3376 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3377 			    DP_WIDE : DP_NARROW;
3378 		}
3379 
3380 		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3381 			dval |= DP_SYNC;
3382 			offset = spi->sync_offset;
3383 		} else {
3384 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3385 			    &mpt->mpt_dev_page1[tgt];
3386 			offset = ptr->RequestedParameters;
3387 			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3388 			offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3389 		}
3390 		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3391 			dval |= DP_SYNC;
3392 			period = spi->sync_period;
3393 		} else {
3394 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3395 			    &mpt->mpt_dev_page1[tgt];
3396 			period = ptr->RequestedParameters;
3397 			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3398 			period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3399 		}
3400 
3401 		if (dval & DP_DISC_ENABLE) {
3402 			mpt->mpt_disc_enable |= (1 << tgt);
3403 		} else if (dval & DP_DISC_DISABL) {
3404 			mpt->mpt_disc_enable &= ~(1 << tgt);
3405 		}
3406 		if (dval & DP_TQING_ENABLE) {
3407 			mpt->mpt_tag_enable |= (1 << tgt);
3408 		} else if (dval & DP_TQING_DISABL) {
3409 			mpt->mpt_tag_enable &= ~(1 << tgt);
3410 		}
3411 		if (dval & DP_WIDTH) {
3412 			mpt_setwidth(mpt, tgt, 1);
3413 		}
3414 		if (dval & DP_SYNC) {
3415 			mpt_setsync(mpt, tgt, period, offset);
3416 		}
3417 		if (dval == 0) {
3418 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3419 			break;
3420 		}
3421 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3422 		    "set [%d]: 0x%x period 0x%x offset %d\n",
3423 		    tgt, dval, period, offset);
3424 		if (mpt_update_spi_config(mpt, tgt)) {
3425 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3426 		} else {
3427 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3428 		}
3429 		break;
3430 	}
3431 	case XPT_GET_TRAN_SETTINGS:
3432 	{
3433 		struct ccb_trans_settings_scsi *scsi;
3434 		cts = &ccb->cts;
3435 		cts->protocol = PROTO_SCSI;
3436 		if (mpt->is_fc) {
3437 			struct ccb_trans_settings_fc *fc =
3438 			    &cts->xport_specific.fc;
3439 			cts->protocol_version = SCSI_REV_SPC;
3440 			cts->transport = XPORT_FC;
3441 			cts->transport_version = 0;
3442 			if (mpt->mpt_fcport_speed != 0) {
3443 				fc->valid = CTS_FC_VALID_SPEED;
3444 				fc->bitrate = 100000 * mpt->mpt_fcport_speed;
3445 			}
3446 		} else if (mpt->is_sas) {
3447 			struct ccb_trans_settings_sas *sas =
3448 			    &cts->xport_specific.sas;
3449 			cts->protocol_version = SCSI_REV_SPC2;
3450 			cts->transport = XPORT_SAS;
3451 			cts->transport_version = 0;
3452 			sas->valid = CTS_SAS_VALID_SPEED;
3453 			sas->bitrate = 300000;
3454 		} else {
3455 			cts->protocol_version = SCSI_REV_2;
3456 			cts->transport = XPORT_SPI;
3457 			cts->transport_version = 2;
3458 			if (mpt_get_spi_settings(mpt, cts) != 0) {
3459 				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3460 				break;
3461 			}
3462 		}
3463 		scsi = &cts->proto_specific.scsi;
3464 		scsi->valid = CTS_SCSI_VALID_TQ;
3465 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3466 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3467 		break;
3468 	}
3469 	case XPT_CALC_GEOMETRY:
3470 	{
3471 		struct ccb_calc_geometry *ccg;
3472 
3473 		ccg = &ccb->ccg;
3474 		if (ccg->block_size == 0) {
3475 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3476 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3477 			break;
3478 		}
3479 		cam_calc_geometry(ccg, /* extended */ 1);
3480 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3481 		break;
3482 	}
3483 	case XPT_GET_SIM_KNOB:
3484 	{
3485 		struct ccb_sim_knob *kp = &ccb->knob;
3486 
3487 		if (mpt->is_fc) {
3488 			kp->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
3489 			kp->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
3490 			switch (mpt->role) {
3491 			case MPT_ROLE_NONE:
3492 				kp->xport_specific.fc.role = KNOB_ROLE_NONE;
3493 				break;
3494 			case MPT_ROLE_INITIATOR:
3495 				kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
3496 				break;
3497 			case MPT_ROLE_TARGET:
3498 				kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
3499 				break;
3500 			case MPT_ROLE_BOTH:
3501 				kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
3502 				break;
3503 			}
3504 			kp->xport_specific.fc.valid =
3505 			    KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
3506 			ccb->ccb_h.status = CAM_REQ_CMP;
3507 		} else {
3508 			ccb->ccb_h.status = CAM_REQ_INVALID;
3509 		}
3510 		xpt_done(ccb);
3511 		break;
3512 	}
3513 	case XPT_PATH_INQ:		/* Path routing inquiry */
3514 	{
3515 		struct ccb_pathinq *cpi = &ccb->cpi;
3516 
3517 		cpi->version_num = 1;
3518 		cpi->target_sprt = 0;
3519 		cpi->hba_eng_cnt = 0;
3520 		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3521 		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3522 		/*
3523 		 * FC cards report MAX_DEVICES of 512, but
3524 		 * the MSG_SCSI_IO_REQUEST target id field
3525 		 * is only 8 bits. Until we fix the driver
3526 		 * to support 'channels' for bus overflow,
3527 		 * just limit it.
3528 		 */
3529 		if (cpi->max_target > 255) {
3530 			cpi->max_target = 255;
3531 		}
3532 
3533 		/*
3534 		 * VMware ESX reports > 16 devices and then dies when we probe.
3535 		 */
3536 		if (mpt->is_spi && cpi->max_target > 15) {
3537 			cpi->max_target = 15;
3538 		}
3539 		if (mpt->is_spi)
3540 			cpi->max_lun = 7;
3541 		else
3542 			cpi->max_lun = MPT_MAX_LUNS;
3543 		cpi->initiator_id = mpt->mpt_ini_id;
3544 		cpi->bus_id = cam_sim_bus(sim);
3545 
3546 		/*
3547 		 * The base speed is the speed of the underlying connection.
3548 		 */
3549 		cpi->protocol = PROTO_SCSI;
3550 		if (mpt->is_fc) {
3551 			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
3552 			    PIM_EXTLUNS;
3553 			cpi->base_transfer_speed = 100000;
3554 			cpi->hba_inquiry = PI_TAG_ABLE;
3555 			cpi->transport = XPORT_FC;
3556 			cpi->transport_version = 0;
3557 			cpi->protocol_version = SCSI_REV_SPC;
3558 			cpi->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
3559 			cpi->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
3560 			cpi->xport_specific.fc.port = mpt->scinfo.fc.portid;
3561 			cpi->xport_specific.fc.bitrate =
3562 			    100000 * mpt->mpt_fcport_speed;
3563 		} else if (mpt->is_sas) {
3564 			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
3565 			    PIM_EXTLUNS;
3566 			cpi->base_transfer_speed = 300000;
3567 			cpi->hba_inquiry = PI_TAG_ABLE;
3568 			cpi->transport = XPORT_SAS;
3569 			cpi->transport_version = 0;
3570 			cpi->protocol_version = SCSI_REV_SPC2;
3571 		} else {
3572 			cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED |
3573 			    PIM_EXTLUNS;
3574 			cpi->base_transfer_speed = 3300;
3575 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3576 			cpi->transport = XPORT_SPI;
3577 			cpi->transport_version = 2;
3578 			cpi->protocol_version = SCSI_REV_2;
3579 		}
3580 
3581 		/*
3582 		 * We give our fake RAID passthru bus a width that is
3583 		 * MaxPhysDisks wide and restrict it to one lun.
3584 		 */
3585 		if (raid_passthru) {
3586 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3587 			cpi->initiator_id = cpi->max_target + 1;
3588 			cpi->max_lun = 0;
3589 		}
3590 
3591 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3592 			cpi->hba_misc |= PIM_NOINITIATOR;
3593 		}
3594 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3595 			cpi->target_sprt =
3596 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3597 		} else {
3598 			cpi->target_sprt = 0;
3599 		}
3600 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3601 		strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3602 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3603 		cpi->unit_number = cam_sim_unit(sim);
3604 		cpi->ccb_h.status = CAM_REQ_CMP;
3605 		break;
3606 	}
3607 	case XPT_EN_LUN:		/* Enable LUN as a target */
3608 	{
3609 		int result;
3610 
3611 		if (ccb->cel.enable)
3612 			result = mpt_enable_lun(mpt,
3613 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3614 		else
3615 			result = mpt_disable_lun(mpt,
3616 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3617 		if (result == 0) {
3618 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3619 		} else {
3620 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3621 		}
3622 		break;
3623 	}
3624 	case XPT_IMMEDIATE_NOTIFY:	/* Add Immediate Notify Resource */
3625 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3626 	{
3627 		tgt_resource_t *trtp;
3628 		lun_id_t lun = ccb->ccb_h.target_lun;
3629 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3630 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3631 
3632 		if (lun == CAM_LUN_WILDCARD) {
3633 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3634 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3635 				break;
3636 			}
3637 			trtp = &mpt->trt_wildcard;
3638 		} else if (lun >= MPT_MAX_LUNS) {
3639 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3640 			break;
3641 		} else {
3642 			trtp = &mpt->trt[lun];
3643 		}
3644 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3645 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3646 			    "Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun);
3647 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3648 			    sim_links.stqe);
3649 		} else {
3650 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3651 			    "Put FREE INOT lun %jx\n", (uintmax_t)lun);
3652 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3653 			    sim_links.stqe);
3654 		}
3655 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3656 		return;
3657 	}
3658 	case XPT_NOTIFY_ACKNOWLEDGE:	/* Task management request done. */
3659 	{
3660 		request_t *req = MPT_TAG_2_REQ(mpt, ccb->cna2.tag_id);
3661 
3662 		mpt_lprt(mpt, MPT_PRT_DEBUG, "Got Notify ACK\n");
3663 		mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL, 0);
3664 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3665 		break;
3666 	}
3667 	case XPT_CONT_TARGET_IO:
3668 		mpt_target_start_io(mpt, ccb);
3669 		return;
3670 
3671 	default:
3672 		ccb->ccb_h.status = CAM_REQ_INVALID;
3673 		break;
3674 	}
3675 	xpt_done(ccb);
3676 }
3677 
3678 static int
3679 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3680 {
3681 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3682 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3683 	target_id_t tgt;
3684 	uint32_t dval, pval, oval;
3685 	int rv;
3686 
3687 	if (IS_CURRENT_SETTINGS(cts) == 0) {
3688 		tgt = cts->ccb_h.target_id;
3689 	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3690 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3691 			return (-1);
3692 		}
3693 	} else {
3694 		tgt = cts->ccb_h.target_id;
3695 	}
3696 
3697 	/*
3698 	 * We aren't looking at Port Page 2 BIOS settings here;
3699 	 * sometimes these have been known to be bogus. XXX
3700 	 *
3701 	 * For user settings, we pick the max from port page 0.
3702 	 *
3703 	 * For current settings we read the current settings out from
3704 	 * device page 0 for that target.
3705 	 */
3706 	if (IS_CURRENT_SETTINGS(cts)) {
3707 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3708 		dval = 0;
3709 
3710 		tmp = mpt->mpt_dev_page0[tgt];
3711 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3712 		    sizeof(tmp), FALSE, 5000);
3713 		if (rv) {
3714 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3715 			return (rv);
3716 		}
3717 		mpt2host_config_page_scsi_device_0(&tmp);
3718 
3719 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3720 		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3721 		    tmp.NegotiatedParameters, tmp.Information);
3722 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3723 		    DP_WIDE : DP_NARROW;
3724 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3725 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3726 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3727 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3728 		oval = tmp.NegotiatedParameters;
3729 		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3730 		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3731 		pval = tmp.NegotiatedParameters;
3732 		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3733 		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3734 		mpt->mpt_dev_page0[tgt] = tmp;
3735 	} else {
3736 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3737 		oval = mpt->mpt_port_page0.Capabilities;
3738 		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3739 		pval = mpt->mpt_port_page0.Capabilities;
3740 		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3741 	}
3742 
3743 	spi->valid = 0;
3744 	scsi->valid = 0;
3745 	spi->flags = 0;
3746 	scsi->flags = 0;
3747 	spi->sync_offset = oval;
3748 	spi->sync_period = pval;
3749 	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3750 	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3751 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3752 	if (dval & DP_WIDE) {
3753 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3754 	} else {
3755 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3756 	}
3757 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3758 		scsi->valid = CTS_SCSI_VALID_TQ;
3759 		if (dval & DP_TQING_ENABLE) {
3760 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3761 		}
3762 		spi->valid |= CTS_SPI_VALID_DISC;
3763 		if (dval & DP_DISC_ENABLE) {
3764 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3765 		}
3766 	}
3767 
3768 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3769 	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3770 	    IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
3771 	return (0);
3772 }
3773 
3774 static void
3775 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3776 {
3777 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3778 
3779 	ptr = &mpt->mpt_dev_page1[tgt];
3780 	if (onoff) {
3781 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3782 	} else {
3783 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3784 	}
3785 }
3786 
3787 static void
3788 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3789 {
3790 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3791 
3792 	ptr = &mpt->mpt_dev_page1[tgt];
3793 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3794 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3795 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3796 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3797 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3798 	if (period == 0) {
3799 		return;
3800 	}
3801 	ptr->RequestedParameters |=
3802 	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3803 	ptr->RequestedParameters |=
3804 	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
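	/*
	 * Sync period factors below 0x0a (the Ultra160 range and
	 * faster in the SPI period factor encoding) require DT
	 * clocking; below 0x09 (Ultra320) we request QAS and
	 * information units as well.
	 */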
3805 	if (period < 0xa) {
3806 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3807 	}
3808 	if (period < 0x9) {
3809 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3810 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3811 	}
3812 }
3813 
3814 static int
3815 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3816 {
3817 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3818 	int rv;
3819 
3820 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3821 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3822 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3823 	tmp = mpt->mpt_dev_page1[tgt];
3824 	host2mpt_config_page_scsi_device_1(&tmp);
3825 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3826 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3827 	if (rv) {
3828 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3829 		return (-1);
3830 	}
3831 	return (0);
3832 }
3833 
3834 /****************************** Timeout Recovery ******************************/
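/*
 * A per-controller kernel process handles command timeouts, since
 * task management functions must be issued and waited for from a
 * sleepable context.  mpt_spawn_recovery_thread() creates it,
 * mpt_terminate_recovery_thread() tears it down (interlocked by
 * sleeping on &mpt->recovery_thread), and mpt_recover_commands()
 * does the actual abort work.
 */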
3835 static int
3836 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3837 {
3838 	int error;
3839 
3840 	error = kproc_create(mpt_recovery_thread, mpt,
3841 	    &mpt->recovery_thread, /*flags*/0,
3842 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3843 	return (error);
3844 }
3845 
3846 static void
3847 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3848 {
3849 
3850 	if (mpt->recovery_thread == NULL) {
3851 		return;
3852 	}
3853 	mpt->shutdwn_recovery = 1;
3854 	wakeup(mpt);
3855 	/*
3856 	 * Sleep on a slightly different location
3857 	 * for this interlock just for added safety.
3858 	 */
3859 	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3860 }
3861 
3862 static void
3863 mpt_recovery_thread(void *arg)
3864 {
3865 	struct mpt_softc *mpt;
3866 
3867 	mpt = (struct mpt_softc *)arg;
3868 	MPT_LOCK(mpt);
3869 	for (;;) {
3870 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3871 			if (mpt->shutdwn_recovery == 0) {
3872 				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3873 			}
3874 		}
3875 		if (mpt->shutdwn_recovery != 0) {
3876 			break;
3877 		}
3878 		mpt_recover_commands(mpt);
3879 	}
3880 	mpt->recovery_thread = NULL;
3881 	wakeup(&mpt->recovery_thread);
3882 	MPT_UNLOCK(mpt);
3883 	kproc_exit(0);
3884 }
3885 
3886 static int
3887 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3888     u_int channel, target_id_t target, lun_id_t lun, u_int abort_ctx,
3889     int sleep_ok)
3890 {
3891 	MSG_SCSI_TASK_MGMT *tmf_req;
3892 	int		    error;
3893 
3894 	/*
3895 	 * Wait for any current TMF request to complete.
3896 	 * We're only allowed to issue one TMF at a time.
3897 	 */
3898 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3899 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
3900 	if (error != 0) {
3901 		mpt_reset(mpt, TRUE);
3902 		return (ETIMEDOUT);
3903 	}
3904 
3905 	mpt_assign_serno(mpt, mpt->tmf_req);
3906 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3907 
3908 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3909 	memset(tmf_req, 0, sizeof(*tmf_req));
3910 	tmf_req->TargetID = target;
3911 	tmf_req->Bus = channel;
3912 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3913 	tmf_req->TaskType = type;
3914 	tmf_req->MsgFlags = flags;
3915 	tmf_req->MsgContext =
3916 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3917 	be64enc(tmf_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
3918 	tmf_req->TaskMsgContext = abort_ctx;
3919 
3920 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3921 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3922 	    mpt->tmf_req->serno, tmf_req->MsgContext);
3923 	if (mpt->verbose > MPT_PRT_DEBUG) {
3924 		mpt_print_request(tmf_req);
3925 	}
3926 
3927 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3928 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3929 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3930 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3931 	if (error != MPT_OK) {
3932 		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
3933 		mpt->tmf_req->state = REQ_STATE_FREE;
3934 		mpt_reset(mpt, TRUE);
3935 	}
3936 	return (error);
3937 }
3938 
3939 /*
3940  * When a command times out, it is placed on the request_timeout_list
3941  * and we wake our recovery thread.  The MPT-Fusion architecture supports
3942  * only a single TMF operation at a time, so we serially abort/bdr, etc.,
3943  * the timed-out transactions.  The next TMF is issued either by the
3944  * completion handler of the current TMF waking our recovery thread,
3945  * or the TMF timeout handler causing a hard reset sequence.
3946  */
3947 static void
3948 mpt_recover_commands(struct mpt_softc *mpt)
3949 {
3950 	request_t	   *req;
3951 	union ccb	   *ccb;
3952 	int		    error;
3953 
3954 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3955 		/*
3956 		 * No work to do- leave.
3957 		 */
3958 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3959 		return;
3960 	}
3961 
3962 	/*
3963 	 * Flush any commands whose completion coincides with their timeout.
3964 	 */
3965 	mpt_intr(mpt);
3966 
3967 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3968 		/*
3969 		 * The timed-out commands have already
3970 		 * completed.  This typically means
3971 		 * that either the timeout value was on
3972 		 * the hairy edge of what the device
3973 		 * requires or - more likely - interrupts
3974 		 * are not happening.
3975 		 */
3976 		mpt_prt(mpt, "Timedout requests already complete. "
3977 		    "Interrupts may not be functioning.\n");
3978 		mpt_enable_ints(mpt);
3979 		return;
3980 	}
3981 
3982 	/*
3983 	 * We have no visibility into the current state of the
3984 	 * controller, so attempt to abort the commands in the
3985 	 * order they timed-out. For initiator commands, we
3986 	 * depend on the reply handler pulling requests off
3987 	 * the timeout list.
3988 	 */
3989 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3990 		uint16_t status;
3991 		uint8_t response;
3992 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3993 
3994 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3995 		"attempting to abort req %p:%u function %x\n",
3996 		    req, req->serno, hdrp->Function);
3997 		ccb = req->ccb;
3998 		if (ccb == NULL) {
3999 			mpt_prt(mpt, "null ccb in timed out request. "
4000 			    "Resetting Controller.\n");
4001 			mpt_reset(mpt, TRUE);
4002 			continue;
4003 		}
4004 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4005 
4006 		/*
4007 		 * Check to see if this is not an initiator command and
4008 		 * deal with it differently if it is.
4009 		 */
4010 		switch (hdrp->Function) {
4011 		case MPI_FUNCTION_SCSI_IO_REQUEST:
4012 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4013 			break;
4014 		default:
4015 			/*
4016 			 * XXX: FIX ME: need to abort target assists...
4017 			 */
4018 			mpt_prt(mpt, "just putting it back on the pend q\n");
4019 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4020 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4021 			    links);
4022 			continue;
4023 		}
4024 
4025 		error = mpt_scsi_send_tmf(mpt,
4026 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4027 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4028 		    htole32(req->index | scsi_io_handler_id), TRUE);
4029 
4030 		if (error != 0) {
4031 			/*
4032 			 * mpt_scsi_send_tmf hard resets on failure, so no
4033 			 * need to do so here.  Our queue should be emptied
4034 			 * by the hard reset.
4035 			 */
4036 			continue;
4037 		}
4038 
4039 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4040 		    REQ_STATE_DONE, TRUE, 500);
4041 
4042 		status = le16toh(mpt->tmf_req->IOCStatus);
4043 		response = mpt->tmf_req->ResponseCode;
4044 		mpt->tmf_req->state = REQ_STATE_FREE;
4045 
4046 		if (error != 0) {
4047 			/*
4048 			 * If we've errored out, reset the controller.
4049 			 */
4050 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4051 			    "Resetting controller\n");
4052 			mpt_reset(mpt, TRUE);
4053 			continue;
4054 		}
4055 
4056 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4057 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4058 			    "Resetting controller.\n", status);
4059 			mpt_reset(mpt, TRUE);
4060 			continue;
4061 		}
4062 
4063 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4064 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4065 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4066 			    "Resetting controller.\n", response);
4067 			mpt_reset(mpt, TRUE);
4068 			continue;
4069 		}
4070 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4071 		"abort of req %p:%u completed\n", req, req->serno);
4072 	}
4073 }
4074 
4075 /************************ Target Mode Support ****************************/
4076 static void
4077 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4078 {
4079 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4080 	PTR_SGE_TRANSACTION32 tep;
4081 	PTR_SGE_SIMPLE32 se;
4082 	bus_addr_t paddr;
4083 	uint32_t fl;
4084 
4085 	paddr = req->req_pbuf;
4086 	paddr += MPT_RQSL(mpt);
4087 
4088 	fc = req->req_vbuf;
4089 	memset(fc, 0, MPT_REQUEST_AREA);
4090 	fc->BufferCount = 1;
4091 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4092 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
4093 
4094 	/*
4095 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
4096 	 * consist of a TE SGL element (with details length of zero)
4097 	 * followed by a SIMPLE SGL element which holds the address
4098 	 * of the buffer.
4099 	 */
4100 
4101 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4102 
4103 	tep->ContextSize = 4;
4104 	tep->Flags = 0;
4105 	tep->TransactionContext[0] = htole32(ioindex);
4106 
4107 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4108 	fl =
4109 		MPI_SGE_FLAGS_HOST_TO_IOC	|
4110 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4111 		MPI_SGE_FLAGS_LAST_ELEMENT	|
4112 		MPI_SGE_FLAGS_END_OF_LIST	|
4113 		MPI_SGE_FLAGS_END_OF_BUFFER;
4114 	fl <<= MPI_SGE_FLAGS_SHIFT;
4115 	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4116 	se->FlagsLength = htole32(fl);
4117 	se->Address = htole32((uint32_t) paddr);
4118 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4119 	    "add ELS index %d ioindex %d for %p:%u\n",
4120 	    req->index, ioindex, req, req->serno);
4121 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4122 	    ("mpt_fc_post_els: request not locked"));
4123 	mpt_send_cmd(mpt, req);
4124 }
4125 
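/*
 * Post a command buffer to the IOC so it can receive a new target
 * mode command.  The buffer lives MPT_RQSL() bytes into the request's
 * own area and is described to the chip by its ioindex.
 */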
4126 static void
4127 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4128 {
4129 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4130 	PTR_CMD_BUFFER_DESCRIPTOR cb;
4131 	bus_addr_t paddr;
4132 
4133 	paddr = req->req_pbuf;
4134 	paddr += MPT_RQSL(mpt);
4135 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4136 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4137 
4138 	fc = req->req_vbuf;
4139 	fc->BufferCount = 1;
4140 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4141 	fc->BufferLength = MIN(MPT_REQUEST_AREA - MPT_RQSL(mpt), UINT8_MAX);
4142 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4143 
4144 	cb = &fc->Buffer[0];
4145 	cb->IoIndex = htole16(ioindex);
4146 	cb->u.PhysicalAddress32 = htole32((U32) paddr);
4147 
4148 	mpt_check_doorbell(mpt);
4149 	mpt_send_cmd(mpt, req);
4150 }
4151 
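/*
 * Allocate and post the fixed pool of ELS buffers that a Fibre
 * Channel IOC uses to hand us incoming extended link service frames.
 * A no-op (returning TRUE) on non-FC or already-initialized instances.
 */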
4152 static int
4153 mpt_add_els_buffers(struct mpt_softc *mpt)
4154 {
4155 	int i;
4156 
4157 	if (mpt->is_fc == 0) {
4158 		return (TRUE);
4159 	}
4160 
4161 	if (mpt->els_cmds_allocated) {
4162 		return (TRUE);
4163 	}
4164 
4165 	mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4166 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4167 
4168 	if (mpt->els_cmd_ptrs == NULL) {
4169 		return (FALSE);
4170 	}
4171 
4172 	/*
4173 	 * Feed the chip some ELS buffer resources
4174 	 */
4175 	for (i = 0; i < MPT_MAX_ELS; i++) {
4176 		request_t *req = mpt_get_request(mpt, FALSE);
4177 		if (req == NULL) {
4178 			break;
4179 		}
4180 		req->state |= REQ_STATE_LOCKED;
4181 		mpt->els_cmd_ptrs[i] = req;
4182 		mpt_fc_post_els(mpt, req, i);
4183 	}
4184 
4185 	if (i == 0) {
4186 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
4187 		free(mpt->els_cmd_ptrs, M_DEVBUF);
4188 		mpt->els_cmd_ptrs = NULL;
4189 		return (FALSE);
4190 	}
4191 	if (i != MPT_MAX_ELS) {
4192 		mpt_lprt(mpt, MPT_PRT_INFO,
4193 		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
4194 	}
4195 	mpt->els_cmds_allocated = i;
4196 	return(TRUE);
4197 }
4198 
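/*
 * Pre-allocate requests to serve as target command buffers - up to
 * half of the request pool or mpt_max_tgtcmds, whichever is smaller -
 * and post each one to the IOC.
 */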
4199 static int
4200 mpt_add_target_commands(struct mpt_softc *mpt)
4201 {
4202 	int i, max;
4203 
4204 	if (mpt->tgt_cmd_ptrs) {
4205 		return (TRUE);
4206 	}
4207 
4208 	max = MPT_MAX_REQUESTS(mpt) >> 1;
4209 	if (max > mpt->mpt_max_tgtcmds) {
4210 		max = mpt->mpt_max_tgtcmds;
4211 	}
4212 	mpt->tgt_cmd_ptrs =
4213 	    malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4214 	if (mpt->tgt_cmd_ptrs == NULL) {
4215 		mpt_prt(mpt,
4216 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
4217 		return (FALSE);
4218 	}
4219 
4220 	for (i = 0; i < max; i++) {
4221 		request_t *req;
4222 
4223 		req = mpt_get_request(mpt, FALSE);
4224 		if (req == NULL) {
4225 			break;
4226 		}
4227 		req->state |= REQ_STATE_LOCKED;
4228 		mpt->tgt_cmd_ptrs[i] = req;
4229 		mpt_post_target_command(mpt, req, i);
4230 	}
4231 
4232 	if (i == 0) {
4233 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4234 		free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4235 		mpt->tgt_cmd_ptrs = NULL;
4236 		return (FALSE);
4237 	}
4238 
4239 	mpt->tgt_cmds_allocated = i;
4240 
4241 	if (i < max) {
4242 		mpt_lprt(mpt, MPT_PRT_INFO,
4243 		    "added %d of %d target bufs\n", i, max);
4244 	}
4245 	return (i);
4246 }
4247 
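/*
 * Enable target mode for one lun (or the wildcard lun).  The first
 * enable also flips the port into target-enabled state, resetting
 * the FC link, presumably so that initiators notice the role change.
 */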
4248 static int
4249 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4250 {
4251 
4252 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4253 		mpt->twildcard = 1;
4254 	} else if (lun >= MPT_MAX_LUNS) {
4255 		return (EINVAL);
4256 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4257 		return (EINVAL);
4258 	}
4259 	if (mpt->tenabled == 0) {
4260 		if (mpt->is_fc) {
4261 			(void) mpt_fc_reset_link(mpt, 0);
4262 		}
4263 		mpt->tenabled = 1;
4264 	}
4265 	if (lun == CAM_LUN_WILDCARD) {
4266 		mpt->trt_wildcard.enabled = 1;
4267 	} else {
4268 		mpt->trt[lun].enabled = 1;
4269 	}
4270 	return (0);
4271 }
4272 
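/*
 * Disable target mode for one lun (or the wildcard lun).  When the
 * last enabled lun goes away the port drops out of target-enabled
 * state, again resetting the FC link where applicable.
 */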
4273 static int
4274 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4275 {
4276 	int i;
4277 
4278 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4279 		mpt->twildcard = 0;
4280 	} else if (lun >= MPT_MAX_LUNS) {
4281 		return (EINVAL);
4282 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4283 		return (EINVAL);
4284 	}
4285 	if (lun == CAM_LUN_WILDCARD) {
4286 		mpt->trt_wildcard.enabled = 0;
4287 	} else {
4288 		mpt->trt[lun].enabled = 0;
4289 	}
4290 	for (i = 0; i < MPT_MAX_LUNS; i++) {
4291 		if (mpt->trt[i].enabled) {
4292 			break;
4293 		}
4294 	}
4295 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4296 		if (mpt->is_fc) {
4297 			(void) mpt_fc_reset_link(mpt, 0);
4298 		}
4299 		mpt->tenabled = 0;
4300 	}
4301 	return (0);
4302 }
4303 
4304 /*
4305  * Called with MPT lock held
4306  */
4307 static void
4308 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4309 {
4310 	struct ccb_scsiio *csio = &ccb->csio;
4311 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4312 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4313 
4314 	switch (tgt->state) {
4315 	case TGT_STATE_IN_CAM:
4316 		break;
4317 	case TGT_STATE_MOVING_DATA:
4318 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4319 		xpt_freeze_simq(mpt->sim, 1);
4320 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4321 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4322 		xpt_done(ccb);
4323 		return;
4324 	default:
4325 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4326 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4327 		mpt_tgt_dump_req_state(mpt, cmd_req);
4328 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4329 		xpt_done(ccb);
4330 		return;
4331 	}
4332 
4333 	if (csio->dxfer_len) {
4334 		bus_dmamap_callback_t *cb;
4335 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4336 		request_t *req;
4337 		int error;
4338 
4339 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4340 		    ("dxfer_len %u but direction is NONE", csio->dxfer_len));
4341 
4342 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4343 			if (mpt->outofbeer == 0) {
4344 				mpt->outofbeer = 1;
4345 				xpt_freeze_simq(mpt->sim, 1);
4346 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4347 			}
4348 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4349 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4350 			xpt_done(ccb);
4351 			return;
4352 		}
4353 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4354 		if (sizeof (bus_addr_t) > 4) {
4355 			cb = mpt_execute_req_a64;
4356 		} else {
4357 			cb = mpt_execute_req;
4358 		}
4359 
4360 		req->ccb = ccb;
4361 		ccb->ccb_h.ccb_req_ptr = req;
4362 
4363 		/*
4364 		 * Record the currently active ccb and the
4365 		 * request for it in our target state area.
4366 		 */
4367 		tgt->ccb = ccb;
4368 		tgt->req = req;
4369 
4370 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4371 		ta = req->req_vbuf;
4372 
4373 		if (mpt->is_sas) {
4374 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4375 			     cmd_req->req_vbuf;
4376 			ta->QueueTag = ssp->InitiatorTag;
4377 		} else if (mpt->is_spi) {
4378 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4379 			     cmd_req->req_vbuf;
4380 			ta->QueueTag = sp->Tag;
4381 		}
4382 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4383 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4384 		ta->ReplyWord = htole32(tgt->reply_desc);
4385 		be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(csio->ccb_h.target_lun));
4386 
4387 		ta->RelativeOffset = tgt->bytes_xfered;
4388 		ta->DataLength = ccb->csio.dxfer_len;
4389 		if (ta->DataLength > tgt->resid) {
4390 			ta->DataLength = tgt->resid;
4391 		}
4392 
4393 		/*
4394 		 * XXX Should be done after data transfer completes?
4395 		 */
4396 		csio->resid = csio->dxfer_len - ta->DataLength;
4397 		tgt->resid -= csio->dxfer_len;
4398 		tgt->bytes_xfered += csio->dxfer_len;
4399 
4400 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4401 			ta->TargetAssistFlags |=
4402 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4403 		}
4404 
4405 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4406 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4407 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4408 			ta->TargetAssistFlags |=
4409 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4410 		}
4411 #endif
4412 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4413 
4414 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4415 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4416 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4417 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4418 
4419 		error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
4420 		    cb, req, 0);
4421 		if (error == EINPROGRESS) {
4422 			xpt_freeze_simq(mpt->sim, 1);
4423 			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4424 		}
4425 	} else {
4426 		/*
4427 		 * XXX: I don't know why this seems to happen, but
4428 		 * XXX: completing the CCB seems to make things happy.
4429 		 * XXX: This seems to happen if the initiator requests
4430 		 * XXX: enough data that we have to do multiple CTIOs.
4431 		 */
4432 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4433 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4434 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4435 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4436 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4437 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4438 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4439 			xpt_done(ccb);
4440 			return;
4441 		}
4442 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status,
4443 		    (void *)&csio->sense_data,
4444 		    (ccb->ccb_h.flags & CAM_SEND_SENSE) ?
4445 		     csio->sense_len : 0);
4446 	}
4447 }
4448 
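/*
 * Synthesize a data movement entirely within the driver, without a
 * CAM ccb.  This is used to answer INQUIRY and REQUEST SENSE for
 * luns that have no upstream listener: the payload is copied into
 * the spare half of the request area and described to the chip with
 * a single 32-bit simple SGE.
 */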
4449 static void
4450 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4451     lun_id_t lun, int send, uint8_t *data, size_t length)
4452 {
4453 	mpt_tgt_state_t *tgt;
4454 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4455 	SGE_SIMPLE32 *se;
4456 	uint32_t flags;
4457 	uint8_t *dptr;
4458 	bus_addr_t pptr;
4459 	request_t *req;
4460 
4461 	/*
4462 	 * We enter with resid set to the data load for the command.
4463 	 */
4464 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4465 	if (length == 0 || tgt->resid == 0) {
4466 		tgt->resid = 0;
4467 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL, 0);
4468 		return;
4469 	}
4470 
4471 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4472 		mpt_prt(mpt, "out of resources- dropping local response\n");
4473 		return;
4474 	}
4475 	tgt->is_local = 1;
4476 
4477 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4478 	ta = req->req_vbuf;
4479 
4480 	if (mpt->is_sas) {
4481 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4482 		ta->QueueTag = ssp->InitiatorTag;
4483 	} else if (mpt->is_spi) {
4484 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4485 		ta->QueueTag = sp->Tag;
4486 	}
4487 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4488 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4489 	ta->ReplyWord = htole32(tgt->reply_desc);
4490 	be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
4491 	ta->RelativeOffset = 0;
4492 	ta->DataLength = length;
4493 
4494 	dptr = req->req_vbuf;
4495 	dptr += MPT_RQSL(mpt);
4496 	pptr = req->req_pbuf;
4497 	pptr += MPT_RQSL(mpt);
4498 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4499 
4500 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4501 	memset(se, 0, sizeof (*se));
4502 
4503 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4504 	if (send) {
4505 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4506 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4507 	}
4508 	se->Address = pptr;
4509 	MPI_pSGE_SET_LENGTH(se, length);
4510 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4511 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4512 	MPI_pSGE_SET_FLAGS(se, flags);
4513 
4514 	tgt->ccb = NULL;
4515 	tgt->req = req;
4516 	tgt->resid -= length;
4517 	tgt->bytes_xfered = length;
4518 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4519 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4520 #else
4521 	tgt->state = TGT_STATE_MOVING_DATA;
4522 #endif
4523 	mpt_send_cmd(mpt, req);
4524 }
4525 
4526 /*
4527  * Abort queued up CCBs
4528  */
4529 static cam_status
4530 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4531 {
4532 	struct mpt_hdr_stailq *lp;
4533 	struct ccb_hdr *srch;
4534 	union ccb *accb = ccb->cab.abort_ccb;
4535 	tgt_resource_t *trtp;
4536 	mpt_tgt_state_t *tgt;
4537 	request_t *req;
4538 	uint32_t tag;
4539 
4540 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4541 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD)
4542 		trtp = &mpt->trt_wildcard;
4543 	else
4544 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4545 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4546 		lp = &trtp->atios;
4547 		tag = accb->atio.tag_id;
4548 	} else {
4549 		lp = &trtp->inots;
4550 		tag = accb->cin1.tag_id;
4551 	}
4552 
4553 	/* Search the CCB among queued. */
4554 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4555 		if (srch != &accb->ccb_h)
4556 			continue;
4557 		STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4558 		accb->ccb_h.status = CAM_REQ_ABORTED;
4559 		xpt_done(accb);
4560 		return (CAM_REQ_CMP);
4561 	}
4562 
4563 	/* Search the CCB among running. */
4564 	req = MPT_TAG_2_REQ(mpt, tag);
4565 	tgt = MPT_TGT_STATE(mpt, req);
4566 	if (tgt->tag_id == tag) {
4567 		mpt_abort_target_cmd(mpt, req);
4568 		return (CAM_REQ_CMP);
4569 	}
4570 
4571 	return (CAM_UA_ABORT);
4572 }
4573 
4574 /*
4575  * Ask the MPT to abort the current target command
4576  */
4577 static int
4578 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4579 {
4580 	int error;
4581 	request_t *req;
4582 	PTR_MSG_TARGET_MODE_ABORT abtp;
4583 
4584 	req = mpt_get_request(mpt, FALSE);
4585 	if (req == NULL) {
4586 		return (-1);
4587 	}
4588 	abtp = req->req_vbuf;
4589 	memset(abtp, 0, sizeof (*abtp));
4590 
4591 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4592 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4593 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4594 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4595 	error = 0;
4596 	if (mpt->is_fc || mpt->is_sas) {
4597 		mpt_send_cmd(mpt, req);
4598 	} else {
4599 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4600 	}
4601 	return (error);
4602 }
4603 
4604 /*
4605  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4606  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4607  * FC929 to set bogus FC_RSP fields (nonzero residuals
4608  * but w/o RESID fields set). This causes QLogic initiators
4609  * to think maybe that a frame was lost.
4610  *
4611  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4612  * we use allocated requests to do TARGET_ASSIST and we
4613  * need to know when to release them.
4614  */
4615 
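/*
 * Send SCSI status (and optionally sense data or a full FCP_RSP
 * payload) for a target mode command.  A freshly allocated request
 * carries the TARGET_STATUS_SEND; for good status with no response
 * data we let the chip auto-generate the frame, otherwise the
 * response payload is built in the spare half of the request area.
 */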
4616 static void
4617 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4618     uint8_t status, uint8_t const *sense_data, u_int sense_len)
4619 {
4620 	uint8_t *cmd_vbuf;
4621 	mpt_tgt_state_t *tgt;
4622 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4623 	request_t *req;
4624 	bus_addr_t paddr;
4625 	int resplen = 0;
4626 	uint32_t fl;
4627 
4628 	cmd_vbuf = cmd_req->req_vbuf;
4629 	cmd_vbuf += MPT_RQSL(mpt);
4630 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4631 
4632 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4633 		if (mpt->outofbeer == 0) {
4634 			mpt->outofbeer = 1;
4635 			xpt_freeze_simq(mpt->sim, 1);
4636 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4637 		}
4638 		if (ccb) {
4639 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4640 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4641 			xpt_done(ccb);
4642 		} else {
4643 			mpt_prt(mpt,
4644 			    "could not allocate status request- dropping\n");
4645 		}
4646 		return;
4647 	}
4648 	req->ccb = ccb;
4649 	if (ccb) {
4650 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4651 		ccb->ccb_h.ccb_req_ptr = req;
4652 	}
4653 
4654 	/*
4655 	 * Record the currently active ccb, if any, and the
4656 	 * request for it in our target state area.
4657 	 */
4658 	tgt->ccb = ccb;
4659 	tgt->req = req;
4660 	tgt->state = TGT_STATE_SENDING_STATUS;
4661 
4662 	tp = req->req_vbuf;
4663 	paddr = req->req_pbuf;
4664 	paddr += MPT_RQSL(mpt);
4665 
4666 	memset(tp, 0, sizeof (*tp));
4667 	tp->StatusCode = status;
4668 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4669 	if (mpt->is_fc) {
4670 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4671 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4672 		uint8_t *sts_vbuf;
4673 		uint32_t *rsp;
4674 
4675 		sts_vbuf = req->req_vbuf;
4676 		sts_vbuf += MPT_RQSL(mpt);
4677 		rsp = (uint32_t *) sts_vbuf;
4678 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4679 
4680 		/*
4681 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4682 		 * It has to be big-endian in memory and is organized
4683 		 * in 32 bit words, which are much easier to deal with
4684 		 * as words which are swizzled as needed.
4685 		 *
4686 		 * All we're filling here is the FC_RSP payload.
4687 		 * We may just have the chip synthesize it if
4688 		 * we have no residual and an OK status.
4689 		 *
4690 		 */
4691 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4692 
4693 		rsp[2] = htobe32(status);
4694 #define	MIN_FCP_RESPONSE_SIZE	24
4695 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4696 		resplen = MIN_FCP_RESPONSE_SIZE;
4697 #endif
4698 		if (tgt->resid < 0) {
4699 			rsp[2] |= htobe32(0x400); /* FCP_RESID_OVER */
4700 			rsp[3] = htobe32(-tgt->resid);
4701 			resplen = MIN_FCP_RESPONSE_SIZE;
4702 		} else if (tgt->resid > 0) {
4703 			rsp[2] |= htobe32(0x800); /* FCP_RESID_UNDER */
4704 			rsp[3] = htobe32(tgt->resid);
4705 			resplen = MIN_FCP_RESPONSE_SIZE;
4706 		}
4707 		if (sense_len > 0) {
4708 			rsp[2] |= htobe32(0x200); /* FCP_SNS_LEN_VALID */
4709 			rsp[4] = htobe32(sense_len);
4710 			memcpy(&rsp[6], sense_data, sense_len);
4711 			resplen = MIN_FCP_RESPONSE_SIZE + sense_len;
4712 		}
4713 	} else if (mpt->is_sas) {
4714 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4715 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4716 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4717 	} else {
4718 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4719 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4720 		tp->QueueTag = htole16(sp->Tag);
4721 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4722 	}
4723 
4724 	tp->ReplyWord = htole32(tgt->reply_desc);
4725 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4726 
4727 #ifdef	WE_CAN_USE_AUTO_REPOST
4728 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4729 #endif
4730 	if (status == SCSI_STATUS_OK && resplen == 0) {
4731 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4732 	} else {
4733 		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4734 		fl = MPI_SGE_FLAGS_HOST_TO_IOC |
4735 		     MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4736 		     MPI_SGE_FLAGS_LAST_ELEMENT |
4737 		     MPI_SGE_FLAGS_END_OF_LIST |
4738 		     MPI_SGE_FLAGS_END_OF_BUFFER;
4739 		fl <<= MPI_SGE_FLAGS_SHIFT;
4740 		fl |= resplen;
4741 		tp->StatusDataSGE.FlagsLength = htole32(fl);
4742 	}
4743 
4744 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4745 	    "STATUS_CCB %p (with%s sense) tag %x req %p:%u resid %u\n",
4746 	    ccb, sense_len > 0 ? "" : "out", tgt->tag_id,
4747 	    req, req->serno, tgt->resid);
4748 	if (mpt->verbose > MPT_PRT_DEBUG)
4749 		mpt_print_request(req->req_vbuf);
4750 	if (ccb) {
4751 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4752 		mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb);
4753 	}
4754 	mpt_send_cmd(mpt, req);
4755 }
4756 
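/*
 * Deliver a task management request to the peripheral driver by
 * dequeueing a posted immediate-notify ccb, translating the TMF code
 * to the corresponding SCSI message, and completing the ccb with
 * CAM_MESSAGE_RECV.  If no INOT is available, we answer the initiator
 * with BUSY status instead.
 */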
4757 static void
4758 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4759     tgt_resource_t *trtp, int init_id)
4760 {
4761 	struct ccb_immediate_notify *inot;
4762 	mpt_tgt_state_t *tgt;
4763 
4764 	tgt = MPT_TGT_STATE(mpt, req);
4765 	inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots);
4766 	if (inot == NULL) {
4767 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4768 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL, 0);
4769 		return;
4770 	}
4771 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4772 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4773 	    "Get FREE INOT %p lun %jx\n", inot,
4774 	    (uintmax_t)inot->ccb_h.target_lun);
4775 
4776 	inot->initiator_id = init_id;	/* XXX */
4777 	inot->tag_id = tgt->tag_id;
4778 	inot->seq_id = 0;
4779 	/*
4780 	 * This is a somewhat grotesque attempt to map from task management
4781 	 * to old style SCSI messages. God help us all.
4782 	 */
4783 	switch (fc) {
4784 	case MPT_QUERY_TASK_SET:
4785 		inot->arg = MSG_QUERY_TASK_SET;
4786 		break;
4787 	case MPT_ABORT_TASK_SET:
4788 		inot->arg = MSG_ABORT_TASK_SET;
4789 		break;
4790 	case MPT_CLEAR_TASK_SET:
4791 		inot->arg = MSG_CLEAR_TASK_SET;
4792 		break;
4793 	case MPT_QUERY_ASYNC_EVENT:
4794 		inot->arg = MSG_QUERY_ASYNC_EVENT;
4795 		break;
4796 	case MPT_LOGICAL_UNIT_RESET:
4797 		inot->arg = MSG_LOGICAL_UNIT_RESET;
4798 		break;
4799 	case MPT_TARGET_RESET:
4800 		inot->arg = MSG_TARGET_RESET;
4801 		break;
4802 	case MPT_CLEAR_ACA:
4803 		inot->arg = MSG_CLEAR_ACA;
4804 		break;
4805 	default:
4806 		inot->arg = MSG_NOOP;
4807 		break;
4808 	}
4809 	tgt->ccb = (union ccb *) inot;
4810 	inot->ccb_h.status = CAM_MESSAGE_RECV;
4811 	xpt_done((union ccb *)inot);
4812 }
4813 
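/*
 * Handle a newly arrived target mode command.  The raw FC/SAS/SPI
 * command buffer is parsed for its cdb, lun and tag; task management
 * frames are routed to mpt_scsi_tgt_tsk_mgmt(), commands for luns we
 * don't serve are answered locally, and everything else is handed up
 * to CAM through a posted ATIO.
 */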
4814 static void
4815 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4816 {
4817 	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
4818 	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
4819 	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
4820 	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
4821 	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
4822 	     '0',  '0',  '0',  '1'
4823 	};
4824 	struct ccb_accept_tio *atiop;
4825 	lun_id_t lun;
4826 	int tag_action = 0;
4827 	mpt_tgt_state_t *tgt;
4828 	tgt_resource_t *trtp = NULL;
4829 	U8 *lunptr;
4830 	U8 *vbuf;
4831 	U16 ioindex;
4832 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4833 	uint8_t *cdbp;
4834 
4835 	/*
4836 	 * Stash info for the current command where we can get at it later.
4837 	 */
4838 	vbuf = req->req_vbuf;
4839 	vbuf += MPT_RQSL(mpt);
4840 	if (mpt->verbose >= MPT_PRT_DEBUG) {
4841 		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
4842 		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
4843 		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
4844 		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
4845 	}
4846 
4847 	/*
4848 	 * Get our state pointer set up.
4849 	 */
4850 	tgt = MPT_TGT_STATE(mpt, req);
4851 	if (tgt->state != TGT_STATE_LOADED) {
4852 		mpt_tgt_dump_req_state(mpt, req);
4853 		panic("bad target state in mpt_scsi_tgt_atio");
4854 	}
4855 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
4856 	tgt->state = TGT_STATE_IN_CAM;
4857 	tgt->reply_desc = reply_desc;
4858 	ioindex = GET_IO_INDEX(reply_desc);
4859 
4860 	/*
4861 	 * The tag we construct here allows us to find the
4862 	 * original request that the command came in with.
4863 	 *
4864 	 * This way we don't have to depend on anything but the
4865 	 * tag to find things when CCBs show back up from CAM.
4866 	 */
4867 	tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4868 
4869 	if (mpt->is_fc) {
4870 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4871 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4872 		if (fc->FcpCntl[2]) {
4873 			/*
4874 			 * Task Management Request
4875 			 */
4876 			switch (fc->FcpCntl[2]) {
4877 			case 0x1:
4878 				fct = MPT_QUERY_TASK_SET;
4879 				break;
4880 			case 0x2:
4881 				fct = MPT_ABORT_TASK_SET;
4882 				break;
4883 			case 0x4:
4884 				fct = MPT_CLEAR_TASK_SET;
4885 				break;
4886 			case 0x8:
4887 				fct = MPT_QUERY_ASYNC_EVENT;
4888 				break;
4889 			case 0x10:
4890 				fct = MPT_LOGICAL_UNIT_RESET;
4891 				break;
4892 			case 0x20:
4893 				fct = MPT_TARGET_RESET;
4894 				break;
4895 			case 0x40:
4896 				fct = MPT_CLEAR_ACA;
4897 				break;
4898 			default:
4899 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4900 				    fc->FcpCntl[2]);
4901 				mpt_scsi_tgt_status(mpt, NULL, req,
4902 				    SCSI_STATUS_OK, NULL, 0);
4903 				return;
4904 			}
4905 		} else {
4906 			switch (fc->FcpCntl[1]) {
4907 			case 0:
4908 				tag_action = MSG_SIMPLE_Q_TAG;
4909 				break;
4910 			case 1:
4911 				tag_action = MSG_HEAD_OF_Q_TAG;
4912 				break;
4913 			case 2:
4914 				tag_action = MSG_ORDERED_Q_TAG;
4915 				break;
4916 			default:
4917 				/*
4918 				 * Bah. Ignore Untagged Queueing and ACA
4919 				 */
4920 				tag_action = MSG_SIMPLE_Q_TAG;
4921 				break;
4922 			}
4923 		}
4924 		tgt->resid = be32toh(fc->FcpDl);
4925 		cdbp = fc->FcpCdb;
4926 		lunptr = fc->FcpLun;
4927 		tgt->itag = fc->OptionalOxid;
4928 	} else if (mpt->is_sas) {
4929 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4930 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4931 		cdbp = ssp->CDB;
4932 		lunptr = ssp->LogicalUnitNumber;
4933 		tgt->itag = ssp->InitiatorTag;
4934 	} else {
4935 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4936 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4937 		cdbp = sp->CDB;
4938 		lunptr = sp->LogicalUnitNumber;
4939 		tgt->itag = sp->Tag;
4940 	}
4941 
4942 	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(lunptr));
4943 
4944 	/*
4945 	 * Deal with non-enabled or bad luns here.
4946 	 */
4947 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4948 	    mpt->trt[lun].enabled == 0) {
4949 		if (mpt->twildcard) {
4950 			trtp = &mpt->trt_wildcard;
4951 		} else if (fct == MPT_NIL_TMT_VALUE) {
4952 			/*
4953 			 * In this case, we haven't got an upstream listener
4954 			 * for either a specific lun or wildcard luns. We
4955 			 * have to make some sensible response. For regular
4956 			 * inquiry, just return some NOT HERE inquiry data.
4957 			 * For VPD inquiry, report illegal field in cdb.
4958 			 * For REQUEST SENSE, just return NO SENSE data.
4959 			 * REPORT LUNS gets illegal command.
4960 			 * All other commands get 'no such device'.
4961 			 */
4962 			uint8_t sense[MPT_SENSE_SIZE];
4963 			size_t len;
4964 
4965 			memset(sense, 0, sizeof(sense));
4966 			sense[0] = 0xf0;
4967 			sense[2] = 0x5;
4968 			sense[7] = 0x8;
4969 
4970 			switch (cdbp[0]) {
4971 			case INQUIRY:
4972 			{
4973 				if (cdbp[1] != 0) {
4974 					sense[12] = 0x26;
4975 					sense[13] = 0x01;
4976 					break;
4977 				}
4978 				len = min(tgt->resid, cdbp[4]);
4979 				len = min(len, sizeof (null_iqd));
4980 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4981 				    "local inquiry %ld bytes\n", (long) len);
4982 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4983 				    null_iqd, len);
4984 				return;
4985 			}
4986 			case REQUEST_SENSE:
4987 			{
4988 				sense[2] = 0x0;
4989 				len = min(tgt->resid, cdbp[4]);
4990 				len = min(len, sizeof (sense));
4991 				mpt_lprt(mpt, MPT_PRT_DEBUG,
4992 				    "local reqsense %ld bytes\n", (long) len);
4993 				mpt_scsi_tgt_local(mpt, req, lun, 1,
4994 				    sense, len);
4995 				return;
4996 			}
4997 			case REPORT_LUNS:
4998 				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
4999 				sense[12] = 0x26;
5000 				return;
5001 			default:
5002 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5003 				    "CMD 0x%x to unmanaged lun %jx\n",
5004 				    cdbp[0], (uintmax_t)lun);
5005 				sense[12] = 0x25;
5006 				break;
5007 			}
5008 			mpt_scsi_tgt_status(mpt, NULL, req,
5009 			    SCSI_STATUS_CHECK_COND, sense, sizeof(sense));
5010 			return;
5011 		}
5012 		/* otherwise, leave trtp NULL */
5013 	} else {
5014 		trtp = &mpt->trt[lun];
5015 	}
5016 
5017 	/*
5018 	 * Deal with any task management
5019 	 */
5020 	if (fct != MPT_NIL_TMT_VALUE) {
5021 		if (trtp == NULL) {
5022 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
5023 			    fct);
5024 			mpt_scsi_tgt_status(mpt, NULL, req,
5025 			    SCSI_STATUS_OK, NULL, 0);
5026 		} else {
5027 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5028 			    GET_INITIATOR_INDEX(reply_desc));
5029 		}
5030 		return;
5031 	}
5032 
5033 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5034 	if (atiop == NULL) {
5035 		mpt_lprt(mpt, MPT_PRT_WARN,
5036 		    "no ATIOs for lun %jx- sending back %s\n", (uintmax_t)lun,
5037 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
5038 		mpt_scsi_tgt_status(mpt, NULL, req,
5039 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5040 		    NULL, 0);
5041 		return;
5042 	}
5043 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5044 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
5045 	    "Get FREE ATIO %p lun %jx\n", atiop,
5046 	    (uintmax_t)atiop->ccb_h.target_lun);
5047 	atiop->ccb_h.ccb_mpt_ptr = mpt;
5048 	atiop->ccb_h.status = CAM_CDB_RECVD;
5049 	atiop->ccb_h.target_lun = lun;
5050 	atiop->sense_len = 0;
5051 	atiop->tag_id = tgt->tag_id;
5052 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
5053 	atiop->cdb_len = 16;
5054 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
5055 	if (tag_action) {
5056 		atiop->tag_action = tag_action;
5057 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
5058 	}
5059 	if (mpt->verbose >= MPT_PRT_DEBUG) {
5060 		int i;
5061 		mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop,
5062 		    (uintmax_t)atiop->ccb_h.target_lun);
5063 		for (i = 0; i < atiop->cdb_len; i++) {
5064 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
5065 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
5066 		}
5067 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
5068 		    tgt->itag, tgt->tag_id, tgt->reply_desc, tgt->resid);
5069 	}
5070 
5071 	xpt_done((union ccb *)atiop);
5072 }
5073 
5074 static void
5075 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
5076 {
5077 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5078 
5079 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
5080 	    "nx %d tag 0x%08x itag 0x%04x state=%d\n", req, req->serno,
5081 	    tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb,
5082 	    tgt->req, tgt->nxfers, tgt->tag_id, tgt->itag, tgt->state);
5083 }
5084 
5085 static void
5086 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
5087 {
5088 
5089 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
5090 	    req->index, req->index, req->state);
5091 	mpt_tgt_dump_tgt_state(mpt, req);
5092 }
5093 
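/*
 * Reply handler for all target mode requests.  A NULL reply_frame
 * denotes a "turbo" (context-only) completion, in which case the
 * per-request target state machine decides what happens next: a new
 * command arriving, completion of a data move, or completion of a
 * status send.
 */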
5094 static int
5095 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
5096     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
5097 {
5098 	int dbg;
5099 	union ccb *ccb;
5100 	U16 status;
5101 
	if (reply_frame == NULL) {
		/*
		 * Figure out what the state of the command is.
		 */
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
		if (tgt->req) {
			mpt_req_not_spcl(mpt, tgt->req,
			    "turbo scsi_tgt_reply associated req", __LINE__);
		}
#endif
		switch (tgt->state) {
		case TGT_STATE_LOADED:
			/*
			 * This is a new command starting.
			 */
			mpt_scsi_tgt_atio(mpt, req, reply_desc);
			break;
		case TGT_STATE_MOVING_DATA:
		{
			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request moving data");
				/* NOTREACHED */
			}
			if (ccb == NULL) {
				if (tgt->is_local == 0) {
					panic("mpt: turbo target reply with "
					    "null associated ccb moving data");
					/* NOTREACHED */
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_ASSIST local done\n");
				TAILQ_REMOVE(&mpt->request_pending_list,
				    tgt->req, links);
				mpt_free_request(mpt, tgt->req);
				tgt->req = NULL;
				mpt_scsi_tgt_status(mpt, NULL, req,
				    0, NULL, 0);
				return (TRUE);
			}
			tgt->ccb = NULL;
			tgt->nxfers++;
			mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
			/*
			 * Free the Target Assist Request
			 */
			KASSERT(tgt->req->ccb == ccb,
			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
			    tgt->req->serno, tgt->req->ccb));
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * Do we need to send status now? That is, are
			 * we done with all our data transfers?
			 */
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("zero ccb sts at %d", __LINE__));
				tgt->state = TGT_STATE_IN_CAM;
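				/*
				 * If we had run out of resources and frozen
				 * the SIM queue, ask CAM to release (thaw)
				 * it as part of this completion.
				 */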
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
				break;
			}
			/*
			 * Otherwise, send status (and sense)
			 */
			mpt_scsi_tgt_status(mpt, ccb, req,
			    ccb->csio.scsi_status,
			    (void *)&ccb->csio.sense_data,
			    (ccb->ccb_h.flags & CAM_SEND_SENSE) ?
			     ccb->csio.sense_len : 0);
			break;
		}
		case TGT_STATE_SENDING_STATUS:
		case TGT_STATE_MOVING_DATA_AND_STATUS:
		{
			int ioindex;
			ccb = tgt->ccb;

			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request sending status");
				/* NOTREACHED */
			}

			if (ccb) {
				tgt->ccb = NULL;
				if (tgt->state ==
				    TGT_STATE_MOVING_DATA_AND_STATUS) {
					tgt->nxfers++;
				}
				mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
					ccb->ccb_h.status |= CAM_SENT_SENSE;
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS tag %x sts %x flgs %x req "
				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
				    ccb->ccb_h.flags, tgt->req);
				/*
				 * Free the Target Send Status Request
				 */
				KASSERT(tgt->req->ccb == ccb,
				    ("tgt->req %p:%u tgt->req->ccb %p",
				    tgt->req, tgt->req->serno, tgt->req->ccb));
				/*
				 * Notify CAM that we're done
				 */
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("ZERO ccb sts at %d", __LINE__));
				tgt->ccb = NULL;
			} else {
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS non-CAM for req %p:%u\n",
				    tgt->req, tgt->req->serno);
			}
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * And re-post the Command Buffer.
			 * This will reset the state.
			 */
			ioindex = GET_IO_INDEX(reply_desc);
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			tgt->is_local = 0;
			mpt_post_target_command(mpt, req, ioindex);

			/*
			 * And post a done for anyone who cares
			 */
			if (ccb) {
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				xpt_done(ccb);
			}
			break;
		}
		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
			tgt->state = TGT_STATE_LOADED;
			break;
		default:
			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
			    "Reply Function\n", tgt->state);
		}
		return (TRUE);
	}

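	/*
	 * A full reply frame: IOCStatus arrives in little-endian wire order
	 * and determines how loudly we log this completion.
	 */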
	status = le16toh(reply_frame->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		dbg = MPT_PRT_ERROR;
	} else {
		dbg = MPT_PRT_DEBUG1;
	}

	mpt_lprt(mpt, dbg,
	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
	     req, req->serno, reply_frame, reply_frame->Function, status);

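	/*
	 * Dispatch on the MPI function code carried in the reply.
	 */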
	switch (reply_frame->Function) {
	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
	{
		mpt_tgt_state_t *tgt;
#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
#endif
		if (status != MPI_IOCSTATUS_SUCCESS) {
			/*
			 * XXX What to do?
			 */
			break;
		}
		tgt = MPT_TGT_STATE(mpt, req);
		KASSERT(tgt->state == TGT_STATE_LOADING,
		    ("bad state 0x%x on reply to buffer post", tgt->state));
		mpt_assign_serno(mpt, req);
		tgt->state = TGT_STATE_LOADED;
		break;
	}
	case MPI_FUNCTION_TARGET_ASSIST:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
#endif
		mpt_prt(mpt, "target assist completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_STATUS_SEND:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
#endif
		mpt_prt(mpt, "status send completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_MODE_ABORT:
	{
		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
		PTR_MSG_TARGET_MODE_ABORT abtp =
		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
#endif
		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	}
	default:
		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
		    "0x%x\n", reply_frame->Function);
		break;
	}
	return (TRUE);
}