1 /*-
2 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
3 * FreeBSD Version.
4 *
5 * SPDX-License-Identifier: BSD-2-Clause AND BSD-3-Clause
6 *
7 * Copyright (c) 2000, 2001 by Greg Ansley
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice immediately at the beginning of the file, without modification,
14 * this list of conditions, and the following disclaimer.
15 * 2. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30 /*-
31 * Copyright (c) 2002, 2006 by Matthew Jacob
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions are
36 * met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
40 * substantially similar to the "NO WARRANTY" disclaimer below
41 * ("Disclaimer") and any redistribution must be conditioned upon including
42 * a substantially similar Disclaimer requirement for further binary
43 * redistribution.
44 * 3. Neither the names of the above listed copyright holders nor the names
45 * of any contributors may be used to endorse or promote products derived
46 * from this software without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
49 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
52 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
53 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
54 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
55 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
56 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
57 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
58 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 *
60 * Support from Chris Ellsworth in order to make SAS adapters work
61 * is gratefully acknowledged.
62 *
63 * Support from LSI-Logic has also gone a great deal toward making this a
64 * workable subsystem and is gratefully acknowledged.
65 */
66 /*-
67 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
68 * Copyright (c) 2005, WHEEL Sp. z o.o.
69 * Copyright (c) 2004, 2005 Justin T. Gibbs
70 * All rights reserved.
71 *
72 * Redistribution and use in source and binary forms, with or without
73 * modification, are permitted provided that the following conditions are
74 * met:
75 * 1. Redistributions of source code must retain the above copyright
76 * notice, this list of conditions and the following disclaimer.
77 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
78 * substantially similar to the "NO WARRANTY" disclaimer below
79 * ("Disclaimer") and any redistribution must be conditioned upon including
80 * a substantially similar Disclaimer requirement for further binary
81 * redistribution.
82 * 3. Neither the names of the above listed copyright holders nor the names
83 * of any contributors may be used to endorse or promote products derived
84 * from this software without specific prior written permission.
85 *
86 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
87 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
88 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
89 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
90 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
91 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
92 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
93 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
94 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
95 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
96 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97 */
98 #include <sys/cdefs.h>
99 #include <dev/mpt/mpt.h>
100 #include <dev/mpt/mpt_cam.h>
101 #include <dev/mpt/mpt_raid.h>
102
103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/mpt/mpilib/mpi_init.h"
105 #include "dev/mpt/mpilib/mpi_targ.h"
106 #include "dev/mpt/mpilib/mpi_fc.h"
107 #include "dev/mpt/mpilib/mpi_sas.h"
108
109 #include <sys/callout.h>
110 #include <sys/kthread.h>
111 #include <sys/sysctl.h>
112
113 static void mpt_poll(struct cam_sim *);
114 static callout_func_t mpt_timeout;
115 static void mpt_action(struct cam_sim *, union ccb *);
116 static int
117 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
118 static void mpt_setwidth(struct mpt_softc *, int, int);
119 static void mpt_setsync(struct mpt_softc *, int, int, int);
120 static int mpt_update_spi_config(struct mpt_softc *, int);
121
122 static mpt_reply_handler_t mpt_scsi_reply_handler;
123 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
124 static mpt_reply_handler_t mpt_fc_els_reply_handler;
125 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
126 MSG_DEFAULT_REPLY *);
127 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
128 static int mpt_fc_reset_link(struct mpt_softc *, int);
129
130 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
131 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
132 static void mpt_recovery_thread(void *arg);
133 static void mpt_recover_commands(struct mpt_softc *mpt);
134
135 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
136 target_id_t, lun_id_t, u_int, int);
137
138 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
139 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
140 static int mpt_add_els_buffers(struct mpt_softc *mpt);
141 static int mpt_add_target_commands(struct mpt_softc *mpt);
142 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
143 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
144 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
145 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
146 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
147 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
148 uint8_t, uint8_t const *, u_int);
149 static void
150 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
151 tgt_resource_t *, int);
152 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
153 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
154 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
155 static mpt_reply_handler_t mpt_sata_pass_reply_handler;
156
157 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
158 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
159 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
160 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
161
162 static mpt_probe_handler_t mpt_cam_probe;
163 static mpt_attach_handler_t mpt_cam_attach;
164 static mpt_enable_handler_t mpt_cam_enable;
165 static mpt_ready_handler_t mpt_cam_ready;
166 static mpt_event_handler_t mpt_cam_event;
167 static mpt_reset_handler_t mpt_cam_ioc_reset;
168 static mpt_detach_handler_t mpt_cam_detach;
169
170 static struct mpt_personality mpt_cam_personality =
171 {
172 .name = "mpt_cam",
173 .probe = mpt_cam_probe,
174 .attach = mpt_cam_attach,
175 .enable = mpt_cam_enable,
176 .ready = mpt_cam_ready,
177 .event = mpt_cam_event,
178 .reset = mpt_cam_ioc_reset,
179 .detach = mpt_cam_detach,
180 };
181
182 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
183 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
184
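/*
 * hw.mpt.enable_sata_wc: -1 (the default) leaves each SATA device's write
 * cache setting alone; 0 disables write caching and any other value enables
 * it when the adapter is brought up (see mpt_set_initial_config_sas()).
 */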
185 int mpt_enable_sata_wc = -1;
186 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
187
188 static int
mpt_cam_probe(struct mpt_softc *mpt)
190 {
191 int role;
192
193 /*
194 * Only attach to nodes that support the initiator or target role
195 * (or want to) or have RAID physical devices that need CAM pass-thru
196 * support.
197 */
198 if (mpt->do_cfg_role) {
199 role = mpt->cfg_role;
200 } else {
201 role = mpt->role;
202 }
203 if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
204 (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
205 return (0);
206 }
207 return (ENODEV);
208 }
209
210 static int
mpt_cam_attach(struct mpt_softc *mpt)
212 {
213 struct cam_devq *devq;
214 mpt_handler_t handler;
215 int maxq;
216 int error;
217
218 MPT_LOCK(mpt);
219 TAILQ_INIT(&mpt->request_timeout_list);
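	/*
	 * Bound the CAM queue depth by the lesser of the IOC's advertised
	 * global credits and the size of our own request pool.
	 */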
220 maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
221 mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
222
223 handler.reply_handler = mpt_scsi_reply_handler;
224 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
225 &scsi_io_handler_id);
226 if (error != 0) {
227 MPT_UNLOCK(mpt);
228 goto cleanup;
229 }
230
231 handler.reply_handler = mpt_scsi_tmf_reply_handler;
232 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
233 &scsi_tmf_handler_id);
234 if (error != 0) {
235 MPT_UNLOCK(mpt);
236 goto cleanup;
237 }
238
239 /*
240 * If we're fibre channel and could support target mode, we register
241 * an ELS reply handler and give it resources.
242 */
243 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
244 handler.reply_handler = mpt_fc_els_reply_handler;
245 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
246 &fc_els_handler_id);
247 if (error != 0) {
248 MPT_UNLOCK(mpt);
249 goto cleanup;
250 }
251 if (mpt_add_els_buffers(mpt) == FALSE) {
252 error = ENOMEM;
253 MPT_UNLOCK(mpt);
254 goto cleanup;
255 }
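		/*
		 * The ELS buffers consume request slots, so reduce the
		 * queue depth we will advertise to CAM accordingly.
		 */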
256 maxq -= mpt->els_cmds_allocated;
257 }
258
259 /*
260 * If we support target mode, we register a reply handler for it,
261 * but don't add command resources until we actually enable target
262 * mode.
263 */
264 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
265 handler.reply_handler = mpt_scsi_tgt_reply_handler;
266 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
267 &mpt->scsi_tgt_handler_id);
268 if (error != 0) {
269 MPT_UNLOCK(mpt);
270 goto cleanup;
271 }
272 }
273
274 if (mpt->is_sas) {
275 handler.reply_handler = mpt_sata_pass_reply_handler;
276 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
277 &sata_pass_handler_id);
278 if (error != 0) {
279 MPT_UNLOCK(mpt);
280 goto cleanup;
281 }
282 }
283
284 /*
285 * We keep one request reserved for timeout TMF requests.
286 */
287 mpt->tmf_req = mpt_get_request(mpt, FALSE);
288 if (mpt->tmf_req == NULL) {
289 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
290 error = ENOMEM;
291 MPT_UNLOCK(mpt);
292 goto cleanup;
293 }
294
295 /*
296 * Mark the request as free even though not on the free list.
297 * There is only one TMF request allowed to be outstanding at
298 * a time and the TMF routines perform their own allocation
299 * tracking using the standard state flags.
300 */
301 mpt->tmf_req->state = REQ_STATE_FREE;
302 maxq--;
303
304 /*
305 * The rest of this is CAM foo, for which we need to drop our lock
306 */
307 MPT_UNLOCK(mpt);
308
309 if (mpt_spawn_recovery_thread(mpt) != 0) {
310 mpt_prt(mpt, "Unable to spawn recovery thread!\n");
311 error = ENOMEM;
312 goto cleanup;
313 }
314
315 /*
316 * Create the device queue for our SIM(s).
317 */
318 devq = cam_simq_alloc(maxq);
319 if (devq == NULL) {
320 mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
321 error = ENOMEM;
322 goto cleanup;
323 }
324
325 /*
326 * Construct our SIM entry.
327 */
328 mpt->sim =
329 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
330 if (mpt->sim == NULL) {
331 mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
332 cam_simq_free(devq);
333 error = ENOMEM;
334 goto cleanup;
335 }
336
337 /*
338 * Register exactly this bus.
339 */
340 MPT_LOCK(mpt);
341 if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
342 mpt_prt(mpt, "Bus registration Failed!\n");
343 error = ENOMEM;
344 MPT_UNLOCK(mpt);
345 goto cleanup;
346 }
347
348 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
349 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
350 mpt_prt(mpt, "Unable to allocate Path!\n");
351 error = ENOMEM;
352 MPT_UNLOCK(mpt);
353 goto cleanup;
354 }
355 MPT_UNLOCK(mpt);
356
357 /*
358 * Only register a second bus for RAID physical
359 * devices if the controller supports RAID.
360 */
361 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
362 return (0);
363 }
364
365 /*
366 * Create a "bus" to export all hidden disks to CAM.
367 */
368 mpt->phydisk_sim =
369 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
370 if (mpt->phydisk_sim == NULL) {
371 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
372 error = ENOMEM;
373 goto cleanup;
374 }
375
376 /*
377 * Register this bus.
378 */
379 MPT_LOCK(mpt);
380 if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
381 CAM_SUCCESS) {
382 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
383 error = ENOMEM;
384 MPT_UNLOCK(mpt);
385 goto cleanup;
386 }
387
388 if (xpt_create_path(&mpt->phydisk_path, NULL,
389 cam_sim_path(mpt->phydisk_sim),
390 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
391 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
392 error = ENOMEM;
393 MPT_UNLOCK(mpt);
394 goto cleanup;
395 }
396 MPT_UNLOCK(mpt);
397 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
398 return (0);
399
400 cleanup:
401 mpt_cam_detach(mpt);
402 return (error);
403 }
404
405 /*
406 * Read FC configuration information
407 */
408 static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
410 {
411 struct sysctl_ctx_list *ctx;
412 struct sysctl_oid *tree;
413 char *topology = NULL;
414 int rv;
415
416 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
417 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
418 if (rv) {
419 return (-1);
420 }
421 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
422 mpt->mpt_fcport_page0.Header.PageVersion,
423 mpt->mpt_fcport_page0.Header.PageLength,
424 mpt->mpt_fcport_page0.Header.PageNumber,
425 mpt->mpt_fcport_page0.Header.PageType);
426
427 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
428 sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
429 if (rv) {
430 mpt_prt(mpt, "failed to read FC Port Page 0\n");
431 return (-1);
432 }
433 mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
434
435 switch (mpt->mpt_fcport_page0.CurrentSpeed) {
436 case MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT:
437 mpt->mpt_fcport_speed = 1;
438 break;
439 case MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT:
440 mpt->mpt_fcport_speed = 2;
441 break;
442 case MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT:
443 mpt->mpt_fcport_speed = 10;
444 break;
445 case MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT:
446 mpt->mpt_fcport_speed = 4;
447 break;
448 default:
449 mpt->mpt_fcport_speed = 0;
450 break;
451 }
452
453 switch (mpt->mpt_fcport_page0.Flags &
454 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
455 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
456 mpt->mpt_fcport_speed = 0;
457 topology = "<NO LOOP>";
458 break;
459 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
460 topology = "N-Port";
461 break;
462 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
463 topology = "NL-Port";
464 break;
465 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
466 topology = "F-Port";
467 break;
468 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
469 topology = "FL-Port";
470 break;
471 default:
472 mpt->mpt_fcport_speed = 0;
473 topology = "?";
474 break;
475 }
476
477 mpt->scinfo.fc.wwnn = ((uint64_t)mpt->mpt_fcport_page0.WWNN.High << 32)
478 | mpt->mpt_fcport_page0.WWNN.Low;
479 mpt->scinfo.fc.wwpn = ((uint64_t)mpt->mpt_fcport_page0.WWPN.High << 32)
480 | mpt->mpt_fcport_page0.WWPN.Low;
481 mpt->scinfo.fc.portid = mpt->mpt_fcport_page0.PortIdentifier;
482
483 mpt_lprt(mpt, MPT_PRT_INFO,
484 "FC Port Page 0: Topology <%s> WWNN 0x%16jx WWPN 0x%16jx "
485 "Speed %u-Gbit\n", topology,
486 (uintmax_t)mpt->scinfo.fc.wwnn, (uintmax_t)mpt->scinfo.fc.wwpn,
487 mpt->mpt_fcport_speed);
488 MPT_UNLOCK(mpt);
489 ctx = device_get_sysctl_ctx(mpt->dev);
490 tree = device_get_sysctl_tree(mpt->dev);
491
492 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
493 "wwnn", CTLFLAG_RD, &mpt->scinfo.fc.wwnn,
494 "World Wide Node Name");
495
496 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
497 "wwpn", CTLFLAG_RD, &mpt->scinfo.fc.wwpn,
498 "World Wide Port Name");
499
500 MPT_LOCK(mpt);
501 return (0);
502 }
503
504 /*
505 * Set FC configuration information.
506 */
507 static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
509 {
510 CONFIG_PAGE_FC_PORT_1 fc;
511 U32 fl;
512 int r, doit = 0;
513 int role;
514
515 r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
516 &fc.Header, FALSE, 5000);
517 if (r) {
518 mpt_prt(mpt, "failed to read FC page 1 header\n");
519 return (mpt_fc_reset_link(mpt, 1));
520 }
521
522 r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
523 &fc.Header, sizeof (fc), FALSE, 5000);
524 if (r) {
525 mpt_prt(mpt, "failed to read FC page 1\n");
526 return (mpt_fc_reset_link(mpt, 1));
527 }
528 mpt2host_config_page_fc_port_1(&fc);
529
530 /*
531 * Check our flags to make sure we support the role we want.
532 */
533 doit = 0;
534 role = 0;
535 fl = fc.Flags;
536
537 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
538 role |= MPT_ROLE_INITIATOR;
539 }
540 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
541 role |= MPT_ROLE_TARGET;
542 }
543
544 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
545
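	/*
	 * If no role override was requested, adopt the role the NVRAM
	 * currently advertises so the comparison below is a no-op;
	 * otherwise clear the one-shot override flag and fall through
	 * to reconcile the NVRAM role with the requested one.
	 */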
546 if (mpt->do_cfg_role == 0) {
547 role = mpt->cfg_role;
548 } else {
549 mpt->do_cfg_role = 0;
550 }
551
552 if (role != mpt->cfg_role) {
553 if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
554 if ((role & MPT_ROLE_INITIATOR) == 0) {
555 mpt_prt(mpt, "adding initiator role\n");
556 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
557 doit++;
558 } else {
559 mpt_prt(mpt, "keeping initiator role\n");
560 }
561 } else if (role & MPT_ROLE_INITIATOR) {
562 mpt_prt(mpt, "removing initiator role\n");
563 doit++;
564 }
565 if (mpt->cfg_role & MPT_ROLE_TARGET) {
566 if ((role & MPT_ROLE_TARGET) == 0) {
567 mpt_prt(mpt, "adding target role\n");
568 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
569 doit++;
570 } else {
571 mpt_prt(mpt, "keeping target role\n");
572 }
573 } else if (role & MPT_ROLE_TARGET) {
574 mpt_prt(mpt, "removing target role\n");
575 doit++;
576 }
577 mpt->role = mpt->cfg_role;
578 }
579
580 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
581 if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
582 mpt_prt(mpt, "adding OXID option\n");
583 fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
584 doit++;
585 }
586 }
587
588 if (doit) {
589 fc.Flags = fl;
590 host2mpt_config_page_fc_port_1(&fc);
591 r = mpt_write_cfg_page(mpt,
592 MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
593 sizeof(fc), FALSE, 5000);
594 if (r != 0) {
595 mpt_prt(mpt, "failed to update NVRAM with changes\n");
596 return (0);
597 }
598 mpt_prt(mpt, "NOTE: NVRAM changes will not take "
599 "effect until next reboot or IOC reset\n");
600 }
601 return (0);
602 }
603
604 static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
606 {
607 ConfigExtendedPageHeader_t hdr;
608 struct mptsas_phyinfo *phyinfo;
609 SasIOUnitPage0_t *buffer;
610 int error, len, i;
611
612 error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
613 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
614 &hdr, 0, 10000);
615 if (error)
616 goto out;
617 if (hdr.ExtPageLength == 0) {
618 error = ENXIO;
619 goto out;
620 }
621
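	/* ExtPageLength is expressed in 32-bit words. */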
622 len = hdr.ExtPageLength * 4;
623 buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
624 if (buffer == NULL) {
625 error = ENOMEM;
626 goto out;
627 }
628
629 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
630 0, &hdr, buffer, len, 0, 10000);
631 if (error) {
632 free(buffer, M_DEVBUF);
633 goto out;
634 }
635
636 portinfo->num_phys = buffer->NumPhys;
637 portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
638 portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
639 if (portinfo->phy_info == NULL) {
640 free(buffer, M_DEVBUF);
641 error = ENOMEM;
642 goto out;
643 }
644
645 for (i = 0; i < portinfo->num_phys; i++) {
646 phyinfo = &portinfo->phy_info[i];
647 phyinfo->phy_num = i;
648 phyinfo->port_id = buffer->PhyData[i].Port;
649 phyinfo->negotiated_link_rate =
650 buffer->PhyData[i].NegotiatedLinkRate;
651 phyinfo->handle =
652 le16toh(buffer->PhyData[i].ControllerDevHandle);
653 }
654
655 free(buffer, M_DEVBUF);
656 out:
657 return (error);
658 }
659
660 static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
662 uint32_t form, uint32_t form_specific)
663 {
664 ConfigExtendedPageHeader_t hdr;
665 SasPhyPage0_t *buffer;
666 int error;
667
668 error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
669 MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
670 0, 10000);
671 if (error)
672 goto out;
673 if (hdr.ExtPageLength == 0) {
674 error = ENXIO;
675 goto out;
676 }
677
678 buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
679 if (buffer == NULL) {
680 error = ENOMEM;
681 goto out;
682 }
683
684 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
685 form + form_specific, &hdr, buffer,
686 sizeof(SasPhyPage0_t), 0, 10000);
687 if (error) {
688 free(buffer, M_DEVBUF);
689 goto out;
690 }
691
692 phy_info->hw_link_rate = buffer->HwLinkRate;
693 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
694 phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
695 phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
696
697 free(buffer, M_DEVBUF);
698 out:
699 return (error);
700 }
701
702 static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
704 uint32_t form, uint32_t form_specific)
705 {
706 ConfigExtendedPageHeader_t hdr;
707 SasDevicePage0_t *buffer;
708 uint64_t sas_address;
709 int error = 0;
710
711 bzero(device_info, sizeof(*device_info));
712 error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
713 MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
714 &hdr, 0, 10000);
715 if (error)
716 goto out;
717 if (hdr.ExtPageLength == 0) {
718 error = ENXIO;
719 goto out;
720 }
721
722 buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
723 if (buffer == NULL) {
724 error = ENOMEM;
725 goto out;
726 }
727
728 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
729 form + form_specific, &hdr, buffer,
730 sizeof(SasDevicePage0_t), 0, 10000);
731 if (error) {
732 free(buffer, M_DEVBUF);
733 goto out;
734 }
735
736 device_info->dev_handle = le16toh(buffer->DevHandle);
737 device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
738 device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
739 device_info->slot = le16toh(buffer->Slot);
740 device_info->phy_num = buffer->PhyNum;
741 device_info->physical_port = buffer->PhysicalPort;
742 device_info->target_id = buffer->TargetID;
743 device_info->bus = buffer->Bus;
744 bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
745 device_info->sas_address = le64toh(sas_address);
746 device_info->device_info = le32toh(buffer->DeviceInfo);
747
748 free(buffer, M_DEVBUF);
749 out:
750 return (error);
751 }
752
753 /*
 * Read SAS configuration information: SAS IO Unit Page 0 plus the
 * per-phy PHY and attached-device pages.
755 */
756 static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
758 {
759 struct mptsas_portinfo *portinfo;
760 struct mptsas_phyinfo *phyinfo;
761 int error, i;
762
763 portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
764 if (portinfo == NULL)
765 return (ENOMEM);
766
767 error = mptsas_sas_io_unit_pg0(mpt, portinfo);
768 if (error) {
769 free(portinfo, M_DEVBUF);
770 return (0);
771 }
772
773 for (i = 0; i < portinfo->num_phys; i++) {
774 phyinfo = &portinfo->phy_info[i];
775 error = mptsas_sas_phy_pg0(mpt, phyinfo,
776 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
777 MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
778 if (error)
779 break;
780 error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
781 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
782 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
783 phyinfo->handle);
784 if (error)
785 break;
786 phyinfo->identify.phy_num = phyinfo->phy_num = i;
787 if (phyinfo->attached.dev_handle)
788 error = mptsas_sas_device_pg0(mpt,
789 &phyinfo->attached,
790 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
791 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
792 phyinfo->attached.dev_handle);
793 if (error)
794 break;
795 }
796 mpt->sas_portinfo = portinfo;
797 return (0);
798 }
799
800 static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
802 int enabled)
803 {
804 SataPassthroughRequest_t *pass;
805 request_t *req;
806 int error, status;
807
808 req = mpt_get_request(mpt, 0);
809 if (req == NULL)
810 return;
811
812 pass = req->req_vbuf;
813 bzero(pass, sizeof(SataPassthroughRequest_t));
814 pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
815 pass->TargetID = devinfo->target_id;
816 pass->Bus = devinfo->bus;
817 pass->PassthroughFlags = 0;
818 pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
819 pass->DataLength = 0;
820 pass->MsgContext = htole32(req->index | sata_pass_handler_id);
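	/*
	 * Hand-build a Host-to-Device Register FIS for ATA SET FEATURES:
	 * 0x27 is the H2D register FIS type, 0x80 sets the Command bit,
	 * 0xef is SET FEATURES, and the features byte selects subcommand
	 * 0x02 (enable write cache) or 0x82 (disable write cache).
	 */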
821 pass->CommandFIS[0] = 0x27;
822 pass->CommandFIS[1] = 0x80;
823 pass->CommandFIS[2] = 0xef;
824 pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
825 pass->CommandFIS[7] = 0x40;
826 pass->CommandFIS[15] = 0x08;
827
828 mpt_check_doorbell(mpt);
829 mpt_send_cmd(mpt, req);
830 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
831 10 * 1000);
832 if (error) {
833 mpt_free_request(mpt, req);
834 printf("error %d sending passthrough\n", error);
835 return;
836 }
837
838 status = le16toh(req->IOCStatus);
839 if (status != MPI_IOCSTATUS_SUCCESS) {
840 mpt_free_request(mpt, req);
841 printf("IOCSTATUS %d\n", status);
842 return;
843 }
844
845 mpt_free_request(mpt, req);
846 }
847
848 /*
 * Set SAS configuration information: optionally force the SATA write
 * cache setting on attached SATA devices.
850 */
851 static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
853 {
854 struct mptsas_phyinfo *phyinfo;
855 int i;
856
857 if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
858 for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
859 phyinfo = &mpt->sas_portinfo->phy_info[i];
860 if (phyinfo->attached.dev_handle == 0)
861 continue;
862 if ((phyinfo->attached.device_info &
863 MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
864 continue;
865 if (bootverbose)
866 device_printf(mpt->dev,
867 "%sabling SATA WC on phy %d\n",
868 (mpt_enable_sata_wc) ? "En" : "Dis", i);
869 mptsas_set_sata_wc(mpt, &phyinfo->attached,
870 mpt_enable_sata_wc);
871 }
872 }
873
874 return (0);
875 }
876
877 static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
879 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
880 {
881
882 if (req != NULL) {
883 if (reply_frame != NULL) {
884 req->IOCStatus = le16toh(reply_frame->IOCStatus);
885 }
886 req->state &= ~REQ_STATE_QUEUED;
887 req->state |= REQ_STATE_DONE;
888 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
889 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
890 wakeup(req);
891 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
892 /*
893 * Whew- we can free this request (late completion)
894 */
895 mpt_free_request(mpt, req);
896 }
897 }
898
899 return (TRUE);
900 }
901
902 /*
903 * Read SCSI configuration information
904 */
905 static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
907 {
908 int rv, i;
909
910 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
911 &mpt->mpt_port_page0.Header, FALSE, 5000);
912 if (rv) {
913 return (-1);
914 }
915 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
916 mpt->mpt_port_page0.Header.PageVersion,
917 mpt->mpt_port_page0.Header.PageLength,
918 mpt->mpt_port_page0.Header.PageNumber,
919 mpt->mpt_port_page0.Header.PageType);
920
921 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
922 &mpt->mpt_port_page1.Header, FALSE, 5000);
923 if (rv) {
924 return (-1);
925 }
926 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
927 mpt->mpt_port_page1.Header.PageVersion,
928 mpt->mpt_port_page1.Header.PageLength,
929 mpt->mpt_port_page1.Header.PageNumber,
930 mpt->mpt_port_page1.Header.PageType);
931
932 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
933 &mpt->mpt_port_page2.Header, FALSE, 5000);
934 if (rv) {
935 return (-1);
936 }
937 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
938 mpt->mpt_port_page2.Header.PageVersion,
939 mpt->mpt_port_page2.Header.PageLength,
940 mpt->mpt_port_page2.Header.PageNumber,
941 mpt->mpt_port_page2.Header.PageType);
942
943 for (i = 0; i < 16; i++) {
944 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
945 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
946 if (rv) {
947 return (-1);
948 }
949 mpt_lprt(mpt, MPT_PRT_DEBUG,
950 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
951 mpt->mpt_dev_page0[i].Header.PageVersion,
952 mpt->mpt_dev_page0[i].Header.PageLength,
953 mpt->mpt_dev_page0[i].Header.PageNumber,
954 mpt->mpt_dev_page0[i].Header.PageType);
955
956 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
957 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
958 if (rv) {
959 return (-1);
960 }
961 mpt_lprt(mpt, MPT_PRT_DEBUG,
962 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
963 mpt->mpt_dev_page1[i].Header.PageVersion,
964 mpt->mpt_dev_page1[i].Header.PageLength,
965 mpt->mpt_dev_page1[i].Header.PageNumber,
966 mpt->mpt_dev_page1[i].Header.PageType);
967 }
968
969 /*
970 * At this point, we don't *have* to fail. As long as we have
971 * valid config header information, we can (barely) lurch
972 * along.
973 */
974
975 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
976 sizeof(mpt->mpt_port_page0), FALSE, 5000);
977 if (rv) {
978 mpt_prt(mpt, "failed to read SPI Port Page 0\n");
979 } else {
980 mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
981 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
982 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
983 mpt->mpt_port_page0.Capabilities,
984 mpt->mpt_port_page0.PhysicalInterface);
985 }
986
987 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
988 sizeof(mpt->mpt_port_page1), FALSE, 5000);
989 if (rv) {
990 mpt_prt(mpt, "failed to read SPI Port Page 1\n");
991 } else {
992 mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
993 mpt_lprt(mpt, MPT_PRT_DEBUG,
994 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
995 mpt->mpt_port_page1.Configuration,
996 mpt->mpt_port_page1.OnBusTimerValue);
997 }
998
999 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
1000 sizeof(mpt->mpt_port_page2), FALSE, 5000);
1001 if (rv) {
1002 mpt_prt(mpt, "failed to read SPI Port Page 2\n");
1003 } else {
1004 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1005 "Port Page 2: Flags %x Settings %x\n",
1006 mpt->mpt_port_page2.PortFlags,
1007 mpt->mpt_port_page2.PortSettings);
1008 mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
1009 for (i = 0; i < 16; i++) {
1010 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1011 " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1012 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1013 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1014 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1015 }
1016 }
1017
1018 for (i = 0; i < 16; i++) {
1019 rv = mpt_read_cur_cfg_page(mpt, i,
1020 &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1021 FALSE, 5000);
1022 if (rv) {
1023 mpt_prt(mpt,
1024 "cannot read SPI Target %d Device Page 0\n", i);
1025 continue;
1026 }
1027 mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
1028 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1029 "target %d page 0: Negotiated Params %x Information %x\n",
1030 i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1031 mpt->mpt_dev_page0[i].Information);
1032
1033 rv = mpt_read_cur_cfg_page(mpt, i,
1034 &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1035 FALSE, 5000);
1036 if (rv) {
1037 mpt_prt(mpt,
1038 "cannot read SPI Target %d Device Page 1\n", i);
1039 continue;
1040 }
1041 mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
1042 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1043 "target %d page 1: Requested Params %x Configuration %x\n",
1044 i, mpt->mpt_dev_page1[i].RequestedParameters,
1045 mpt->mpt_dev_page1[i].Configuration);
1046 }
1047 return (0);
1048 }
1049
1050 /*
1051 * Validate SPI configuration information.
1052 *
1053 * In particular, validate SPI Port Page 1.
1054 */
1055 static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
1057 {
1058 int error, i, pp1val;
1059
1060 mpt->mpt_disc_enable = 0xff;
1061 mpt->mpt_tag_enable = 0;
1062
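	/*
	 * SPI Port Page 1 Configuration carries our initiator SCSI ID in
	 * the low bits and a one-hot bitmask of the IDs this port responds
	 * to in the upper half; compute the value we expect to find there.
	 */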
1063 pp1val = ((1 << mpt->mpt_ini_id) <<
1064 MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
1065 if (mpt->mpt_port_page1.Configuration != pp1val) {
1066 CONFIG_PAGE_SCSI_PORT_1 tmp;
1067
1068 mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1069 "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1070 tmp = mpt->mpt_port_page1;
1071 tmp.Configuration = pp1val;
1072 host2mpt_config_page_scsi_port_1(&tmp);
1073 error = mpt_write_cur_cfg_page(mpt, 0,
1074 &tmp.Header, sizeof(tmp), FALSE, 5000);
1075 if (error) {
1076 return (-1);
1077 }
1078 error = mpt_read_cur_cfg_page(mpt, 0,
1079 &tmp.Header, sizeof(tmp), FALSE, 5000);
1080 if (error) {
1081 return (-1);
1082 }
1083 mpt2host_config_page_scsi_port_1(&tmp);
1084 if (tmp.Configuration != pp1val) {
1085 mpt_prt(mpt,
1086 "failed to reset SPI Port Page 1 Config value\n");
1087 return (-1);
1088 }
1089 mpt->mpt_port_page1 = tmp;
1090 }
1091
1092 /*
1093 * The purpose of this exercise is to get
1094 * all targets back to async/narrow.
1095 *
1096 * We skip this step if the BIOS has already negotiated
1097 * speeds with the targets.
1098 */
1099 i = mpt->mpt_port_page2.PortSettings &
1100 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1101 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
1102 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1103 "honoring BIOS transfer negotiations\n");
1104 } else {
1105 for (i = 0; i < 16; i++) {
1106 mpt->mpt_dev_page1[i].RequestedParameters = 0;
1107 mpt->mpt_dev_page1[i].Configuration = 0;
1108 (void) mpt_update_spi_config(mpt, i);
1109 }
1110 }
1111 return (0);
1112 }
1113
1114 static int
mpt_cam_enable(struct mpt_softc *mpt)
1116 {
1117 int error;
1118
1119 MPT_LOCK(mpt);
1120
1121 error = EIO;
1122 if (mpt->is_fc) {
1123 if (mpt_read_config_info_fc(mpt)) {
1124 goto out;
1125 }
1126 if (mpt_set_initial_config_fc(mpt)) {
1127 goto out;
1128 }
1129 } else if (mpt->is_sas) {
1130 if (mpt_read_config_info_sas(mpt)) {
1131 goto out;
1132 }
1133 if (mpt_set_initial_config_sas(mpt)) {
1134 goto out;
1135 }
1136 } else if (mpt->is_spi) {
1137 if (mpt_read_config_info_spi(mpt)) {
1138 goto out;
1139 }
1140 if (mpt_set_initial_config_spi(mpt)) {
1141 goto out;
1142 }
1143 }
1144 error = 0;
1145
1146 out:
1147 MPT_UNLOCK(mpt);
1148 return (error);
1149 }
1150
1151 static void
mpt_cam_ready(struct mpt_softc *mpt)
1153 {
1154
1155 /*
1156 * If we're in target mode, hang out resources now
1157 * so we don't cause the world to hang talking to us.
1158 */
1159 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1160 /*
1161 * Try to add some target command resources
1162 */
1163 MPT_LOCK(mpt);
1164 if (mpt_add_target_commands(mpt) == FALSE) {
1165 mpt_prt(mpt, "failed to add target commands\n");
1166 }
1167 MPT_UNLOCK(mpt);
1168 }
1169 mpt->ready = 1;
1170 }
1171
1172 static void
mpt_cam_detach(struct mpt_softc *mpt)
1174 {
1175 mpt_handler_t handler;
1176
1177 MPT_LOCK(mpt);
1178 mpt->ready = 0;
1179 mpt_terminate_recovery_thread(mpt);
1180
1181 handler.reply_handler = mpt_scsi_reply_handler;
1182 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1183 scsi_io_handler_id);
1184 handler.reply_handler = mpt_scsi_tmf_reply_handler;
1185 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1186 scsi_tmf_handler_id);
1187 handler.reply_handler = mpt_fc_els_reply_handler;
1188 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1189 fc_els_handler_id);
1190 handler.reply_handler = mpt_scsi_tgt_reply_handler;
1191 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1192 mpt->scsi_tgt_handler_id);
1193 handler.reply_handler = mpt_sata_pass_reply_handler;
1194 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1195 sata_pass_handler_id);
1196
1197 if (mpt->tmf_req != NULL) {
1198 mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1199 mpt_free_request(mpt, mpt->tmf_req);
1200 mpt->tmf_req = NULL;
1201 }
1202 if (mpt->sas_portinfo != NULL) {
1203 free(mpt->sas_portinfo, M_DEVBUF);
1204 mpt->sas_portinfo = NULL;
1205 }
1206
1207 if (mpt->sim != NULL) {
1208 xpt_free_path(mpt->path);
1209 xpt_bus_deregister(cam_sim_path(mpt->sim));
1210 cam_sim_free(mpt->sim, TRUE);
1211 mpt->sim = NULL;
1212 }
1213
1214 if (mpt->phydisk_sim != NULL) {
1215 xpt_free_path(mpt->phydisk_path);
1216 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1217 cam_sim_free(mpt->phydisk_sim, TRUE);
1218 mpt->phydisk_sim = NULL;
1219 }
1220 MPT_UNLOCK(mpt);
1221 }
1222
/*
 * This routine is used after a system crash to dump core onto the swap device.
 */
1225 static void
mpt_poll(struct cam_sim *sim)
1227 {
1228 struct mpt_softc *mpt;
1229
1230 mpt = (struct mpt_softc *)cam_sim_softc(sim);
1231 mpt_intr(mpt);
1232 }
1233
1234 /*
1235 * Watchdog timeout routine for SCSI requests.
1236 */
1237 static void
mpt_timeout(void *arg)
1239 {
1240 union ccb *ccb;
1241 struct mpt_softc *mpt;
1242 request_t *req;
1243
1244 ccb = (union ccb *)arg;
1245 mpt = ccb->ccb_h.ccb_mpt_ptr;
1246
1247 MPT_LOCK_ASSERT(mpt);
1248 req = ccb->ccb_h.ccb_req_ptr;
1249 mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1250 req->serno, ccb, req->ccb);
1251 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
1252 if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1253 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1254 TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1255 req->state |= REQ_STATE_TIMEDOUT;
1256 mpt_wakeup_recovery_thread(mpt);
1257 }
1258 }
1259
1260 /*
1261 * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called
1262 * directly.
1263 *
 * Takes a list of physical segments and builds the SGL for the SCSI IO
 * command, then forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
1267 */
1268 static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1270 {
1271 request_t *req, *trq;
1272 char *mpt_off;
1273 union ccb *ccb;
1274 struct mpt_softc *mpt;
1275 bus_addr_t chain_list_addr;
1276 int first_lim, seg, this_seg_lim;
1277 uint32_t addr, cur_off, flags, nxt_off, tf;
1278 void *sglp = NULL;
1279 MSG_REQUEST_HEADER *hdrp;
1280 SGE_SIMPLE64 *se;
1281 SGE_CHAIN64 *ce;
1282 int istgt = 0;
1283
1284 req = (request_t *)arg;
1285 ccb = req->ccb;
1286
1287 mpt = ccb->ccb_h.ccb_mpt_ptr;
1288 req = ccb->ccb_h.ccb_req_ptr;
1289
1290 hdrp = req->req_vbuf;
1291 mpt_off = req->req_vbuf;
1292
1293 if (error == 0) {
1294 switch (hdrp->Function) {
1295 case MPI_FUNCTION_SCSI_IO_REQUEST:
1296 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1297 istgt = 0;
1298 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1299 break;
1300 case MPI_FUNCTION_TARGET_ASSIST:
1301 istgt = 1;
1302 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1303 break;
1304 default:
1305 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1306 hdrp->Function);
1307 error = EINVAL;
1308 break;
1309 }
1310 }
1311
1312 bad:
1313 if (error != 0) {
1314 if (error != EFBIG && error != ENOMEM) {
1315 mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1316 }
1317 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1318 cam_status status;
1319 mpt_freeze_ccb(ccb);
1320 if (error == EFBIG) {
1321 status = CAM_REQ_TOO_BIG;
1322 } else if (error == ENOMEM) {
1323 if (mpt->outofbeer == 0) {
1324 mpt->outofbeer = 1;
1325 xpt_freeze_simq(mpt->sim, 1);
1326 mpt_lprt(mpt, MPT_PRT_DEBUG,
1327 "FREEZEQ\n");
1328 }
1329 status = CAM_REQUEUE_REQ;
1330 } else {
1331 status = CAM_REQ_CMP_ERR;
1332 }
1333 mpt_set_ccb_status(ccb, status);
1334 }
1335 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1336 request_t *cmd_req =
1337 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1338 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1339 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1340 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1341 }
1342 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1343 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1344 xpt_done(ccb);
1345 mpt_free_request(mpt, req);
1346 return;
1347 }
1348
1349 /*
1350 * No data to transfer?
1351 * Just make a single simple SGL with zero length.
1352 */
1353
1354 if (mpt->verbose >= MPT_PRT_DEBUG) {
1355 int tidx = ((char *)sglp) - mpt_off;
1356 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1357 }
1358
1359 if (nseg == 0) {
1360 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1361 MPI_pSGE_SET_FLAGS(se1,
1362 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1363 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1364 se1->FlagsLength = htole32(se1->FlagsLength);
1365 goto out;
1366 }
1367
1368 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1369 if (istgt == 0) {
1370 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1371 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1372 }
1373 } else {
1374 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1375 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1376 }
1377 }
1378
1379 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1380 bus_dmasync_op_t op;
1381 if (istgt == 0) {
1382 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1383 op = BUS_DMASYNC_PREREAD;
1384 } else {
1385 op = BUS_DMASYNC_PREWRITE;
1386 }
1387 } else {
1388 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1389 op = BUS_DMASYNC_PREWRITE;
1390 } else {
1391 op = BUS_DMASYNC_PREREAD;
1392 }
1393 }
1394 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1395 }
1396
1397 /*
1398 * Okay, fill in what we can at the end of the command frame.
1399 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1400 * the command frame.
1401 *
1402 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1403 * SIMPLE64 pointers and start doing CHAIN64 entries after
1404 * that.
1405 */
1406
1407 if (nseg < MPT_NSGL_FIRST(mpt)) {
1408 first_lim = nseg;
1409 } else {
1410 /*
1411 * Leave room for CHAIN element
1412 */
1413 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1414 }
1415
1416 se = (SGE_SIMPLE64 *) sglp;
1417 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1418 tf = flags;
1419 memset(se, 0, sizeof (*se));
1420 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1421 se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1422 if (sizeof(bus_addr_t) > 4) {
1423 addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1424 /* SAS1078 36GB limitation WAR */
1425 if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
1426 MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
1427 addr |= (1U << 31);
1428 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1429 }
1430 se->Address.High = htole32(addr);
1431 }
1432 if (seg == first_lim - 1) {
1433 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1434 }
1435 if (seg == nseg - 1) {
1436 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1437 MPI_SGE_FLAGS_END_OF_BUFFER;
1438 }
1439 MPI_pSGE_SET_FLAGS(se, tf);
1440 se->FlagsLength = htole32(se->FlagsLength);
1441 }
1442
1443 if (seg == nseg) {
1444 goto out;
1445 }
1446
1447 /*
1448 * Tell the IOC where to find the first chain element.
1449 */
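	/* ChainOffset is expressed in 32-bit words, hence the >> 2. */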
1450 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1451 nxt_off = MPT_RQSL(mpt);
1452 trq = req;
1453
1454 /*
1455 * Make up the rest of the data segments out of a chain element
1456 * (contained in the current request frame) which points to
1457 * SIMPLE64 elements in the next request frame, possibly ending
1458 * with *another* chain element (if there's more).
1459 */
1460 while (seg < nseg) {
1461 /*
1462 * Point to the chain descriptor. Note that the chain
1463 * descriptor is at the end of the *previous* list (whether
1464 * chain or simple).
1465 */
1466 ce = (SGE_CHAIN64 *) se;
1467
1468 /*
1469 * Before we change our current pointer, make sure we won't
1470 * overflow the request area with this frame. Note that we
1471 * test against 'greater than' here as it's okay in this case
1472 * to have next offset be just outside the request area.
1473 */
1474 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1475 nxt_off = MPT_REQUEST_AREA;
1476 goto next_chain;
1477 }
1478
1479 /*
1480 * Set our SGE element pointer to the beginning of the chain
1481 * list and update our next chain list offset.
1482 */
1483 se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1484 cur_off = nxt_off;
1485 nxt_off += MPT_RQSL(mpt);
1486
1487 /*
1488 * Now initialize the chain descriptor.
1489 */
1490 memset(ce, 0, sizeof (*ce));
1491
1492 /*
1493 * Get the physical address of the chain list.
1494 */
1495 chain_list_addr = trq->req_pbuf;
1496 chain_list_addr += cur_off;
1497 if (sizeof (bus_addr_t) > 4) {
1498 ce->Address.High =
1499 htole32(((uint64_t)chain_list_addr) >> 32);
1500 }
1501 ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
1502 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1503 MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1504
1505 /*
1506 * If we have more than a frame's worth of segments left,
1507 * set up the chain list to have the last element be another
1508 * chain descriptor.
1509 */
1510 if ((nseg - seg) > MPT_NSGL(mpt)) {
1511 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1512 /*
1513 * The length of the chain is the length in bytes of the
1514 * number of segments plus the next chain element.
1515 *
1516 * The next chain descriptor offset is the length,
1517 * in words, of the number of segments.
1518 */
1519 ce->Length = (this_seg_lim - seg) *
1520 sizeof (SGE_SIMPLE64);
1521 ce->NextChainOffset = ce->Length >> 2;
1522 ce->Length += sizeof (SGE_CHAIN64);
1523 } else {
1524 this_seg_lim = nseg;
1525 ce->Length = (this_seg_lim - seg) *
1526 sizeof (SGE_SIMPLE64);
1527 }
1528 ce->Length = htole16(ce->Length);
1529
1530 /*
1531 * Fill in the chain list SGE elements with our segment data.
1532 *
1533 * If we're the last element in this chain list, set the last
1534 * element flag. If we're the completely last element period,
1535 * set the end of list and end of buffer flags.
1536 */
1537 while (seg < this_seg_lim) {
1538 tf = flags;
1539 memset(se, 0, sizeof (*se));
1540 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1541 se->Address.Low = htole32(dm_segs->ds_addr &
1542 0xffffffff);
1543 if (sizeof (bus_addr_t) > 4) {
1544 addr = ((uint64_t)dm_segs->ds_addr) >> 32;
1545 /* SAS1078 36GB limitation WAR */
1546 if (mpt->is_1078 &&
1547 (((uint64_t)dm_segs->ds_addr +
1548 MPI_SGE_LENGTH(se->FlagsLength)) >>
1549 32) == 9) {
1550 addr |= (1U << 31);
1551 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
1552 }
1553 se->Address.High = htole32(addr);
1554 }
1555 if (seg == this_seg_lim - 1) {
1556 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1557 }
1558 if (seg == nseg - 1) {
1559 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1560 MPI_SGE_FLAGS_END_OF_BUFFER;
1561 }
1562 MPI_pSGE_SET_FLAGS(se, tf);
1563 se->FlagsLength = htole32(se->FlagsLength);
1564 se++;
1565 seg++;
1566 dm_segs++;
1567 }
1568
1569 next_chain:
1570 /*
1571 * If we have more segments to do and we've used up all of
1572 * the space in a request area, go allocate another one
1573 * and chain to that.
1574 */
1575 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1576 request_t *nrq;
1577
1578 nrq = mpt_get_request(mpt, FALSE);
1579
1580 if (nrq == NULL) {
1581 error = ENOMEM;
1582 goto bad;
1583 }
1584
1585 /*
1586 * Append the new request area on the tail of our list.
1587 */
1588 if ((trq = req->chain) == NULL) {
1589 req->chain = nrq;
1590 } else {
1591 while (trq->chain != NULL) {
1592 trq = trq->chain;
1593 }
1594 trq->chain = nrq;
1595 }
1596 trq = nrq;
1597 mpt_off = trq->req_vbuf;
1598 if (mpt->verbose >= MPT_PRT_DEBUG) {
1599 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1600 }
1601 nxt_off = 0;
1602 }
1603 }
1604 out:
1605
1606 /*
1607 * Last time we need to check if this CCB needs to be aborted.
1608 */
1609 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1610 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1611 request_t *cmd_req =
1612 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1613 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1614 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1615 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1616 }
1617 mpt_prt(mpt,
1618 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1619 ccb->ccb_h.status & CAM_STATUS_MASK);
1620 if (nseg) {
1621 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1622 }
1623 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1624 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1625 xpt_done(ccb);
1626 mpt_free_request(mpt, req);
1627 return;
1628 }
1629
1630 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1631 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1632 mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
1633 mpt_timeout, ccb);
1634 }
1635 if (mpt->verbose > MPT_PRT_DEBUG) {
1636 int nc = 0;
1637 mpt_print_request(req->req_vbuf);
1638 for (trq = req->chain; trq; trq = trq->chain) {
1639 printf(" Additional Chain Area %d\n", nc++);
1640 mpt_dump_sgl(trq->req_vbuf, 0);
1641 }
1642 }
1643
1644 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1645 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1646 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1647 #ifdef WE_TRUST_AUTO_GOOD_STATUS
1648 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1650 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1651 } else {
1652 tgt->state = TGT_STATE_MOVING_DATA;
1653 }
1654 #else
1655 tgt->state = TGT_STATE_MOVING_DATA;
1656 #endif
1657 }
1658 mpt_send_cmd(mpt, req);
1659 }
1660
1661 static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1663 {
1664 request_t *req, *trq;
1665 char *mpt_off;
1666 union ccb *ccb;
1667 struct mpt_softc *mpt;
1668 int seg, first_lim;
1669 uint32_t flags, nxt_off;
1670 void *sglp = NULL;
1671 MSG_REQUEST_HEADER *hdrp;
1672 SGE_SIMPLE32 *se;
1673 SGE_CHAIN32 *ce;
1674 int istgt = 0;
1675
1676 req = (request_t *)arg;
1677 ccb = req->ccb;
1678
1679 mpt = ccb->ccb_h.ccb_mpt_ptr;
1680 req = ccb->ccb_h.ccb_req_ptr;
1681
1682 hdrp = req->req_vbuf;
1683 mpt_off = req->req_vbuf;
1684
1685 if (error == 0) {
1686 switch (hdrp->Function) {
1687 case MPI_FUNCTION_SCSI_IO_REQUEST:
1688 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1689 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1690 break;
1691 case MPI_FUNCTION_TARGET_ASSIST:
1692 istgt = 1;
1693 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1694 break;
1695 default:
1696 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1697 hdrp->Function);
1698 error = EINVAL;
1699 break;
1700 }
1701 }
1702
1703 bad:
1704 if (error != 0) {
1705 if (error != EFBIG && error != ENOMEM) {
1706 mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1707 }
1708 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1709 cam_status status;
1710 mpt_freeze_ccb(ccb);
1711 if (error == EFBIG) {
1712 status = CAM_REQ_TOO_BIG;
1713 } else if (error == ENOMEM) {
1714 if (mpt->outofbeer == 0) {
1715 mpt->outofbeer = 1;
1716 xpt_freeze_simq(mpt->sim, 1);
1717 mpt_lprt(mpt, MPT_PRT_DEBUG,
1718 "FREEZEQ\n");
1719 }
1720 status = CAM_REQUEUE_REQ;
1721 } else {
1722 status = CAM_REQ_CMP_ERR;
1723 }
1724 mpt_set_ccb_status(ccb, status);
1725 }
1726 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1727 request_t *cmd_req =
1728 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1729 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1730 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1731 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1732 }
1733 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1734 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1735 xpt_done(ccb);
1736 mpt_free_request(mpt, req);
1737 return;
1738 }
1739
1740 /*
1741 * No data to transfer?
1742 * Just make a single simple SGL with zero length.
1743 */
1744
1745 if (mpt->verbose >= MPT_PRT_DEBUG) {
1746 int tidx = ((char *)sglp) - mpt_off;
1747 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1748 }
1749
1750 if (nseg == 0) {
1751 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1752 MPI_pSGE_SET_FLAGS(se1,
1753 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1754 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1755 se1->FlagsLength = htole32(se1->FlagsLength);
1756 goto out;
1757 }
1758
1759 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1760 if (istgt == 0) {
1761 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1762 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1763 }
1764 } else {
1765 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1766 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1767 }
1768 }
1769
1770 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1771 bus_dmasync_op_t op;
1772 if (istgt) {
1773 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1774 op = BUS_DMASYNC_PREREAD;
1775 } else {
1776 op = BUS_DMASYNC_PREWRITE;
1777 }
1778 } else {
1779 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1780 op = BUS_DMASYNC_PREWRITE;
1781 } else {
1782 op = BUS_DMASYNC_PREREAD;
1783 }
1784 }
1785 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1786 }
1787
1788 /*
1789 * Okay, fill in what we can at the end of the command frame.
1790 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1791 * the command frame.
1792 *
1793 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1794 * SIMPLE32 pointers and start doing CHAIN32 entries after
1795 * that.
1796 */
1797
1798 if (nseg < MPT_NSGL_FIRST(mpt)) {
1799 first_lim = nseg;
1800 } else {
1801 /*
1802 * Leave room for CHAIN element
1803 */
1804 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1805 }
1806
1807 se = (SGE_SIMPLE32 *) sglp;
1808 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1809 uint32_t tf;
1810
1811 memset(se, 0,sizeof (*se));
1812 se->Address = htole32(dm_segs->ds_addr);
1813
1814 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1815 tf = flags;
1816 if (seg == first_lim - 1) {
1817 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1818 }
1819 if (seg == nseg - 1) {
1820 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1821 MPI_SGE_FLAGS_END_OF_BUFFER;
1822 }
1823 MPI_pSGE_SET_FLAGS(se, tf);
1824 se->FlagsLength = htole32(se->FlagsLength);
1825 }
1826
1827 if (seg == nseg) {
1828 goto out;
1829 }
1830
1831 /*
1832 * Tell the IOC where to find the first chain element.
1833 */
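	/* ChainOffset is expressed in 32-bit words, hence the shift by 2. */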
1834 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1835 nxt_off = MPT_RQSL(mpt);
1836 trq = req;
1837
1838 /*
1839 * Make up the rest of the data segments out of a chain element
1840 * (contained in the current request frame) which points to
1841 * SIMPLE32 elements in the next request frame, possibly ending
1842 * with *another* chain element (if there's more).
1843 */
1844 while (seg < nseg) {
1845 int this_seg_lim;
1846 uint32_t tf, cur_off;
1847 bus_addr_t chain_list_addr;
1848
1849 /*
1850 * Point to the chain descriptor. Note that the chain
1851 * descriptor is at the end of the *previous* list (whether
1852 * chain or simple).
1853 */
1854 ce = (SGE_CHAIN32 *) se;
1855
1856 /*
1857 * Before we change our current pointer, make sure we won't
1858 * overflow the request area with this frame. Note that we
1859 * test against 'greater than' here as it's okay in this case
1860 * to have next offset be just outside the request area.
1861 */
1862 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1863 nxt_off = MPT_REQUEST_AREA;
1864 goto next_chain;
1865 }
1866
1867 /*
1868 * Set our SGE element pointer to the beginning of the chain
1869 * list and update our next chain list offset.
1870 */
1871 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1872 cur_off = nxt_off;
1873 nxt_off += MPT_RQSL(mpt);
1874
1875 /*
1876 * Now initialize the chain descriptor.
1877 */
1878 memset(ce, 0, sizeof (*ce));
1879
1880 /*
1881 * Get the physical address of the chain list.
1882 */
1883 chain_list_addr = trq->req_pbuf;
1884 chain_list_addr += cur_off;
1885
1886 ce->Address = htole32(chain_list_addr);
1887 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1888
1889 /*
1890 * If we have more than a frame's worth of segments left,
1891 * set up the chain list to have the last element be another
1892 * chain descriptor.
1893 */
1894 if ((nseg - seg) > MPT_NSGL(mpt)) {
1895 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1896 /*
1897 * The length of the chain is the length in bytes of the
1898 * number of segments plus the next chain element.
1899 *
1900 * The next chain descriptor offset is the length,
1901 * in words, of the number of segments.
1902 */
1903 ce->Length = (this_seg_lim - seg) *
1904 sizeof (SGE_SIMPLE32);
1905 ce->NextChainOffset = ce->Length >> 2;
1906 ce->Length += sizeof (SGE_CHAIN32);
1907 } else {
1908 this_seg_lim = nseg;
1909 ce->Length = (this_seg_lim - seg) *
1910 sizeof (SGE_SIMPLE32);
1911 }
1912 ce->Length = htole16(ce->Length);
1913
1914 /*
1915 * Fill in the chain list SGE elements with our segment data.
1916 *
1917 * If we're the last element in this chain list, set the last
1918 * element flag. If we're the completely last element period,
1919 * set the end of list and end of buffer flags.
1920 */
1921 while (seg < this_seg_lim) {
1922 memset(se, 0, sizeof (*se));
1923 se->Address = htole32(dm_segs->ds_addr);
1924
1925 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1926 tf = flags;
1927 if (seg == this_seg_lim - 1) {
1928 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1929 }
1930 if (seg == nseg - 1) {
1931 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1932 MPI_SGE_FLAGS_END_OF_BUFFER;
1933 }
1934 MPI_pSGE_SET_FLAGS(se, tf);
1935 se->FlagsLength = htole32(se->FlagsLength);
1936 se++;
1937 seg++;
1938 dm_segs++;
1939 }
1940
1941 next_chain:
1942 /*
1943 * If we have more segments to do and we've used up all of
1944 * the space in a request area, go allocate another one
1945 * and chain to that.
1946 */
1947 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1948 request_t *nrq;
1949
1950 nrq = mpt_get_request(mpt, FALSE);
1951
1952 if (nrq == NULL) {
1953 error = ENOMEM;
1954 goto bad;
1955 }
1956
1957 /*
1958 * Append the new request area on the tail of our list.
1959 */
1960 if ((trq = req->chain) == NULL) {
1961 req->chain = nrq;
1962 } else {
1963 while (trq->chain != NULL) {
1964 trq = trq->chain;
1965 }
1966 trq->chain = nrq;
1967 }
1968 trq = nrq;
1969 mpt_off = trq->req_vbuf;
1970 if (mpt->verbose >= MPT_PRT_DEBUG) {
1971 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1972 }
1973 nxt_off = 0;
1974 }
1975 }
1976 out:
1977
1978 /*
1979 * Last time we need to check if this CCB needs to be aborted.
1980 */
1981 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1982 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1983 request_t *cmd_req =
1984 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1985 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1986 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1987 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1988 }
1989 mpt_prt(mpt,
1990 "mpt_execute_req: I/O cancelled (status 0x%x)\n",
1991 ccb->ccb_h.status & CAM_STATUS_MASK);
1992 if (nseg) {
1993 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1994 }
1995 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1996 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
1997 xpt_done(ccb);
1998 mpt_free_request(mpt, req);
1999 return;
2000 }
2001
2002 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2003 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2004 mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout,
2005 mpt_timeout, ccb);
2006 }
2007 if (mpt->verbose > MPT_PRT_DEBUG) {
2008 int nc = 0;
2009 mpt_print_request(req->req_vbuf);
2010 for (trq = req->chain; trq; trq = trq->chain) {
2011 printf(" Additional Chain Area %d\n", nc++);
2012 mpt_dump_sgl(trq->req_vbuf, 0);
2013 }
2014 }
2015
2016 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2017 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2018 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2019 #ifdef WE_TRUST_AUTO_GOOD_STATUS
2020 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2021 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2022 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2023 } else {
2024 tgt->state = TGT_STATE_MOVING_DATA;
2025 }
2026 #else
2027 tgt->state = TGT_STATE_MOVING_DATA;
2028 #endif
2029 }
2030 mpt_send_cmd(mpt, req);
2031 }
2032
2033 static void
2034 mpt_start(struct cam_sim *sim, union ccb *ccb)
2035 {
2036 request_t *req;
2037 struct mpt_softc *mpt;
2038 MSG_SCSI_IO_REQUEST *mpt_req;
2039 struct ccb_scsiio *csio = &ccb->csio;
2040 struct ccb_hdr *ccbh = &ccb->ccb_h;
2041 bus_dmamap_callback_t *cb;
2042 target_id_t tgt;
2043 int raid_passthru;
2044 int error;
2045
2046 /* Get the pointer for the physical adapter */
2047 mpt = ccb->ccb_h.ccb_mpt_ptr;
2048 raid_passthru = (sim == mpt->phydisk_sim);
2049
2050 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2051 if (mpt->outofbeer == 0) {
2052 mpt->outofbeer = 1;
2053 xpt_freeze_simq(mpt->sim, 1);
2054 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2055 }
2056 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2057 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2058 xpt_done(ccb);
2059 return;
2060 }
2061 #ifdef INVARIANTS
2062 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2063 #endif
2064
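	/*
	 * Pick the dmamap callback that builds the SGL: 64-bit SGEs when
	 * bus addresses are wider than 32 bits, 32-bit SGEs otherwise.
	 */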
2065 if (sizeof (bus_addr_t) > 4) {
2066 cb = mpt_execute_req_a64;
2067 } else {
2068 cb = mpt_execute_req;
2069 }
2070
2071 /*
2072 * Link the ccb and the request structure so we can find
2073 * the other knowing either the request or the ccb
2074 */
2075 req->ccb = ccb;
2076 ccb->ccb_h.ccb_req_ptr = req;
2077
2078 /* Now we build the command for the IOC */
2079 mpt_req = req->req_vbuf;
2080 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2081
2082 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2083 if (raid_passthru) {
2084 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2085 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2086 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2087 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2088 xpt_done(ccb);
2089 return;
2090 }
2091 mpt_req->Bus = 0; /* we never set bus here */
2092 } else {
2093 tgt = ccb->ccb_h.target_id;
2094 mpt_req->Bus = 0; /* XXX */
2095
2096 }
2097 mpt_req->SenseBufferLength =
2098 (csio->sense_len < MPT_SENSE_SIZE) ?
2099 csio->sense_len : MPT_SENSE_SIZE;
2100
2101 /*
2102 * We use the message context to find the request structure when we
2103 * get the command completion interrupt from the IOC.
2104 */
2105 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2106
2107 /* Which physical device to do the I/O on */
2108 mpt_req->TargetID = tgt;
2109
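	/* The LUN field is an 8-byte SCSI LUN, stored big-endian. */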
2110 be64enc(mpt_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun));
2111
2112 /* Set the direction of the transfer */
2113 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2114 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2115 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2116 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2117 } else {
2118 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2119 }
2120
2121 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2122 switch(ccb->csio.tag_action) {
2123 case MSG_HEAD_OF_Q_TAG:
2124 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2125 break;
2126 case MSG_ACA_TASK:
2127 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2128 break;
2129 case MSG_ORDERED_Q_TAG:
2130 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2131 break;
2132 case MSG_SIMPLE_Q_TAG:
2133 default:
2134 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2135 break;
2136 }
2137 } else {
2138 if (mpt->is_fc || mpt->is_sas) {
2139 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2140 } else {
2141 /* XXX No such thing for a target doing packetized. */
2142 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2143 }
2144 }
2145
2146 if (mpt->is_spi) {
2147 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2148 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2149 }
2150 }
2151 mpt_req->Control = htole32(mpt_req->Control);
2152
2153 /* Copy the scsi command block into place */
2154 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2155 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2156 } else {
2157 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2158 }
2159
2160 mpt_req->CDBLength = csio->cdb_len;
2161 mpt_req->DataLength = htole32(csio->dxfer_len);
2162 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2163
2164 /*
2165 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2166 */
2167 if (mpt->verbose == MPT_PRT_DEBUG) {
2168 U32 df;
2169 mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2170 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2171 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2172 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2173 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2174 mpt_prtc(mpt, "(%s %u byte%s ",
2175 (df == MPI_SCSIIO_CONTROL_READ)?
2176 "read" : "write", csio->dxfer_len,
2177 (csio->dxfer_len == 1)? ")" : "s)");
2178 }
2179 mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt,
2180 (uintmax_t)ccb->ccb_h.target_lun, req, req->serno);
2181 }
2182
2183 error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb,
2184 req, 0);
2185 if (error == EINPROGRESS) {
2186 /*
2187 * So as to maintain ordering, freeze the controller queue
2188 * until our mapping is returned.
2189 */
2190 xpt_freeze_simq(mpt->sim, 1);
2191 ccbh->status |= CAM_RELEASE_SIMQ;
2192 }
2193 }
2194
2195 static int
2196 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2197 int sleep_ok)
2198 {
2199 int error;
2200 uint16_t status;
2201 uint8_t response;
2202
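	/*
	 * Use a target reset if a specific target was named, otherwise
	 * reset the whole bus; FC ports also get the LIP reset option.
	 */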
2203 error = mpt_scsi_send_tmf(mpt,
2204 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2205 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2206 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2207 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2208 0, /* XXX How do I get the channel ID? */
2209 tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2210 lun != CAM_LUN_WILDCARD ? lun : 0,
2211 0, sleep_ok);
2212
2213 if (error != 0) {
2214 /*
2215 * mpt_scsi_send_tmf hard resets on failure, so no
2216 * need to do so here.
2217 */
2218 mpt_prt(mpt,
2219 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2220 return (EIO);
2221 }
2222
2223 /* Wait for bus reset to be processed by the IOC. */
2224 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2225 REQ_STATE_DONE, sleep_ok, 5000);
2226
2227 status = le16toh(mpt->tmf_req->IOCStatus);
2228 response = mpt->tmf_req->ResponseCode;
2229 mpt->tmf_req->state = REQ_STATE_FREE;
2230
2231 if (error) {
2232 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2233 "Resetting controller.\n");
2234 mpt_reset(mpt, TRUE);
2235 return (ETIMEDOUT);
2236 }
2237
2238 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2239 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2240 "Resetting controller.\n", status);
2241 mpt_reset(mpt, TRUE);
2242 return (EIO);
2243 }
2244
2245 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2246 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2247 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2248 "Resetting controller.\n", response);
2249 mpt_reset(mpt, TRUE);
2250 return (EIO);
2251 }
2252 return (0);
2253 }
2254
2255 static int
2256 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2257 {
2258 int r = 0;
2259 request_t *req;
2260 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2261
2262 req = mpt_get_request(mpt, FALSE);
2263 if (req == NULL) {
2264 return (ENOMEM);
2265 }
2266 fc = req->req_vbuf;
2267 memset(fc, 0, sizeof(*fc));
2268 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2269 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2270 fc->MsgContext = htole32(req->index | fc_els_handler_id);
2271 mpt_send_cmd(mpt, req);
2272 if (dowait) {
2273 r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2274 REQ_STATE_DONE, FALSE, 60 * 1000);
2275 if (r == 0) {
2276 mpt_free_request(mpt, req);
2277 }
2278 }
2279 return (r);
2280 }
2281
2282 static int
2283 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2284 MSG_EVENT_NOTIFY_REPLY *msg)
2285 {
2286 uint32_t data0, data1;
2287
2288 data0 = le32toh(msg->Data[0]);
2289 data1 = le32toh(msg->Data[1]);
2290 switch(msg->Event & 0xFF) {
2291 case MPI_EVENT_UNIT_ATTENTION:
2292 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2293 (data0 >> 8) & 0xff, data0 & 0xff);
2294 break;
2295
2296 case MPI_EVENT_IOC_BUS_RESET:
2297 /* We generated a bus reset */
2298 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2299 (data0 >> 8) & 0xff);
2300 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2301 break;
2302
2303 case MPI_EVENT_EXT_BUS_RESET:
2304 /* Someone else generated a bus reset */
2305 mpt_prt(mpt, "External Bus Reset Detected\n");
2306 /*
2307 * These replies don't return EventData like the MPI
2308 * spec says they do
2309 */
2310 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2311 break;
2312
2313 case MPI_EVENT_RESCAN:
2314 {
2315 union ccb *ccb;
2316 uint32_t pathid;
2317 /*
2318 * In general this means a device has been added to the loop.
2319 */
2320 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2321 if (mpt->ready == 0) {
2322 break;
2323 }
2324 if (mpt->phydisk_sim) {
2325 pathid = cam_sim_path(mpt->phydisk_sim);
2326 } else {
2327 pathid = cam_sim_path(mpt->sim);
2328 }
2329 /*
2330 * Allocate a CCB, create a wildcard path for this bus,
2331 * and schedule a rescan.
2332 */
2333 ccb = xpt_alloc_ccb_nowait();
2334 if (ccb == NULL) {
2335 mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2336 break;
2337 }
2338
2339 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
2340 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2341 mpt_prt(mpt, "unable to create path for rescan\n");
2342 xpt_free_ccb(ccb);
2343 break;
2344 }
2345 xpt_rescan(ccb);
2346 break;
2347 }
2348
2349 case MPI_EVENT_LINK_STATUS_CHANGE:
2350 mpt_prt(mpt, "Port %d: LinkState: %s\n",
2351 (data1 >> 8) & 0xff,
2352 ((data0 & 0xff) == 0)? "Failed" : "Active");
2353 break;
2354
2355 case MPI_EVENT_LOOP_STATE_CHANGE:
2356 switch ((data0 >> 16) & 0xff) {
2357 case 0x01:
2358 mpt_prt(mpt,
2359 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2360 "(Loop Initialization)\n",
2361 (data1 >> 8) & 0xff,
2362 (data0 >> 8) & 0xff,
2363 (data0 ) & 0xff);
2364 switch ((data0 >> 8) & 0xff) {
2365 case 0xF7:
2366 if ((data0 & 0xff) == 0xF7) {
2367 mpt_prt(mpt, "Device needs AL_PA\n");
2368 } else {
2369 mpt_prt(mpt, "Device %02x doesn't like "
2370 "FC performance\n",
2371 data0 & 0xFF);
2372 }
2373 break;
2374 case 0xF8:
2375 if ((data0 & 0xff) == 0xF7) {
2376 mpt_prt(mpt, "Device had loop failure "
2377 "at its receiver prior to acquiring"
2378 " AL_PA\n");
2379 } else {
2380 mpt_prt(mpt, "Device %02x detected loop"
2381 " failure at its receiver\n",
2382 data0 & 0xFF);
2383 }
2384 break;
2385 default:
2386 mpt_prt(mpt, "Device %02x requests that device "
2387 "%02x reset itself\n",
2388 data0 & 0xFF,
2389 (data0 >> 8) & 0xFF);
2390 break;
2391 }
2392 break;
2393 case 0x02:
2394 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2395 "LPE(%02x,%02x) (Loop Port Enable)\n",
2396 (data1 >> 8) & 0xff, /* Port */
2397 (data0 >> 8) & 0xff, /* Character 3 */
2398 (data0 ) & 0xff /* Character 4 */);
2399 break;
2400 case 0x03:
2401 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2402 "LPB(%02x,%02x) (Loop Port Bypass)\n",
2403 (data1 >> 8) & 0xff, /* Port */
2404 (data0 >> 8) & 0xff, /* Character 3 */
2405 (data0 ) & 0xff /* Character 4 */);
2406 break;
2407 default:
2408 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2409 "FC event (%02x %02x %02x)\n",
2410 (data1 >> 8) & 0xff, /* Port */
2411 (data0 >> 16) & 0xff, /* Event */
2412 (data0 >> 8) & 0xff, /* Character 3 */
2413 (data0 ) & 0xff /* Character 4 */);
2414 }
2415 break;
2416
2417 case MPI_EVENT_LOGOUT:
2418 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2419 (data1 >> 8) & 0xff, data0);
2420 break;
2421 case MPI_EVENT_QUEUE_FULL:
2422 {
2423 struct cam_sim *sim;
2424 struct cam_path *tmppath;
2425 struct ccb_relsim crs;
2426 PTR_EVENT_DATA_QUEUE_FULL pqf;
2427 lun_id_t lun_id;
2428
2429 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2430 pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2431 if (bootverbose) {
2432 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x "
2433 "Depth %d\n",
2434 pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2435 }
2436 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2437 pqf->TargetID) != 0) {
2438 sim = mpt->phydisk_sim;
2439 } else {
2440 sim = mpt->sim;
2441 }
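		/*
		 * Walk every LUN on the reported target and ask CAM to trim
		 * its opening count to just below the queue depth the IOC saw.
		 */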
2442 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2443 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2444 pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2445 mpt_prt(mpt, "unable to create a path to send "
2446 "XPT_REL_SIMQ");
2447 break;
2448 }
2449 memset(&crs, 0, sizeof(crs));
2450 xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2451 crs.ccb_h.func_code = XPT_REL_SIMQ;
2452 crs.ccb_h.flags = CAM_DEV_QFREEZE;
2453 crs.release_flags = RELSIM_ADJUST_OPENINGS;
2454 crs.openings = pqf->CurrentDepth - 1;
2455 xpt_action((union ccb *)&crs);
2456 if (crs.ccb_h.status != CAM_REQ_CMP) {
2457 mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2458 }
2459 xpt_free_path(tmppath);
2460 }
2461 break;
2462 }
2463 case MPI_EVENT_IR_RESYNC_UPDATE:
2464 mpt_prt(mpt, "IR resync update %d completed\n",
2465 (data0 >> 16) & 0xff);
2466 break;
2467 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2468 {
2469 union ccb *ccb;
2470 struct cam_sim *sim;
2471 struct cam_path *tmppath;
2472 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2473
2474 psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2475 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2476 psdsc->TargetID) != 0)
2477 sim = mpt->phydisk_sim;
2478 else
2479 sim = mpt->sim;
2480 switch(psdsc->ReasonCode) {
2481 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2482 ccb = xpt_alloc_ccb_nowait();
2483 if (ccb == NULL) {
2484 mpt_prt(mpt,
2485 "unable to alloc CCB for rescan\n");
2486 break;
2487 }
2488 if (xpt_create_path(&ccb->ccb_h.path, NULL,
2489 cam_sim_path(sim), psdsc->TargetID,
2490 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2491 mpt_prt(mpt,
2492 "unable to create path for rescan\n");
2493 xpt_free_ccb(ccb);
2494 break;
2495 }
2496 xpt_rescan(ccb);
2497 break;
2498 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2499 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2500 psdsc->TargetID, CAM_LUN_WILDCARD) !=
2501 CAM_REQ_CMP) {
2502 mpt_prt(mpt,
2503 "unable to create path for async event");
2504 break;
2505 }
2506 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2507 xpt_free_path(tmppath);
2508 break;
2509 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2510 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2511 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2512 break;
2513 default:
2514 mpt_lprt(mpt, MPT_PRT_WARN,
2515 "SAS device status change: Bus: 0x%02x TargetID: "
2516 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2517 psdsc->TargetID, psdsc->ReasonCode);
2518 break;
2519 }
2520 break;
2521 }
2522 case MPI_EVENT_SAS_DISCOVERY_ERROR:
2523 {
2524 PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2525
2526 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2527 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2528 mpt_lprt(mpt, MPT_PRT_WARN,
2529 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2530 pde->Port, pde->DiscoveryStatus);
2531 break;
2532 }
2533 case MPI_EVENT_EVENT_CHANGE:
2534 case MPI_EVENT_INTEGRATED_RAID:
2535 case MPI_EVENT_IR2:
2536 case MPI_EVENT_LOG_ENTRY_ADDED:
2537 case MPI_EVENT_SAS_DISCOVERY:
2538 case MPI_EVENT_SAS_PHY_LINK_STATUS:
2539 case MPI_EVENT_SAS_SES:
2540 break;
2541 default:
2542 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2543 msg->Event & 0xFF);
2544 return (0);
2545 }
2546 return (1);
2547 }
2548
2549 /*
2550 * Reply path for all SCSI I/O requests, called from our
2551 * interrupt handler by extracting our handler index from
2552 * the MsgContext field of the reply from the IOC.
2553 *
2554 * This routine is optimized for the common case of a
2555 * completion without error. All exception handling is
2556 * offloaded to non-inlined helper routines to minimize
2557 * cache footprint.
2558 */
2559 static int
2560 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2561 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2562 {
2563 MSG_SCSI_IO_REQUEST *scsi_req;
2564 union ccb *ccb;
2565
2566 if (req->state == REQ_STATE_FREE) {
2567 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2568 return (TRUE);
2569 }
2570
2571 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2572 ccb = req->ccb;
2573 if (ccb == NULL) {
2574 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2575 req, req->serno);
2576 return (TRUE);
2577 }
2578
2579 mpt_req_untimeout(req, mpt_timeout, ccb);
2580 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2581
2582 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2583 bus_dmasync_op_t op;
2584
2585 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2586 op = BUS_DMASYNC_POSTREAD;
2587 else
2588 op = BUS_DMASYNC_POSTWRITE;
2589 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2590 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2591 }
2592
2593 if (reply_frame == NULL) {
2594 /*
2595 * Context only reply, completion without error status.
2596 */
2597 ccb->csio.resid = 0;
2598 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2599 ccb->csio.scsi_status = SCSI_STATUS_OK;
2600 } else {
2601 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2602 }
2603
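	/*
	 * If we froze the SIM queue earlier for lack of request structures,
	 * ask CAM to release it now that one is being returned.
	 */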
2604 if (mpt->outofbeer) {
2605 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2606 mpt->outofbeer = 0;
2607 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2608 }
2609 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2610 struct scsi_inquiry_data *iq =
2611 (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2612 if (scsi_req->Function ==
2613 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2614 /*
2615 * Fake out the device type so that only the
2616 * pass-thru device will attach.
2617 */
2618 iq->device &= ~0x1F;
2619 iq->device |= T_NODEVICE;
2620 }
2621 }
2622 if (mpt->verbose == MPT_PRT_DEBUG) {
2623 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2624 req, req->serno);
2625 }
2626 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2627 xpt_done(ccb);
2628 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2629 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2630 } else {
2631 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2632 req, req->serno);
2633 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2634 }
2635 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2636 ("CCB req needed wakeup"));
2637 #ifdef INVARIANTS
2638 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2639 #endif
2640 mpt_free_request(mpt, req);
2641 return (TRUE);
2642 }
2643
2644 static int
2645 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2646 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2647 {
2648 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2649
2650 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2651 #ifdef INVARIANTS
2652 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2653 #endif
2654 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2655 /* Record IOC Status and Response Code of TMF for any waiters. */
2656 req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2657 req->ResponseCode = tmf_reply->ResponseCode;
2658
2659 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2660 req, req->serno, le16toh(tmf_reply->IOCStatus));
2661 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2662 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2663 req->state |= REQ_STATE_DONE;
2664 wakeup(req);
2665 } else {
2666 mpt->tmf_req->state = REQ_STATE_FREE;
2667 }
2668 return (TRUE);
2669 }
2670
2671 /*
2672 * XXX: Move to definitions file
2673 */
2674 #define ELS 0x22
2675 #define FC4LS 0x32
2676 #define ABTS 0x81
2677 #define BA_ACC 0x84
2678
2679 #define LS_RJT 0x01
2680 #define LS_ACC 0x02
2681 #define PLOGI 0x03
2682 #define LOGO 0x05
2683 #define SRR 0x14
2684 #define PRLI 0x20
2685 #define PRLO 0x21
2686 #define ADISC 0x52
2687 #define RSCN 0x61
2688
2689 static void
2690 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2691 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2692 {
2693 uint32_t fl;
2694 MSG_LINK_SERVICE_RSP_REQUEST tmp;
2695 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2696
2697 /*
2698 * We are going to reuse the ELS request to send this response back.
2699 */
2700 rsp = &tmp;
2701 memset(rsp, 0, sizeof(*rsp));
2702
2703 #ifdef USE_IMMEDIATE_LINK_DATA
2704 /*
2705 * Apparently the IMMEDIATE stuff doesn't work.
2706 */
2707 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2708 #endif
2709 rsp->RspLength = length;
2710 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2711 rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2712
2713 /*
2714 * Copy over information from the original reply frame to
2715 * its correct place in the response.
2716 */
2717 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2718
2719 /*
2720 * And now copy back the temporary area to the original frame.
2721 */
2722 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2723 rsp = req->req_vbuf;
2724
2725 #ifdef USE_IMMEDIATE_LINK_DATA
2726 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2727 #else
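	/*
	 * Non-immediate case: the response payload was staged in the second
	 * half of this request's frame, so point a single simple SGE at it.
	 */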
2728 {
2729 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2730 bus_addr_t paddr = req->req_pbuf;
2731 paddr += MPT_RQSL(mpt);
2732
2733 fl =
2734 MPI_SGE_FLAGS_HOST_TO_IOC |
2735 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2736 MPI_SGE_FLAGS_LAST_ELEMENT |
2737 MPI_SGE_FLAGS_END_OF_LIST |
2738 MPI_SGE_FLAGS_END_OF_BUFFER;
2739 fl <<= MPI_SGE_FLAGS_SHIFT;
2740 fl |= (length);
2741 se->FlagsLength = htole32(fl);
2742 se->Address = htole32((uint32_t) paddr);
2743 }
2744 #endif
2745
2746 /*
2747 * Send it on...
2748 */
2749 mpt_send_cmd(mpt, req);
2750 }
2751
2752 static int
2753 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2754 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2755 {
2756 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2757 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2758 U8 rctl;
2759 U8 type;
2760 U8 cmd;
2761 U16 status = le16toh(reply_frame->IOCStatus);
2762 U32 *elsbuf;
2763 int ioindex;
2764 int do_refresh = TRUE;
2765
2766 #ifdef INVARIANTS
2767 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2768 ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2769 req, req->serno, rp->Function));
2770 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2771 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2772 } else {
2773 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2774 }
2775 #endif
2776 mpt_lprt(mpt, MPT_PRT_DEBUG,
2777 "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2778 req, req->serno, reply_frame, reply_frame->Function);
2779
2780 if (status != MPI_IOCSTATUS_SUCCESS) {
2781 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2782 status, reply_frame->Function);
2783 if (status == MPI_IOCSTATUS_INVALID_STATE) {
2784 /*
2785 * XXX: to get around shutdown issue
2786 */
2787 mpt->disabled = 1;
2788 return (TRUE);
2789 }
2790 return (TRUE);
2791 }
2792
2793 /*
2794 * If this reply completes a link service response we sent, we
2795 * recycle the request by reposting it as a fresh link service buffer.
2796 *
2797 * The request pointer is bogus in this case and we have to fetch
2798 * it based upon the TransactionContext.
2799 */
2800 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2801 /* Freddie Uncle Charlie Katie */
2802 /* We don't get the IOINDEX as part of the Link Svc Rsp */
2803 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2804 if (mpt->els_cmd_ptrs[ioindex] == req) {
2805 break;
2806 }
2807
2808 KASSERT(ioindex < mpt->els_cmds_allocated,
2809 ("can't find my mommie!"));
2810
2811 /* remove from active list as we're going to re-post it */
2812 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2813 req->state &= ~REQ_STATE_QUEUED;
2814 req->state |= REQ_STATE_DONE;
2815 mpt_fc_post_els(mpt, req, ioindex);
2816 return (TRUE);
2817 }
2818
2819 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2820 /* remove from active list as we're done */
2821 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2822 req->state &= ~REQ_STATE_QUEUED;
2823 req->state |= REQ_STATE_DONE;
2824 if (req->state & REQ_STATE_TIMEDOUT) {
2825 mpt_lprt(mpt, MPT_PRT_DEBUG,
2826 "Sync Primitive Send Completed After Timeout\n");
2827 mpt_free_request(mpt, req);
2828 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2829 mpt_lprt(mpt, MPT_PRT_DEBUG,
2830 "Async Primitive Send Complete\n");
2831 mpt_free_request(mpt, req);
2832 } else {
2833 mpt_lprt(mpt, MPT_PRT_DEBUG,
2834 "Sync Primitive Send Complete- Waking Waiter\n");
2835 wakeup(req);
2836 }
2837 return (TRUE);
2838 }
2839
2840 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2841 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2842 "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2843 rp->MsgLength, rp->MsgFlags);
2844 return (TRUE);
2845 }
2846
2847 if (rp->MsgLength <= 5) {
2848 /*
2849 * This is just an ack of an original ELS buffer post
2850 */
2851 mpt_lprt(mpt, MPT_PRT_DEBUG,
2852 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2853 return (TRUE);
2854 }
2855
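	/*
	 * Extract the FC R_CTL, TYPE and ELS command code from the posted
	 * buffer so we can dispatch on the incoming frame below.
	 */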
2856 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2857 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2858
2859 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2860 cmd = be32toh(elsbuf[0]) >> 24;
2861
2862 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2863 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2864 return (TRUE);
2865 }
2866
2867 ioindex = le32toh(rp->TransactionContext);
2868 req = mpt->els_cmd_ptrs[ioindex];
2869
2870 if (rctl == ELS && type == 1) {
2871 switch (cmd) {
2872 case PRLI:
2873 /*
2874 * Send back a PRLI ACC
2875 */
2876 mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2877 le32toh(rp->Wwn.PortNameHigh),
2878 le32toh(rp->Wwn.PortNameLow));
2879 elsbuf[0] = htobe32(0x02100014);
2880 elsbuf[1] |= htobe32(0x00000100);
2881 elsbuf[4] = htobe32(0x00000002);
2882 if (mpt->role & MPT_ROLE_TARGET)
2883 elsbuf[4] |= htobe32(0x00000010);
2884 if (mpt->role & MPT_ROLE_INITIATOR)
2885 elsbuf[4] |= htobe32(0x00000020);
2886 /* remove from active list as we're done */
2887 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2888 req->state &= ~REQ_STATE_QUEUED;
2889 req->state |= REQ_STATE_DONE;
2890 mpt_fc_els_send_response(mpt, req, rp, 20);
2891 do_refresh = FALSE;
2892 break;
2893 case PRLO:
2894 memset(elsbuf, 0, 5 * (sizeof (U32)));
2895 elsbuf[0] = htobe32(0x02100014);
2896 elsbuf[1] = htobe32(0x08000100);
2897 mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2898 le32toh(rp->Wwn.PortNameHigh),
2899 le32toh(rp->Wwn.PortNameLow));
2900 /* remove from active list as we're done */
2901 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2902 req->state &= ~REQ_STATE_QUEUED;
2903 req->state |= REQ_STATE_DONE;
2904 mpt_fc_els_send_response(mpt, req, rp, 20);
2905 do_refresh = FALSE;
2906 break;
2907 default:
2908 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2909 break;
2910 }
2911 } else if (rctl == ABTS && type == 0) {
2912 uint16_t rx_id = le16toh(rp->Rxid);
2913 uint16_t ox_id = le16toh(rp->Oxid);
2914 mpt_tgt_state_t *tgt;
2915 request_t *tgt_req = NULL;
2916 union ccb *ccb;
2917 uint32_t ct_id;
2918
2919 mpt_prt(mpt,
2920 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2921 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2922 le32toh(rp->Wwn.PortNameLow));
2923 if (rx_id >= mpt->mpt_max_tgtcmds) {
2924 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2925 } else if (mpt->tgt_cmd_ptrs == NULL) {
2926 mpt_prt(mpt, "No TGT CMD PTRS\n");
2927 } else {
2928 tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2929 }
2930 if (tgt_req == NULL) {
2931 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
2932 goto skip;
2933 }
2934 tgt = MPT_TGT_STATE(mpt, tgt_req);
2935
2936 /* Check to make sure we have the correct command. */
2937 ct_id = GET_IO_INDEX(tgt->reply_desc);
2938 if (ct_id != rx_id) {
2939 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2940 "RX_ID received=0x%x, in cmd=0x%x\n", rx_id, ct_id);
2941 goto skip;
2942 }
2943 if (tgt->itag != ox_id) {
2944 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2945 "OX_ID received=0x%x, in cmd=0x%x\n", ox_id, tgt->itag);
2946 goto skip;
2947 }
2948
2949 if ((ccb = tgt->ccb) != NULL) {
2950 mpt_prt(mpt, "CCB (%p): lun %jx flags %x status %x\n",
2951 ccb, (uintmax_t)ccb->ccb_h.target_lun,
2952 ccb->ccb_h.flags, ccb->ccb_h.status);
2953 }
2954 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2955 "%x nxfers %x\n", tgt->state, tgt->resid,
2956 tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers);
2957 if (mpt_abort_target_cmd(mpt, tgt_req))
2958 mpt_prt(mpt, "unable to start TargetAbort\n");
2959
2960 skip:
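		/* Build a minimal BA_ACC payload echoing the OX_ID/RX_ID pair. */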
2961 memset(elsbuf, 0, 5 * (sizeof (U32)));
2962 elsbuf[0] = htobe32(0);
2963 elsbuf[1] = htobe32((ox_id << 16) | rx_id);
2964 elsbuf[2] = htobe32(0x000ffff);
2965 /*
2966 * Dork with the reply frame so that the response to it
2967 * will be correct.
2968 */
2969 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
2970 /* remove from active list as we're done */
2971 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2972 req->state &= ~REQ_STATE_QUEUED;
2973 req->state |= REQ_STATE_DONE;
2974 mpt_fc_els_send_response(mpt, req, rp, 12);
2975 do_refresh = FALSE;
2976 } else {
2977 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
2978 }
2979 if (do_refresh == TRUE) {
2980 /* remove from active list as we're done */
2981 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2982 req->state &= ~REQ_STATE_QUEUED;
2983 req->state |= REQ_STATE_DONE;
2984 mpt_fc_post_els(mpt, req, ioindex);
2985 }
2986 return (TRUE);
2987 }
2988
2989 /*
2990 * Clean up all SCSI Initiator personality state in response
2991 * to a controller reset.
2992 */
2993 static void
2994 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
2995 {
2996
2997 /*
2998 * The pending list is already run down by
2999 * the generic handler. Perform the same
3000 * operation on the timed out request list.
3001 */
3002 mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3003 MPI_IOCSTATUS_INVALID_STATE);
3004
3005 /*
3006 * XXX: We need to repost ELS and Target Command Buffers?
3007 */
3008
3009 /*
3010 * Inform the XPT that a bus reset has occurred.
3011 */
3012 xpt_async(AC_BUS_RESET, mpt->path, NULL);
3013 }
3014
3015 /*
3016 * Parse additional completion information in the reply
3017 * frame for SCSI I/O requests.
3018 */
3019 static int
3020 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3021 MSG_DEFAULT_REPLY *reply_frame)
3022 {
3023 union ccb *ccb;
3024 MSG_SCSI_IO_REPLY *scsi_io_reply;
3025 u_int ioc_status;
3026 u_int sstate;
3027
3028 MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3029 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3030 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3031 ("MPT SCSI I/O Handler called with incorrect reply type"));
3032 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3033 ("MPT SCSI I/O Handler called with continuation reply"));
3034
3035 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3036 ioc_status = le16toh(scsi_io_reply->IOCStatus);
3037 ioc_status &= MPI_IOCSTATUS_MASK;
3038 sstate = scsi_io_reply->SCSIState;
3039
3040 ccb = req->ccb;
3041 ccb->csio.resid =
3042 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3043
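	/*
	 * Copy any autosense data the IOC gathered into the CCB and record
	 * how much of the requested sense length was actually returned.
	 */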
3044 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3045 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3046 uint32_t sense_returned;
3047
3048 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3049
3050 sense_returned = le32toh(scsi_io_reply->SenseCount);
3051 if (sense_returned < ccb->csio.sense_len)
3052 ccb->csio.sense_resid = ccb->csio.sense_len -
3053 sense_returned;
3054 else
3055 ccb->csio.sense_resid = 0;
3056
3057 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
3058 bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3059 min(ccb->csio.sense_len, sense_returned));
3060 }
3061
3062 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3063 /*
3064 * Tag messages rejected, but non-tagged retry
3065 * was successful.
3066 XXXX
3067 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3068 */
3069 }
3070
3071 switch(ioc_status) {
3072 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3073 /*
3074 * XXX
3075 * Linux driver indicates that a zero
3076 * transfer length with this error code
3077 * indicates a CRC error.
3078 *
3079 * No need to swap the bytes for checking
3080 * against zero.
3081 */
3082 if (scsi_io_reply->TransferCount == 0) {
3083 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3084 break;
3085 }
3086 /* FALLTHROUGH */
3087 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3088 case MPI_IOCSTATUS_SUCCESS:
3089 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3090 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3091 /*
3092 * Status was never returned for this transaction.
3093 */
3094 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3095 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3096 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3097 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3098 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3099 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3100 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3101 /* XXX Handle SPI-Packet and FCP-2 response info. */
3102 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3103 } else
3104 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3105 break;
3106 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3107 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3108 break;
3109 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3110 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3111 break;
3112 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3113 /*
3114 * Since selection timeouts and "device really not
3115 * there" are grouped into this error code, report
3116 * selection timeout. Selection timeouts are
3117 * typically retried before giving up on the device
3118 * whereas "device not there" errors are considered
3119 * unretryable.
3120 */
3121 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3122 break;
3123 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3124 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3125 break;
3126 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3127 mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3128 break;
3129 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3130 mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3131 break;
3132 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3133 ccb->ccb_h.status = CAM_UA_TERMIO;
3134 break;
3135 case MPI_IOCSTATUS_INVALID_STATE:
3136 /*
3137 * The IOC has been reset. Emulate a bus reset.
3138 */
3139 /* FALLTHROUGH */
3140 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3141 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3142 break;
3143 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3144 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3145 /*
3146 * Don't clobber any timeout status that has
3147 * already been set for this transaction. We
3148 * want the SCSI layer to be able to differentiate
3149 * between the command we aborted due to timeout
3150 * and any innocent bystanders.
3151 */
3152 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3153 break;
3154 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3155 break;
3156
3157 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3158 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3159 break;
3160 case MPI_IOCSTATUS_BUSY:
3161 mpt_set_ccb_status(ccb, CAM_BUSY);
3162 break;
3163 case MPI_IOCSTATUS_INVALID_FUNCTION:
3164 case MPI_IOCSTATUS_INVALID_SGL:
3165 case MPI_IOCSTATUS_INTERNAL_ERROR:
3166 case MPI_IOCSTATUS_INVALID_FIELD:
3167 default:
3168 /* XXX
3169 * Some of the above may need to kick
3170 * off a recovery action!!!!
3171 */
3172 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3173 break;
3174 }
3175
3176 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3177 mpt_freeze_ccb(ccb);
3178 }
3179
3180 return (TRUE);
3181 }
3182
3183 static void
3184 mpt_action(struct cam_sim *sim, union ccb *ccb)
3185 {
3186 struct mpt_softc *mpt;
3187 struct ccb_trans_settings *cts;
3188 target_id_t tgt;
3189 lun_id_t lun;
3190 int raid_passthru;
3191
3192 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3193
3194 mpt = (struct mpt_softc *)cam_sim_softc(sim);
3195 raid_passthru = (sim == mpt->phydisk_sim);
3196 MPT_LOCK_ASSERT(mpt);
3197
3198 tgt = ccb->ccb_h.target_id;
3199 lun = ccb->ccb_h.target_lun;
3200 if (raid_passthru &&
3201 ccb->ccb_h.func_code != XPT_PATH_INQ &&
3202 ccb->ccb_h.func_code != XPT_RESET_BUS &&
3203 ccb->ccb_h.func_code != XPT_RESET_DEV) {
3204 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3205 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3206 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3207 xpt_done(ccb);
3208 return;
3209 }
3210 }
3211 ccb->ccb_h.ccb_mpt_ptr = mpt;
3212
3213 switch (ccb->ccb_h.func_code) {
3214 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3215 /*
3216 * Do a couple of preliminary checks...
3217 */
3218 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3219 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3220 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3221 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3222 break;
3223 }
3224 }
3225 /* Max supported CDB length is 16 bytes */
3226 /* XXX Unless we implement the new 32byte message type */
3227 if (ccb->csio.cdb_len >
3228 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3229 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3230 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3231 break;
3232 }
3233 #ifdef MPT_TEST_MULTIPATH
3234 if (mpt->failure_id == ccb->ccb_h.target_id) {
3235 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3236 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3237 break;
3238 }
3239 #endif
3240 ccb->csio.scsi_status = SCSI_STATUS_OK;
3241 mpt_start(sim, ccb);
3242 return;
3243
3244 case XPT_RESET_BUS:
3245 if (raid_passthru) {
3246 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3247 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3248 break;
3249 }
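		/* FALLTHROUGH */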
3250 case XPT_RESET_DEV:
3251 if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3252 if (bootverbose) {
3253 xpt_print(ccb->ccb_h.path, "reset bus\n");
3254 }
3255 } else {
3256 xpt_print(ccb->ccb_h.path, "reset device\n");
3257 }
3258 (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3259
3260 /*
3261 * mpt_bus_reset is always successful in that it
3262 * will fall back to a hard reset should a bus
3263 * reset attempt fail.
3264 */
3265 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3266 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3267 break;
3268
3269 case XPT_ABORT:
3270 {
3271 union ccb *accb = ccb->cab.abort_ccb;
3272 switch (accb->ccb_h.func_code) {
3273 case XPT_ACCEPT_TARGET_IO:
3274 case XPT_IMMEDIATE_NOTIFY:
3275 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3276 break;
3277 case XPT_CONT_TARGET_IO:
3278 mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3279 ccb->ccb_h.status = CAM_UA_ABORT;
3280 break;
3281 case XPT_SCSI_IO:
3282 ccb->ccb_h.status = CAM_UA_ABORT;
3283 break;
3284 default:
3285 ccb->ccb_h.status = CAM_REQ_INVALID;
3286 break;
3287 }
3288 break;
3289 }
3290
3291 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3292
3293 #define DP_DISC_ENABLE 0x1
3294 #define DP_DISC_DISABL 0x2
3295 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
3296
3297 #define DP_TQING_ENABLE 0x4
3298 #define DP_TQING_DISABL 0x8
3299 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
3300
3301 #define DP_WIDE 0x10
3302 #define DP_NARROW 0x20
3303 #define DP_WIDTH (DP_WIDE|DP_NARROW)
3304
3305 #define DP_SYNC 0x40
3306
3307 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
3308 {
3309 struct ccb_trans_settings_scsi *scsi;
3310 struct ccb_trans_settings_spi *spi;
3311 uint8_t dval;
3312 u_int period;
3313 u_int offset;
3314 int i, j;
3315
3316 cts = &ccb->cts;
3317
3318 if (mpt->is_fc || mpt->is_sas) {
3319 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3320 break;
3321 }
3322
3323 scsi = &cts->proto_specific.scsi;
3324 spi = &cts->xport_specific.spi;
3325
3326 /*
3327 * We can be called just to validate transport and proto versions
3328 */
3329 if (scsi->valid == 0 && spi->valid == 0) {
3330 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3331 break;
3332 }
3333
3334 /*
3335 * Skip attempting settings on RAID volume disks.
3336 * Other devices on the bus get the normal treatment.
3337 */
3338 if (mpt->phydisk_sim && raid_passthru == 0 &&
3339 mpt_is_raid_volume(mpt, tgt) != 0) {
3340 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3341 "no transfer settings for RAID vols\n");
3342 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3343 break;
3344 }
3345
3346 i = mpt->mpt_port_page2.PortSettings &
3347 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3348 j = mpt->mpt_port_page2.PortFlags &
3349 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3350 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3351 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3352 mpt_lprt(mpt, MPT_PRT_ALWAYS,
3353 "honoring BIOS transfer negotiations\n");
3354 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3355 break;
3356 }
3357
3358 dval = 0;
3359 period = 0;
3360 offset = 0;
3361
3362 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3363 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3364 DP_DISC_ENABLE : DP_DISC_DISABL;
3365 }
3366
3367 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3368 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3369 DP_TQING_ENABLE : DP_TQING_DISABL;
3370 }
3371
3372 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3373 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3374 DP_WIDE : DP_NARROW;
3375 }
3376
3377 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3378 dval |= DP_SYNC;
3379 offset = spi->sync_offset;
3380 } else {
3381 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3382 &mpt->mpt_dev_page1[tgt];
3383 offset = ptr->RequestedParameters;
3384 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3385 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3386 }
3387 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3388 dval |= DP_SYNC;
3389 period = spi->sync_period;
3390 } else {
3391 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3392 &mpt->mpt_dev_page1[tgt];
3393 period = ptr->RequestedParameters;
3394 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3395 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3396 }
3397
3398 if (dval & DP_DISC_ENABLE) {
3399 mpt->mpt_disc_enable |= (1 << tgt);
3400 } else if (dval & DP_DISC_DISABL) {
3401 mpt->mpt_disc_enable &= ~(1 << tgt);
3402 }
3403 if (dval & DP_TQING_ENABLE) {
3404 mpt->mpt_tag_enable |= (1 << tgt);
3405 } else if (dval & DP_TQING_DISABL) {
3406 mpt->mpt_tag_enable &= ~(1 << tgt);
3407 }
3408 if (dval & DP_WIDTH) {
3409 mpt_setwidth(mpt, tgt, 1);
3410 }
3411 if (dval & DP_SYNC) {
3412 mpt_setsync(mpt, tgt, period, offset);
3413 }
3414 if (dval == 0) {
3415 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3416 break;
3417 }
3418 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3419 "set [%d]: 0x%x period 0x%x offset %d\n",
3420 tgt, dval, period, offset);
3421 if (mpt_update_spi_config(mpt, tgt)) {
3422 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3423 } else {
3424 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3425 }
3426 break;
3427 }
3428 case XPT_GET_TRAN_SETTINGS:
3429 {
3430 struct ccb_trans_settings_scsi *scsi;
3431 cts = &ccb->cts;
3432 cts->protocol = PROTO_SCSI;
3433 if (mpt->is_fc) {
3434 struct ccb_trans_settings_fc *fc =
3435 &cts->xport_specific.fc;
3436 cts->protocol_version = SCSI_REV_SPC;
3437 cts->transport = XPORT_FC;
3438 cts->transport_version = 0;
3439 if (mpt->mpt_fcport_speed != 0) {
3440 fc->valid = CTS_FC_VALID_SPEED;
3441 fc->bitrate = 100000 * mpt->mpt_fcport_speed;
3442 }
3443 } else if (mpt->is_sas) {
3444 struct ccb_trans_settings_sas *sas =
3445 &cts->xport_specific.sas;
3446 cts->protocol_version = SCSI_REV_SPC2;
3447 cts->transport = XPORT_SAS;
3448 cts->transport_version = 0;
3449 sas->valid = CTS_SAS_VALID_SPEED;
3450 sas->bitrate = 300000;
3451 } else {
3452 cts->protocol_version = SCSI_REV_2;
3453 cts->transport = XPORT_SPI;
3454 cts->transport_version = 2;
3455 if (mpt_get_spi_settings(mpt, cts) != 0) {
3456 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3457 break;
3458 }
3459 }
3460 scsi = &cts->proto_specific.scsi;
3461 scsi->valid = CTS_SCSI_VALID_TQ;
3462 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3463 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3464 break;
3465 }
3466 case XPT_CALC_GEOMETRY:
3467 {
3468 struct ccb_calc_geometry *ccg;
3469
3470 ccg = &ccb->ccg;
3471 if (ccg->block_size == 0) {
3472 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3473 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3474 break;
3475 }
3476 cam_calc_geometry(ccg, /* extended */ 1);
3477 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3478 break;
3479 }
3480 case XPT_GET_SIM_KNOB:
3481 {
3482 struct ccb_sim_knob *kp = &ccb->knob;
3483
3484 if (mpt->is_fc) {
3485 kp->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
3486 kp->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
3487 switch (mpt->role) {
3488 case MPT_ROLE_NONE:
3489 kp->xport_specific.fc.role = KNOB_ROLE_NONE;
3490 break;
3491 case MPT_ROLE_INITIATOR:
3492 kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
3493 break;
3494 case MPT_ROLE_TARGET:
3495 kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
3496 break;
3497 case MPT_ROLE_BOTH:
3498 kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
3499 break;
3500 }
3501 kp->xport_specific.fc.valid =
3502 KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
3503 ccb->ccb_h.status = CAM_REQ_CMP;
3504 } else {
3505 ccb->ccb_h.status = CAM_REQ_INVALID;
3506 }
3507 xpt_done(ccb);
3508 break;
3509 }
3510 case XPT_PATH_INQ: /* Path routing inquiry */
3511 {
3512 struct ccb_pathinq *cpi = &ccb->cpi;
3513
3514 cpi->version_num = 1;
3515 cpi->target_sprt = 0;
3516 cpi->hba_eng_cnt = 0;
3517 cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3518 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3519 /*
3520 * FC cards report MAX_DEVICES of 512, but
3521 * the MSG_SCSI_IO_REQUEST target id field
3522 * is only 8 bits. Until we fix the driver
3523 * to support 'channels' for bus overflow,
3524 * just limit it.
3525 */
3526 if (cpi->max_target > 255) {
3527 cpi->max_target = 255;
3528 }
3529
3530 /*
3531 * VMware ESX reports > 16 devices and then dies when we probe.
3532 */
3533 if (mpt->is_spi && cpi->max_target > 15) {
3534 cpi->max_target = 15;
3535 }
3536 if (mpt->is_spi)
3537 cpi->max_lun = 7;
3538 else
3539 cpi->max_lun = MPT_MAX_LUNS;
3540 cpi->initiator_id = mpt->mpt_ini_id;
3541 cpi->bus_id = cam_sim_bus(sim);
3542
3543 /*
3544 * The base speed is the speed of the underlying connection.
3545 */
3546 cpi->protocol = PROTO_SCSI;
3547 if (mpt->is_fc) {
3548 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
3549 PIM_EXTLUNS;
3550 cpi->base_transfer_speed = 100000;
3551 cpi->hba_inquiry = PI_TAG_ABLE;
3552 cpi->transport = XPORT_FC;
3553 cpi->transport_version = 0;
3554 cpi->protocol_version = SCSI_REV_SPC;
3555 cpi->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
3556 cpi->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
3557 cpi->xport_specific.fc.port = mpt->scinfo.fc.portid;
3558 cpi->xport_specific.fc.bitrate =
3559 100000 * mpt->mpt_fcport_speed;
3560 } else if (mpt->is_sas) {
3561 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED |
3562 PIM_EXTLUNS;
3563 cpi->base_transfer_speed = 300000;
3564 cpi->hba_inquiry = PI_TAG_ABLE;
3565 cpi->transport = XPORT_SAS;
3566 cpi->transport_version = 0;
3567 cpi->protocol_version = SCSI_REV_SPC2;
3568 } else {
3569 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED |
3570 PIM_EXTLUNS;
3571 cpi->base_transfer_speed = 3300;
3572 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3573 cpi->transport = XPORT_SPI;
3574 cpi->transport_version = 2;
3575 cpi->protocol_version = SCSI_REV_2;
3576 }
3577
3578 /*
3579 * We give our fake RAID passthru bus a width that is MaxPhysDisks
3580 * wide and restrict it to one lun.
3581 */
3582 if (raid_passthru) {
3583 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3584 cpi->initiator_id = cpi->max_target + 1;
3585 cpi->max_lun = 0;
3586 }
3587
3588 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3589 cpi->hba_misc |= PIM_NOINITIATOR;
3590 }
3591 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3592 cpi->target_sprt =
3593 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3594 } else {
3595 cpi->target_sprt = 0;
3596 }
3597 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3598 strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3599 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3600 cpi->unit_number = cam_sim_unit(sim);
3601 cpi->ccb_h.status = CAM_REQ_CMP;
3602 break;
3603 }
3604 case XPT_EN_LUN: /* Enable LUN as a target */
3605 {
3606 int result;
3607
3608 if (ccb->cel.enable)
3609 result = mpt_enable_lun(mpt,
3610 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3611 else
3612 result = mpt_disable_lun(mpt,
3613 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3614 if (result == 0) {
3615 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3616 } else {
3617 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3618 }
3619 break;
3620 }
3621 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */
3622 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
3623 {
3624 tgt_resource_t *trtp;
3625 lun_id_t lun = ccb->ccb_h.target_lun;
3626 ccb->ccb_h.sim_priv.entries[0].field = 0;
3627 ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3628
3629 if (lun == CAM_LUN_WILDCARD) {
3630 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3631 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3632 break;
3633 }
3634 trtp = &mpt->trt_wildcard;
3635 } else if (lun >= MPT_MAX_LUNS) {
3636 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3637 break;
3638 } else {
3639 trtp = &mpt->trt[lun];
3640 }
3641 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3642 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3643 "Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun);
3644 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3645 sim_links.stqe);
3646 } else {
3647 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3648 "Put FREE INOT lun %jx\n", (uintmax_t)lun);
3649 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3650 sim_links.stqe);
3651 }
3652 mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3653 return;
3654 }
3655 case XPT_NOTIFY_ACKNOWLEDGE: /* Task management request done. */
3656 {
3657 request_t *req = MPT_TAG_2_REQ(mpt, ccb->cna2.tag_id);
3658
3659 mpt_lprt(mpt, MPT_PRT_DEBUG, "Got Notify ACK\n");
3660 mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL, 0);
3661 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3662 break;
3663 }
3664 case XPT_CONT_TARGET_IO:
3665 mpt_target_start_io(mpt, ccb);
3666 return;
3667
3668 default:
3669 ccb->ccb_h.status = CAM_REQ_INVALID;
3670 break;
3671 }
3672 xpt_done(ccb);
3673 }
3674
3675 static int
3676 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3677 {
3678 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3679 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3680 target_id_t tgt;
3681 uint32_t dval, pval, oval;
3682 int rv;
3683
3684 if (IS_CURRENT_SETTINGS(cts) == 0) {
3685 tgt = cts->ccb_h.target_id;
3686 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3687 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3688 return (-1);
3689 }
3690 } else {
3691 tgt = cts->ccb_h.target_id;
3692 }
3693
3694 /*
3695 * We aren't looking at Port Page 2 BIOS settings here;
3696 * they have sometimes been known to be bogus (XXX).
3697 *
3698 * For user settings, we pick the maximums from port page 0.
3699 *
3700 * For current settings, we read them out of device page 0
3701 * for that target.
3702 */
3703 if (IS_CURRENT_SETTINGS(cts)) {
3704 CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3705 dval = 0;
3706
3707 tmp = mpt->mpt_dev_page0[tgt];
3708 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3709 sizeof(tmp), FALSE, 5000);
3710 if (rv) {
3711 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3712 return (rv);
3713 }
3714 mpt2host_config_page_scsi_device_0(&tmp);
3715
3716 mpt_lprt(mpt, MPT_PRT_DEBUG,
3717 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3718 tmp.NegotiatedParameters, tmp.Information);
3719 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3720 DP_WIDE : DP_NARROW;
3721 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3722 DP_DISC_ENABLE : DP_DISC_DISABL;
3723 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3724 DP_TQING_ENABLE : DP_TQING_DISABL;
3725 oval = tmp.NegotiatedParameters;
3726 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3727 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3728 pval = tmp.NegotiatedParameters;
3729 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3730 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3731 mpt->mpt_dev_page0[tgt] = tmp;
3732 } else {
3733 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3734 oval = mpt->mpt_port_page0.Capabilities;
3735 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3736 pval = mpt->mpt_port_page0.Capabilities;
3737 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3738 }
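/*
 * At this point dval carries the discrete negotiation flags (wide,
 * disconnect, tagged queueing), oval the synchronous offset and pval
 * the synchronous period factor; translate them into the CAM
 * transfer settings structures below.
 */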
3739
3740 spi->valid = 0;
3741 scsi->valid = 0;
3742 spi->flags = 0;
3743 scsi->flags = 0;
3744 spi->sync_offset = oval;
3745 spi->sync_period = pval;
3746 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3747 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3748 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3749 if (dval & DP_WIDE) {
3750 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3751 } else {
3752 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3753 }
3754 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3755 scsi->valid = CTS_SCSI_VALID_TQ;
3756 if (dval & DP_TQING_ENABLE) {
3757 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3758 }
3759 spi->valid |= CTS_SPI_VALID_DISC;
3760 if (dval & DP_DISC_ENABLE) {
3761 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3762 }
3763 }
3764
3765 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3766 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3767 IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
3768 return (0);
3769 }
3770
3771 static void
3772 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3773 {
3774 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3775
3776 ptr = &mpt->mpt_dev_page1[tgt];
3777 if (onoff) {
3778 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3779 } else {
3780 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3781 }
3782 }
3783
3784 static void
3785 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3786 {
3787 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3788
3789 ptr = &mpt->mpt_dev_page1[tgt];
3790 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3791 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3792 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3793 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3794 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3795 if (period == 0) {
3796 return;
3797 }
3798 ptr->RequestedParameters |=
3799 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3800 ptr->RequestedParameters |=
3801 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
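/*
 * Period factors below 0xa imply DT clocking (Ultra160 and faster);
 * below 0x9 we assume Ultra320, which also wants QAS and IU enabled.
 * This mapping follows the SPI sync period factor encoding.
 */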
3802 if (period < 0xa) {
3803 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3804 }
3805 if (period < 0x9) {
3806 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3807 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3808 }
3809 }
3810
3811 static int
3812 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3813 {
3814 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3815 int rv;
3816
3817 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3818 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3819 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3820 tmp = mpt->mpt_dev_page1[tgt];
3821 host2mpt_config_page_scsi_device_1(&tmp);
3822 rv = mpt_write_cur_cfg_page(mpt, tgt,
3823 &tmp.Header, sizeof(tmp), FALSE, 5000);
3824 if (rv) {
3825 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3826 return (-1);
3827 }
3828 return (0);
3829 }
3830
3831 /****************************** Timeout Recovery ******************************/
3832 static int
3833 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3834 {
3835 int error;
3836
3837 error = kproc_create(mpt_recovery_thread, mpt,
3838 &mpt->recovery_thread, /*flags*/0,
3839 /*altstack*/0, "mpt_recovery%d", mpt->unit);
3840 return (error);
3841 }
3842
3843 static void
3844 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3845 {
3846
3847 if (mpt->recovery_thread == NULL) {
3848 return;
3849 }
3850 mpt->shutdwn_recovery = 1;
3851 wakeup(mpt);
3852 /*
3853 * Sleep on a slightly different location
3854 * for this interlock just for added safety.
3855 */
3856 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3857 }
3858
3859 static void
3860 mpt_recovery_thread(void *arg)
3861 {
3862 struct mpt_softc *mpt;
3863
3864 mpt = (struct mpt_softc *)arg;
3865 MPT_LOCK(mpt);
3866 for (;;) {
3867 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3868 if (mpt->shutdwn_recovery == 0) {
3869 mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3870 }
3871 }
3872 if (mpt->shutdwn_recovery != 0) {
3873 break;
3874 }
3875 mpt_recover_commands(mpt);
3876 }
3877 mpt->recovery_thread = NULL;
3878 wakeup(&mpt->recovery_thread);
3879 MPT_UNLOCK(mpt);
3880 kproc_exit(0);
3881 }
3882
3883 static int
3884 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3885 u_int channel, target_id_t target, lun_id_t lun, u_int abort_ctx,
3886 int sleep_ok)
3887 {
3888 MSG_SCSI_TASK_MGMT *tmf_req;
3889 int error;
3890
3891 /*
3892 * Wait for any current TMF request to complete.
3893 * We're only allowed to issue one TMF at a time.
3894 */
3895 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3896 sleep_ok, MPT_TMF_MAX_TIMEOUT);
3897 if (error != 0) {
3898 mpt_reset(mpt, TRUE);
3899 return (ETIMEDOUT);
3900 }
3901
3902 mpt_assign_serno(mpt, mpt->tmf_req);
3903 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3904
3905 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3906 memset(tmf_req, 0, sizeof(*tmf_req));
3907 tmf_req->TargetID = target;
3908 tmf_req->Bus = channel;
3909 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3910 tmf_req->TaskType = type;
3911 tmf_req->MsgFlags = flags;
3912 tmf_req->MsgContext =
3913 htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
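/*
 * The task management message carries the LUN as an 8-byte,
 * big-endian field; CAM_EXTLUN_BYTE_SWIZZLE rearranges CAM's
 * extended-LUN representation so be64enc() produces that layout.
 */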
3914 be64enc(tmf_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
3915 tmf_req->TaskMsgContext = abort_ctx;
3916
3917 mpt_lprt(mpt, MPT_PRT_DEBUG,
3918 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3919 mpt->tmf_req->serno, tmf_req->MsgContext);
3920 if (mpt->verbose > MPT_PRT_DEBUG) {
3921 mpt_print_request(tmf_req);
3922 }
3923
3924 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3925 ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3926 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3927 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3928 if (error != MPT_OK) {
3929 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
3930 mpt->tmf_req->state = REQ_STATE_FREE;
3931 mpt_reset(mpt, TRUE);
3932 }
3933 return (error);
3934 }
3935
3936 /*
3937 * When a command times out, it is placed on the request_timeout_list
3938 * and we wake our recovery thread. The MPT-Fusion architecture supports
3939 * only a single TMF operation at a time, so we serially abort/bdr, etc,
3940 * the timedout transactions. The next TMF is issued either by the
3941 * completion handler of the current TMF waking our recovery thread,
3942 * or the TMF timeout handler causing a hard reset sequence.
3943 */
3944 static void
3945 mpt_recover_commands(struct mpt_softc *mpt)
3946 {
3947 request_t *req;
3948 union ccb *ccb;
3949 int error;
3950
3951 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3952 /*
3953 * No work to do- leave.
3954 */
3955 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
3956 return;
3957 }
3958
3959 /*
3960 * Flush any commands whose completion coincides with their timeout.
3961 */
3962 mpt_intr(mpt);
3963
3964 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3965 /*
3966 * The timedout commands have already
3967 * completed. This typically means
3968 * that either the timeout value was on
3969 * the hairy edge of what the device
3970 * requires or - more likely - interrupts
3971 * are not happening.
3972 */
3973 mpt_prt(mpt, "Timedout requests already complete. "
3974 "Interrupts may not be functioning.\n");
3975 mpt_enable_ints(mpt);
3976 return;
3977 }
3978
3979 /*
3980 * We have no visibility into the current state of the
3981 * controller, so attempt to abort the commands in the
3982 * order they timed-out. For initiator commands, we
3983 * depend on the reply handler pulling requests off
3984 * the timeout list.
3985 */
3986 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
3987 uint16_t status;
3988 uint8_t response;
3989 MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
3990
3991 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
3992 req, req->serno, hdrp->Function);
3993 ccb = req->ccb;
3994 if (ccb == NULL) {
3995 mpt_prt(mpt, "null ccb in timed out request. "
3996 "Resetting Controller.\n");
3997 mpt_reset(mpt, TRUE);
3998 continue;
3999 }
4000 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4001
4002 /*
4003 * Check whether this is an initiator command; anything
4004 * else has to be handled differently below.
4005 */
4006 switch (hdrp->Function) {
4007 case MPI_FUNCTION_SCSI_IO_REQUEST:
4008 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4009 break;
4010 default:
4011 /*
4012 * XXX: FIX ME: need to abort target assists...
4013 */
4014 mpt_prt(mpt, "just putting it back on the pend q\n");
4015 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4016 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4017 links);
4018 continue;
4019 }
4020
4021 error = mpt_scsi_send_tmf(mpt,
4022 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4023 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4024 htole32(req->index | scsi_io_handler_id), TRUE);
4025
4026 if (error != 0) {
4027 /*
4028 * mpt_scsi_send_tmf hard resets on failure, so no
4029 * need to do so here. Our queue should be emptied
4030 * by the hard reset.
4031 */
4032 continue;
4033 }
4034
4035 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4036 REQ_STATE_DONE, TRUE, 500);
4037
4038 status = le16toh(mpt->tmf_req->IOCStatus);
4039 response = mpt->tmf_req->ResponseCode;
4040 mpt->tmf_req->state = REQ_STATE_FREE;
4041
4042 if (error != 0) {
4043 /*
4044 * If we've errored out, reset the controller.
4045 */
4046 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4047 "Resetting controller\n");
4048 mpt_reset(mpt, TRUE);
4049 continue;
4050 }
4051
4052 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4053 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4054 "Resetting controller.\n", status);
4055 mpt_reset(mpt, TRUE);
4056 continue;
4057 }
4058
4059 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4060 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4061 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4062 "Resetting controller.\n", response);
4063 mpt_reset(mpt, TRUE);
4064 continue;
4065 }
4066 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4067 }
4068 }
4069
4070 /************************ Target Mode Support ****************************/
4071 static void
4072 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4073 {
4074 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4075 PTR_SGE_TRANSACTION32 tep;
4076 PTR_SGE_SIMPLE32 se;
4077 bus_addr_t paddr;
4078 uint32_t fl;
4079
4080 paddr = req->req_pbuf;
4081 paddr += MPT_RQSL(mpt);
4082
4083 fc = req->req_vbuf;
4084 memset(fc, 0, MPT_REQUEST_AREA);
4085 fc->BufferCount = 1;
4086 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4087 fc->MsgContext = htole32(req->index | fc_els_handler_id);
4088
4089 /*
4090 * Okay, set up ELS buffer pointers. ELS buffer pointers
4091 * consist of a TE SGL element (with details length of zero)
4092 * followed by a SIMPLE SGL element which holds the address
4093 * of the buffer.
4094 */
4095
4096 tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4097
4098 tep->ContextSize = 4;
4099 tep->Flags = 0;
4100 tep->TransactionContext[0] = htole32(ioindex);
4101
4102 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
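/*
 * A SIMPLE SGE packs its flags into the top byte of FlagsLength
 * (hence the MPI_SGE_FLAGS_SHIFT) and the buffer length into the
 * low 24 bits; here the length is the part of the request area
 * that follows the message itself.
 */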
4103 fl =
4104 MPI_SGE_FLAGS_HOST_TO_IOC |
4105 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4106 MPI_SGE_FLAGS_LAST_ELEMENT |
4107 MPI_SGE_FLAGS_END_OF_LIST |
4108 MPI_SGE_FLAGS_END_OF_BUFFER;
4109 fl <<= MPI_SGE_FLAGS_SHIFT;
4110 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4111 se->FlagsLength = htole32(fl);
4112 se->Address = htole32((uint32_t) paddr);
4113 mpt_lprt(mpt, MPT_PRT_DEBUG,
4114 "add ELS index %d ioindex %d for %p:%u\n",
4115 req->index, ioindex, req, req->serno);
4116 KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4117 ("mpt_fc_post_els: request not locked"));
4118 mpt_send_cmd(mpt, req);
4119 }
4120
4121 static void
4122 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4123 {
4124 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4125 PTR_CMD_BUFFER_DESCRIPTOR cb;
4126 bus_addr_t paddr;
4127
4128 paddr = req->req_pbuf;
4129 paddr += MPT_RQSL(mpt);
4130 memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4131 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4132
4133 fc = req->req_vbuf;
4134 fc->BufferCount = 1;
4135 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4136 fc->BufferLength = MIN(MPT_REQUEST_AREA - MPT_RQSL(mpt), UINT8_MAX);
4137 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4138
4139 cb = &fc->Buffer[0];
4140 cb->IoIndex = htole16(ioindex);
4141 cb->u.PhysicalAddress32 = htole32((U32) paddr);
4142
4143 mpt_check_doorbell(mpt);
4144 mpt_send_cmd(mpt, req);
4145 }
4146
4147 static int
4148 mpt_add_els_buffers(struct mpt_softc *mpt)
4149 {
4150 int i;
4151
4152 if (mpt->is_fc == 0) {
4153 return (TRUE);
4154 }
4155
4156 if (mpt->els_cmds_allocated) {
4157 return (TRUE);
4158 }
4159
4160 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *),
4161 M_DEVBUF, M_NOWAIT | M_ZERO);
4162
4163 if (mpt->els_cmd_ptrs == NULL) {
4164 return (FALSE);
4165 }
4166
4167 /*
4168 * Feed the chip some ELS buffer resources
4169 */
4170 for (i = 0; i < MPT_MAX_ELS; i++) {
4171 request_t *req = mpt_get_request(mpt, FALSE);
4172 if (req == NULL) {
4173 break;
4174 }
4175 req->state |= REQ_STATE_LOCKED;
4176 mpt->els_cmd_ptrs[i] = req;
4177 mpt_fc_post_els(mpt, req, i);
4178 }
4179
4180 if (i == 0) {
4181 mpt_prt(mpt, "unable to add ELS buffer resources\n");
4182 free(mpt->els_cmd_ptrs, M_DEVBUF);
4183 mpt->els_cmd_ptrs = NULL;
4184 return (FALSE);
4185 }
4186 if (i != MPT_MAX_ELS) {
4187 mpt_lprt(mpt, MPT_PRT_INFO,
4188 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4189 }
4190 mpt->els_cmds_allocated = i;
4191 return(TRUE);
4192 }
4193
4194 static int
4195 mpt_add_target_commands(struct mpt_softc *mpt)
4196 {
4197 int i, max;
4198
4199 if (mpt->tgt_cmd_ptrs) {
4200 return (TRUE);
4201 }
4202
4203 max = MPT_MAX_REQUESTS(mpt) >> 1;
4204 if (max > mpt->mpt_max_tgtcmds) {
4205 max = mpt->mpt_max_tgtcmds;
4206 }
4207 mpt->tgt_cmd_ptrs =
4208 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4209 if (mpt->tgt_cmd_ptrs == NULL) {
4210 mpt_prt(mpt,
4211 "mpt_add_target_commands: could not allocate cmd ptrs\n");
4212 return (FALSE);
4213 }
4214
4215 for (i = 0; i < max; i++) {
4216 request_t *req;
4217
4218 req = mpt_get_request(mpt, FALSE);
4219 if (req == NULL) {
4220 break;
4221 }
4222 req->state |= REQ_STATE_LOCKED;
4223 mpt->tgt_cmd_ptrs[i] = req;
4224 mpt_post_target_command(mpt, req, i);
4225 }
4226
4227 if (i == 0) {
4228 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4229 free(mpt->tgt_cmd_ptrs, M_DEVBUF);
4230 mpt->tgt_cmd_ptrs = NULL;
4231 return (FALSE);
4232 }
4233
4234 mpt->tgt_cmds_allocated = i;
4235
4236 if (i < max) {
4237 mpt_lprt(mpt, MPT_PRT_INFO,
4238 "added %d of %d target bufs\n", i, max);
4239 }
4240 return (i);
4241 }
4242
4243 static int
4244 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4245 {
4246
4247 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4248 mpt->twildcard = 1;
4249 } else if (lun >= MPT_MAX_LUNS) {
4250 return (EINVAL);
4251 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4252 return (EINVAL);
4253 }
4254 if (mpt->tenabled == 0) {
4255 if (mpt->is_fc) {
4256 (void) mpt_fc_reset_link(mpt, 0);
4257 }
4258 mpt->tenabled = 1;
4259 }
4260 if (lun == CAM_LUN_WILDCARD) {
4261 mpt->trt_wildcard.enabled = 1;
4262 } else {
4263 mpt->trt[lun].enabled = 1;
4264 }
4265 return (0);
4266 }
4267
4268 static int
4269 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4270 {
4271 int i;
4272
4273 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4274 mpt->twildcard = 0;
4275 } else if (lun >= MPT_MAX_LUNS) {
4276 return (EINVAL);
4277 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4278 return (EINVAL);
4279 }
4280 if (lun == CAM_LUN_WILDCARD) {
4281 mpt->trt_wildcard.enabled = 0;
4282 } else {
4283 mpt->trt[lun].enabled = 0;
4284 }
4285 for (i = 0; i < MPT_MAX_LUNS; i++) {
4286 if (mpt->trt[i].enabled) {
4287 break;
4288 }
4289 }
4290 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4291 if (mpt->is_fc) {
4292 (void) mpt_fc_reset_link(mpt, 0);
4293 }
4294 mpt->tenabled = 0;
4295 }
4296 return (0);
4297 }
4298
4299 /*
4300 * Called with MPT lock held
4301 */
4302 static void
4303 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4304 {
4305 struct ccb_scsiio *csio = &ccb->csio;
4306 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4307 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4308
4309 switch (tgt->state) {
4310 case TGT_STATE_IN_CAM:
4311 break;
4312 case TGT_STATE_MOVING_DATA:
4313 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4314 xpt_freeze_simq(mpt->sim, 1);
4315 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4316 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4317 xpt_done(ccb);
4318 return;
4319 default:
4320 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4321 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4322 mpt_tgt_dump_req_state(mpt, cmd_req);
4323 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4324 xpt_done(ccb);
4325 return;
4326 }
4327
4328 if (csio->dxfer_len) {
4329 bus_dmamap_callback_t *cb;
4330 PTR_MSG_TARGET_ASSIST_REQUEST ta;
4331 request_t *req;
4332 int error;
4333
4334 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4335 ("dxfer_len %u but direction is NONE", csio->dxfer_len));
4336
4337 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4338 if (mpt->outofbeer == 0) {
4339 mpt->outofbeer = 1;
4340 xpt_freeze_simq(mpt->sim, 1);
4341 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4342 }
4343 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4344 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4345 xpt_done(ccb);
4346 return;
4347 }
4348 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
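/*
 * Pick the request construction callback that matches the
 * platform's bus address width (64-bit vs 32-bit SGEs).
 */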
4349 if (sizeof (bus_addr_t) > 4) {
4350 cb = mpt_execute_req_a64;
4351 } else {
4352 cb = mpt_execute_req;
4353 }
4354
4355 req->ccb = ccb;
4356 ccb->ccb_h.ccb_req_ptr = req;
4357
4358 /*
4359 * Record the currently active ccb and the
4360 * request for it in our target state area.
4361 */
4362 tgt->ccb = ccb;
4363 tgt->req = req;
4364
4365 memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4366 ta = req->req_vbuf;
4367
4368 if (mpt->is_sas) {
4369 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4370 cmd_req->req_vbuf;
4371 ta->QueueTag = ssp->InitiatorTag;
4372 } else if (mpt->is_spi) {
4373 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4374 cmd_req->req_vbuf;
4375 ta->QueueTag = sp->Tag;
4376 }
4377 ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4378 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4379 ta->ReplyWord = htole32(tgt->reply_desc);
4380 be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(csio->ccb_h.target_lun));
4381
4382 ta->RelativeOffset = tgt->bytes_xfered;
4383 ta->DataLength = ccb->csio.dxfer_len;
4384 if (ta->DataLength > tgt->resid) {
4385 ta->DataLength = tgt->resid;
4386 }
4387
4388 /*
4389 * XXX Should be done after data transfer completes?
4390 */
4391 csio->resid = csio->dxfer_len - ta->DataLength;
4392 tgt->resid -= csio->dxfer_len;
4393 tgt->bytes_xfered += csio->dxfer_len;
4394
4395 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4396 ta->TargetAssistFlags |=
4397 TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4398 }
4399
4400 #ifdef WE_TRUST_AUTO_GOOD_STATUS
4401 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4402 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4403 ta->TargetAssistFlags |=
4404 TARGET_ASSIST_FLAGS_AUTO_STATUS;
4405 }
4406 #endif
4407 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4408
4409 mpt_lprt(mpt, MPT_PRT_DEBUG,
4410 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4411 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4412 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4413
4414 error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb,
4415 cb, req, 0);
4416 if (error == EINPROGRESS) {
4417 xpt_freeze_simq(mpt->sim, 1);
4418 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4419 }
4420 } else {
4421 /*
4422 * XXX: I don't know why this seems to happen, but
4423 * XXX: completing the CCB seems to make things happy.
4424 * XXX: This seems to happen if the initiator requests
4425 * XXX: enough data that we have to do multiple CTIOs.
4426 */
4427 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4428 mpt_lprt(mpt, MPT_PRT_DEBUG,
4429 "Meaningless STATUS CCB (%p): flags %x status %x "
4430 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4431 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4432 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4433 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4434 xpt_done(ccb);
4435 return;
4436 }
4437 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status,
4438 (void *)&csio->sense_data,
4439 (ccb->ccb_h.flags & CAM_SEND_SENSE) ?
4440 csio->sense_len : 0);
4441 }
4442 }
4443
4444 static void
4445 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4446 lun_id_t lun, int send, uint8_t *data, size_t length)
4447 {
4448 mpt_tgt_state_t *tgt;
4449 PTR_MSG_TARGET_ASSIST_REQUEST ta;
4450 SGE_SIMPLE32 *se;
4451 uint32_t flags;
4452 uint8_t *dptr;
4453 bus_addr_t pptr;
4454 request_t *req;
4455
4456 /*
4457 * We enter with resid set to the data load for the command.
4458 */
4459 tgt = MPT_TGT_STATE(mpt, cmd_req);
4460 if (length == 0 || tgt->resid == 0) {
4461 tgt->resid = 0;
4462 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL, 0);
4463 return;
4464 }
4465
4466 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4467 mpt_prt(mpt, "out of resources- dropping local response\n");
4468 return;
4469 }
4470 tgt->is_local = 1;
4471
4472 memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4473 ta = req->req_vbuf;
4474
4475 if (mpt->is_sas) {
4476 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4477 ta->QueueTag = ssp->InitiatorTag;
4478 } else if (mpt->is_spi) {
4479 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4480 ta->QueueTag = sp->Tag;
4481 }
4482 ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4483 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4484 ta->ReplyWord = htole32(tgt->reply_desc);
4485 be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun));
4486 ta->RelativeOffset = 0;
4487 ta->DataLength = length;
4488
4489 dptr = req->req_vbuf;
4490 dptr += MPT_RQSL(mpt);
4491 pptr = req->req_pbuf;
4492 pptr += MPT_RQSL(mpt);
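/*
 * Stage the response payload in the second half of this request's
 * buffer; only MPT_RQSL(mpt) bytes fit there, so anything longer
 * is truncated by the copy below.
 */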
4493 memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4494
4495 se = (SGE_SIMPLE32 *) &ta->SGL[0];
4496 memset(se, 0, sizeof (*se));
4497
4498 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4499 if (send) {
4500 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4501 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4502 }
4503 se->Address = pptr;
4504 MPI_pSGE_SET_LENGTH(se, length);
4505 flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4506 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4507 MPI_pSGE_SET_FLAGS(se, flags);
4508
4509 tgt->ccb = NULL;
4510 tgt->req = req;
4511 tgt->resid -= length;
4512 tgt->bytes_xfered = length;
4513 #ifdef WE_TRUST_AUTO_GOOD_STATUS
4514 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4515 #else
4516 tgt->state = TGT_STATE_MOVING_DATA;
4517 #endif
4518 mpt_send_cmd(mpt, req);
4519 }
4520
4521 /*
4522 * Abort queued up CCBs
4523 */
4524 static cam_status
4525 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4526 {
4527 struct mpt_hdr_stailq *lp;
4528 struct ccb_hdr *srch;
4529 union ccb *accb = ccb->cab.abort_ccb;
4530 tgt_resource_t *trtp;
4531 mpt_tgt_state_t *tgt;
4532 request_t *req;
4533 uint32_t tag;
4534
4535 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4536 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD)
4537 trtp = &mpt->trt_wildcard;
4538 else
4539 trtp = &mpt->trt[ccb->ccb_h.target_lun];
4540 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4541 lp = &trtp->atios;
4542 tag = accb->atio.tag_id;
4543 } else {
4544 lp = &trtp->inots;
4545 tag = accb->cin1.tag_id;
4546 }
4547
4548 /* Search the CCB among queued. */
4549 STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4550 if (srch != &accb->ccb_h)
4551 continue;
4552 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4553 accb->ccb_h.status = CAM_REQ_ABORTED;
4554 xpt_done(accb);
4555 return (CAM_REQ_CMP);
4556 }
4557
4558 /* Search the CCB among running. */
4559 req = MPT_TAG_2_REQ(mpt, tag);
4560 tgt = MPT_TGT_STATE(mpt, req);
4561 if (tgt->tag_id == tag) {
4562 mpt_abort_target_cmd(mpt, req);
4563 return (CAM_REQ_CMP);
4564 }
4565
4566 return (CAM_UA_ABORT);
4567 }
4568
4569 /*
4570 * Ask the MPT to abort the current target command
4571 */
4572 static int
4573 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4574 {
4575 int error;
4576 request_t *req;
4577 PTR_MSG_TARGET_MODE_ABORT abtp;
4578
4579 req = mpt_get_request(mpt, FALSE);
4580 if (req == NULL) {
4581 return (-1);
4582 }
4583 abtp = req->req_vbuf;
4584 memset(abtp, 0, sizeof (*abtp));
4585
4586 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4587 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4588 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4589 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4590 error = 0;
4591 if (mpt->is_fc || mpt->is_sas) {
4592 mpt_send_cmd(mpt, req);
4593 } else {
4594 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4595 }
4596 return (error);
4597 }
4598
4599 /*
4600 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4601 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4602 * FC929 to set bogus FC_RSP fields (nonzero residuals
4603 * but w/o RESID fields set). This causes QLogic initiators
4604 * to think that a frame may have been lost.
4605 *
4606 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4607 * we use allocated requests to do TARGET_ASSIST and we
4608 * need to know when to release them.
4609 */
4610
4611 static void
4612 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4613 uint8_t status, uint8_t const *sense_data, u_int sense_len)
4614 {
4615 uint8_t *cmd_vbuf;
4616 mpt_tgt_state_t *tgt;
4617 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4618 request_t *req;
4619 bus_addr_t paddr;
4620 int resplen = 0;
4621 uint32_t fl;
4622
4623 cmd_vbuf = cmd_req->req_vbuf;
4624 cmd_vbuf += MPT_RQSL(mpt);
4625 tgt = MPT_TGT_STATE(mpt, cmd_req);
4626
4627 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4628 if (mpt->outofbeer == 0) {
4629 mpt->outofbeer = 1;
4630 xpt_freeze_simq(mpt->sim, 1);
4631 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4632 }
4633 if (ccb) {
4634 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4635 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4636 xpt_done(ccb);
4637 } else {
4638 mpt_prt(mpt,
4639 "could not allocate status request- dropping\n");
4640 }
4641 return;
4642 }
4643 req->ccb = ccb;
4644 if (ccb) {
4645 ccb->ccb_h.ccb_mpt_ptr = mpt;
4646 ccb->ccb_h.ccb_req_ptr = req;
4647 }
4648
4649 /*
4650 * Record the currently active ccb, if any, and the
4651 * request for it in our target state area.
4652 */
4653 tgt->ccb = ccb;
4654 tgt->req = req;
4655 tgt->state = TGT_STATE_SENDING_STATUS;
4656
4657 tp = req->req_vbuf;
4658 paddr = req->req_pbuf;
4659 paddr += MPT_RQSL(mpt);
4660
4661 memset(tp, 0, sizeof (*tp));
4662 tp->StatusCode = status;
4663 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4664 if (mpt->is_fc) {
4665 PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4666 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4667 uint8_t *sts_vbuf;
4668 uint32_t *rsp;
4669
4670 sts_vbuf = req->req_vbuf;
4671 sts_vbuf += MPT_RQSL(mpt);
4672 rsp = (uint32_t *) sts_vbuf;
4673 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4674
4675 /*
4676 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4677 * It has to be big-endian in memory and is organized
4678 * in 32 bit words, which are much easier to deal with
4679 * as words which are swizzled as needed.
4680 *
4681 * All we're filling here is the FC_RSP payload.
4682 * We may just have the chip synthesize it if
4683 * we have no residual and an OK status.
4684 *
4685 */
4686 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
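/*
 * Viewed as 32-bit big-endian words: rsp[2] carries the FCP_RSP
 * control flags (byte 10) and SCSI status (byte 11), rsp[3] the
 * residual count, rsp[4] the sense length, and the sense data
 * itself starts at rsp[6] (byte offset 24, hence
 * MIN_FCP_RESPONSE_SIZE below). This follows the FCP response
 * IU layout.
 */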
4687
4688 rsp[2] = htobe32(status);
4689 #define MIN_FCP_RESPONSE_SIZE 24
4690 #ifndef WE_TRUST_AUTO_GOOD_STATUS
4691 resplen = MIN_FCP_RESPONSE_SIZE;
4692 #endif
4693 if (tgt->resid < 0) {
4694 rsp[2] |= htobe32(0x400); /* FCP_RESID_OVER */
4695 rsp[3] = htobe32(-tgt->resid);
4696 resplen = MIN_FCP_RESPONSE_SIZE;
4697 } else if (tgt->resid > 0) {
4698 rsp[2] |= htobe32(0x800); /* FCP_RESID_UNDER */
4699 rsp[3] = htobe32(tgt->resid);
4700 resplen = MIN_FCP_RESPONSE_SIZE;
4701 }
4702 if (sense_len > 0) {
4703 rsp[2] |= htobe32(0x200); /* FCP_SNS_LEN_VALID */
4704 rsp[4] = htobe32(sense_len);
4705 memcpy(&rsp[6], sense_data, sense_len);
4706 resplen = MIN_FCP_RESPONSE_SIZE + sense_len;
4707 }
4708 } else if (mpt->is_sas) {
4709 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4710 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4711 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4712 } else {
4713 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4714 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4715 tp->QueueTag = htole16(sp->Tag);
4716 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4717 }
4718
4719 tp->ReplyWord = htole32(tgt->reply_desc);
4720 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4721
4722 #ifdef WE_CAN_USE_AUTO_REPOST
4723 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4724 #endif
4725 if (status == SCSI_STATUS_OK && resplen == 0) {
4726 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4727 } else {
4728 tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4729 fl = MPI_SGE_FLAGS_HOST_TO_IOC |
4730 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4731 MPI_SGE_FLAGS_LAST_ELEMENT |
4732 MPI_SGE_FLAGS_END_OF_LIST |
4733 MPI_SGE_FLAGS_END_OF_BUFFER;
4734 fl <<= MPI_SGE_FLAGS_SHIFT;
4735 fl |= resplen;
4736 tp->StatusDataSGE.FlagsLength = htole32(fl);
4737 }
4738
4739 mpt_lprt(mpt, MPT_PRT_DEBUG,
4740 "STATUS_CCB %p (with%s sense) tag %x req %p:%u resid %u\n",
4741 ccb, sense_len > 0 ? "" : "out", tgt->tag_id,
4742 req, req->serno, tgt->resid);
4743 if (mpt->verbose > MPT_PRT_DEBUG)
4744 mpt_print_request(req->req_vbuf);
4745 if (ccb) {
4746 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4747 mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb);
4748 }
4749 mpt_send_cmd(mpt, req);
4750 }
4751
4752 static void
4753 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4754 tgt_resource_t *trtp, int init_id)
4755 {
4756 struct ccb_immediate_notify *inot;
4757 mpt_tgt_state_t *tgt;
4758
4759 tgt = MPT_TGT_STATE(mpt, req);
4760 inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots);
4761 if (inot == NULL) {
4762 mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
4763 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL, 0);
4764 return;
4765 }
4766 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4767 mpt_lprt(mpt, MPT_PRT_DEBUG1,
4768 "Get FREE INOT %p lun %jx\n", inot,
4769 (uintmax_t)inot->ccb_h.target_lun);
4770
4771 inot->initiator_id = init_id; /* XXX */
4772 inot->tag_id = tgt->tag_id;
4773 inot->seq_id = 0;
4774 /*
4775 * This is a somewhat grotesque attempt to map from task management
4776 * to old style SCSI messages. God help us all.
4777 */
4778 switch (fc) {
4779 case MPT_QUERY_TASK_SET:
4780 inot->arg = MSG_QUERY_TASK_SET;
4781 break;
4782 case MPT_ABORT_TASK_SET:
4783 inot->arg = MSG_ABORT_TASK_SET;
4784 break;
4785 case MPT_CLEAR_TASK_SET:
4786 inot->arg = MSG_CLEAR_TASK_SET;
4787 break;
4788 case MPT_QUERY_ASYNC_EVENT:
4789 inot->arg = MSG_QUERY_ASYNC_EVENT;
4790 break;
4791 case MPT_LOGICAL_UNIT_RESET:
4792 inot->arg = MSG_LOGICAL_UNIT_RESET;
4793 break;
4794 case MPT_TARGET_RESET:
4795 inot->arg = MSG_TARGET_RESET;
4796 break;
4797 case MPT_CLEAR_ACA:
4798 inot->arg = MSG_CLEAR_ACA;
4799 break;
4800 default:
4801 inot->arg = MSG_NOOP;
4802 break;
4803 }
4804 tgt->ccb = (union ccb *) inot;
4805 inot->ccb_h.status = CAM_MESSAGE_RECV;
4806 xpt_done((union ccb *)inot);
4807 }
4808
4809 static void
4810 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4811 {
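/*
 * Canned INQUIRY data returned for LUNs we do not serve: peripheral
 * qualifier 011b with device type 1Fh ("no device at this LUN"),
 * followed by vendor/product/revision strings.
 */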
4812 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
4813 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
4814 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ',
4815 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I',
4816 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V',
4817 '0', '0', '0', '1'
4818 };
4819 struct ccb_accept_tio *atiop;
4820 lun_id_t lun;
4821 int tag_action = 0;
4822 mpt_tgt_state_t *tgt;
4823 tgt_resource_t *trtp = NULL;
4824 U8 *lunptr;
4825 U8 *vbuf;
4826 U16 ioindex;
4827 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4828 uint8_t *cdbp;
4829
4830 /*
4831 * Stash info for the current command where we can get at it later.
4832 */
4833 vbuf = req->req_vbuf;
4834 vbuf += MPT_RQSL(mpt);
4835 if (mpt->verbose >= MPT_PRT_DEBUG) {
4836 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
4837 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
4838 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
4839 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
4840 }
4841
4842 /*
4843 * Get our state pointer set up.
4844 */
4845 tgt = MPT_TGT_STATE(mpt, req);
4846 if (tgt->state != TGT_STATE_LOADED) {
4847 mpt_tgt_dump_req_state(mpt, req);
4848 panic("bad target state in mpt_scsi_tgt_atio");
4849 }
4850 memset(tgt, 0, sizeof (mpt_tgt_state_t));
4851 tgt->state = TGT_STATE_IN_CAM;
4852 tgt->reply_desc = reply_desc;
4853 ioindex = GET_IO_INDEX(reply_desc);
4854
4855 /*
4856 * The tag we construct here allows us to find the
4857 * original request that the command came in with.
4858 *
4859 * This way we don't have to depend on anything but the
4860 * tag to find things when CCBs show back up from CAM.
4861 */
4862 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
4863
4864 if (mpt->is_fc) {
4865 PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4866 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4867 if (fc->FcpCntl[2]) {
4868 /*
4869 * Task Management Request
4870 */
4871 switch (fc->FcpCntl[2]) {
4872 case 0x1:
4873 fct = MPT_QUERY_TASK_SET;
4874 break;
4875 case 0x2:
4876 fct = MPT_ABORT_TASK_SET;
4877 break;
4878 case 0x4:
4879 fct = MPT_CLEAR_TASK_SET;
4880 break;
4881 case 0x8:
4882 fct = MPT_QUERY_ASYNC_EVENT;
4883 break;
4884 case 0x10:
4885 fct = MPT_LOGICAL_UNIT_RESET;
4886 break;
4887 case 0x20:
4888 fct = MPT_TARGET_RESET;
4889 break;
4890 case 0x40:
4891 fct = MPT_CLEAR_ACA;
4892 break;
4893 default:
4894 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4895 fc->FcpCntl[2]);
4896 mpt_scsi_tgt_status(mpt, NULL, req,
4897 SCSI_STATUS_OK, NULL, 0);
4898 return;
4899 }
4900 } else {
4901 switch (fc->FcpCntl[1]) {
4902 case 0:
4903 tag_action = MSG_SIMPLE_Q_TAG;
4904 break;
4905 case 1:
4906 tag_action = MSG_HEAD_OF_Q_TAG;
4907 break;
4908 case 2:
4909 tag_action = MSG_ORDERED_Q_TAG;
4910 break;
4911 default:
4912 /*
4913 * Bah. Ignore Untagged Queueing and ACA
4914 */
4915 tag_action = MSG_SIMPLE_Q_TAG;
4916 break;
4917 }
4918 }
4919 tgt->resid = be32toh(fc->FcpDl);
4920 cdbp = fc->FcpCdb;
4921 lunptr = fc->FcpLun;
4922 tgt->itag = fc->OptionalOxid;
4923 } else if (mpt->is_sas) {
4924 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
4925 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
4926 cdbp = ssp->CDB;
4927 lunptr = ssp->LogicalUnitNumber;
4928 tgt->itag = ssp->InitiatorTag;
4929 } else {
4930 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
4931 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
4932 cdbp = sp->CDB;
4933 lunptr = sp->LogicalUnitNumber;
4934 tgt->itag = sp->Tag;
4935 }
4936
4937 lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(lunptr));
4938
4939 /*
4940 * Deal with non-enabled or bad luns here.
4941 */
4942 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
4943 mpt->trt[lun].enabled == 0) {
4944 if (mpt->twildcard) {
4945 trtp = &mpt->trt_wildcard;
4946 } else if (fct == MPT_NIL_TMT_VALUE) {
4947 /*
4948 * In this case, we haven't got an upstream listener
4949 * for either a specific lun or wildcard luns. We
4950 * have to make some sensible response. For regular
4951 * inquiry, just return some NOT HERE inquiry data.
4952 * For VPD inquiry, report illegal field in cdb.
4953 * For REQUEST SENSE, just return NO SENSE data.
4954 * REPORT LUNS gets illegal command.
4955 * All other commands get 'no such device'.
4956 */
4957 uint8_t sense[MPT_SENSE_SIZE];
4958 size_t len;
4959
4960 memset(sense, 0, sizeof(sense));
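/*
 * Build fixed-format sense data: 0xf0 = valid + current error,
 * sense key 5 (ILLEGAL REQUEST) by default, 8 bytes of additional
 * sense length; the ASC/ASCQ bytes are filled in per command below.
 */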
4961 sense[0] = 0xf0;
4962 sense[2] = 0x5;
4963 sense[7] = 0x8;
4964
4965 switch (cdbp[0]) {
4966 case INQUIRY:
4967 {
4968 if (cdbp[1] != 0) {
4969 sense[12] = 0x26;
4970 sense[13] = 0x01;
4971 break;
4972 }
4973 len = min(tgt->resid, cdbp[4]);
4974 len = min(len, sizeof (null_iqd));
4975 mpt_lprt(mpt, MPT_PRT_DEBUG,
4976 "local inquiry %ld bytes\n", (long) len);
4977 mpt_scsi_tgt_local(mpt, req, lun, 1,
4978 null_iqd, len);
4979 return;
4980 }
4981 case REQUEST_SENSE:
4982 {
4983 sense[2] = 0x0;
4984 len = min(tgt->resid, cdbp[4]);
4985 len = min(len, sizeof (sense));
4986 mpt_lprt(mpt, MPT_PRT_DEBUG,
4987 "local reqsense %ld bytes\n", (long) len);
4988 mpt_scsi_tgt_local(mpt, req, lun, 1,
4989 sense, len);
4990 return;
4991 }
4992 case REPORT_LUNS:
4993 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
4994 sense[12] = 0x26;
4995 return;
4996 default:
4997 mpt_lprt(mpt, MPT_PRT_DEBUG,
4998 "CMD 0x%x to unmanaged lun %jx\n",
4999 cdbp[0], (uintmax_t)lun);
5000 sense[12] = 0x25;
5001 break;
5002 }
5003 mpt_scsi_tgt_status(mpt, NULL, req,
5004 SCSI_STATUS_CHECK_COND, sense, sizeof(sense));
5005 return;
5006 }
5007 /* otherwise, leave trtp NULL */
5008 } else {
5009 trtp = &mpt->trt[lun];
5010 }
5011
5012 /*
5013 * Deal with any task management
5014 */
5015 if (fct != MPT_NIL_TMT_VALUE) {
5016 if (trtp == NULL) {
5017 mpt_prt(mpt, "task mgmt function %x but no listener\n",
5018 fct);
5019 mpt_scsi_tgt_status(mpt, NULL, req,
5020 SCSI_STATUS_OK, NULL, 0);
5021 } else {
5022 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5023 GET_INITIATOR_INDEX(reply_desc));
5024 }
5025 return;
5026 }
5027
5028 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5029 if (atiop == NULL) {
5030 mpt_lprt(mpt, MPT_PRT_WARN,
5031 "no ATIOs for lun %jx- sending back %s\n", (uintmax_t)lun,
5032 mpt->tenabled? "QUEUE FULL" : "BUSY");
5033 mpt_scsi_tgt_status(mpt, NULL, req,
5034 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5035 NULL, 0);
5036 return;
5037 }
5038 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5039 mpt_lprt(mpt, MPT_PRT_DEBUG1,
5040 "Get FREE ATIO %p lun %jx\n", atiop,
5041 (uintmax_t)atiop->ccb_h.target_lun);
5042 atiop->ccb_h.ccb_mpt_ptr = mpt;
5043 atiop->ccb_h.status = CAM_CDB_RECVD;
5044 atiop->ccb_h.target_lun = lun;
5045 atiop->sense_len = 0;
5046 atiop->tag_id = tgt->tag_id;
5047 atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
5048 atiop->cdb_len = 16;
5049 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
5050 if (tag_action) {
5051 atiop->tag_action = tag_action;
5052 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
5053 }
5054 if (mpt->verbose >= MPT_PRT_DEBUG) {
5055 int i;
5056 mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop,
5057 (uintmax_t)atiop->ccb_h.target_lun);
5058 for (i = 0; i < atiop->cdb_len; i++) {
5059 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
5060 (i == (atiop->cdb_len - 1))? '>' : ' ');
5061 }
5062 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
5063 tgt->itag, tgt->tag_id, tgt->reply_desc, tgt->resid);
5064 }
5065
5066 xpt_done((union ccb *)atiop);
5067 }
5068
5069 static void
5070 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
5071 {
5072 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5073
5074 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
5075 "nx %d tag 0x%08x itag 0x%04x state=%d\n", req, req->serno,
5076 tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb,
5077 tgt->req, tgt->nxfers, tgt->tag_id, tgt->itag, tgt->state);
5078 }
5079
5080 static void
5081 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
5082 {
5083
5084 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
5085 req->index, req->index, req->state);
5086 mpt_tgt_dump_tgt_state(mpt, req);
5087 }
5088
5089 static int
5090 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
5091 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
5092 {
5093 int dbg;
5094 union ccb *ccb;
5095 U16 status;
5096
5097 if (reply_frame == NULL) {
5098 /*
5099 * Figure out what the state of the command is.
5100 */
5101 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5102
5103 #ifdef INVARIANTS
5104 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
5105 if (tgt->req) {
5106 mpt_req_not_spcl(mpt, tgt->req,
5107 "turbo scsi_tgt_reply associated req", __LINE__);
5108 }
5109 #endif
5110 switch(tgt->state) {
5111 case TGT_STATE_LOADED:
5112 /*
5113 * This is a new command starting.
5114 */
5115 mpt_scsi_tgt_atio(mpt, req, reply_desc);
5116 break;
5117 case TGT_STATE_MOVING_DATA:
5118 {
5119 ccb = tgt->ccb;
5120 if (tgt->req == NULL) {
5121 panic("mpt: turbo target reply with null "
5122 "associated request moving data");
5123 /* NOTREACHED */
5124 }
5125 if (ccb == NULL) {
5126 if (tgt->is_local == 0) {
5127 panic("mpt: turbo target reply with "
5128 "null associated ccb moving data");
5129 /* NOTREACHED */
5130 }
5131 mpt_lprt(mpt, MPT_PRT_DEBUG,
5132 "TARGET_ASSIST local done\n");
5133 TAILQ_REMOVE(&mpt->request_pending_list,
5134 tgt->req, links);
5135 mpt_free_request(mpt, tgt->req);
5136 tgt->req = NULL;
5137 mpt_scsi_tgt_status(mpt, NULL, req,
5138 0, NULL, 0);
5139 return (TRUE);
5140 }
5141 tgt->ccb = NULL;
5142 tgt->nxfers++;
5143 mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
5144 mpt_lprt(mpt, MPT_PRT_DEBUG,
5145 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5146 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5147 /*
5148 * Free the Target Assist Request
5149 */
5150 KASSERT(tgt->req->ccb == ccb,
5151 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
5152 tgt->req->serno, tgt->req->ccb));
5153 TAILQ_REMOVE(&mpt->request_pending_list,
5154 tgt->req, links);
5155 mpt_free_request(mpt, tgt->req);
5156 tgt->req = NULL;
5157
5158 /*
5159 * Do we need to send status now? That is, are
5160 * we done with all our data transfers?
5161 */
5162 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
5163 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5164 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5165 KASSERT(ccb->ccb_h.status,
5166 ("zero ccb sts at %d", __LINE__));
5167 tgt->state = TGT_STATE_IN_CAM;
5168 if (mpt->outofbeer) {
5169 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5170 mpt->outofbeer = 0;
5171 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5172 }
5173 xpt_done(ccb);
5174 break;
5175 }
5176 /*
5177 * Otherwise, send status (and sense)
5178 */
5179 mpt_scsi_tgt_status(mpt, ccb, req,
5180 ccb->csio.scsi_status,
5181 (void *)&ccb->csio.sense_data,
5182 (ccb->ccb_h.flags & CAM_SEND_SENSE) ?
5183 ccb->csio.sense_len : 0);
5184 break;
5185 }
5186 case TGT_STATE_SENDING_STATUS:
5187 case TGT_STATE_MOVING_DATA_AND_STATUS:
5188 {
5189 int ioindex;
5190 ccb = tgt->ccb;
5191
5192 if (tgt->req == NULL) {
5193 panic("mpt: turbo target reply with null "
5194 "associated request sending status");
5195 /* NOTREACHED */
5196 }
5197
5198 if (ccb) {
5199 tgt->ccb = NULL;
5200 if (tgt->state ==
5201 TGT_STATE_MOVING_DATA_AND_STATUS) {
5202 tgt->nxfers++;
5203 }
5204 mpt_req_untimeout(tgt->req, mpt_timeout, ccb);
5205 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5206 ccb->ccb_h.status |= CAM_SENT_SENSE;
5207 }
5208 mpt_lprt(mpt, MPT_PRT_DEBUG,
5209 "TARGET_STATUS tag %x sts %x flgs %x req "
5210 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5211 ccb->ccb_h.flags, tgt->req);
5212 /*
5213 * Free the Target Send Status Request
5214 */
5215 KASSERT(tgt->req->ccb == ccb,
5216 ("tgt->req %p:%u tgt->req->ccb %p",
5217 tgt->req, tgt->req->serno, tgt->req->ccb));
5218 /*
5219 * Notify CAM that we're done
5220 */
5221 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5222 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5223 KASSERT(ccb->ccb_h.status,
5224 ("ZERO ccb sts at %d", __LINE__));
5225 tgt->ccb = NULL;
5226 } else {
5227 mpt_lprt(mpt, MPT_PRT_DEBUG,
5228 "TARGET_STATUS non-CAM for req %p:%u\n",
5229 tgt->req, tgt->req->serno);
5230 }
5231 TAILQ_REMOVE(&mpt->request_pending_list,
5232 tgt->req, links);
5233 mpt_free_request(mpt, tgt->req);
5234 tgt->req = NULL;
5235
5236 /*
5237 * And re-post the Command Buffer.
5238 * This will reset the state.
5239 */
5240 ioindex = GET_IO_INDEX(reply_desc);
5241 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5242 tgt->is_local = 0;
5243 mpt_post_target_command(mpt, req, ioindex);
5244
5245 /*
5246 * And post a done for anyone who cares
5247 */
5248 if (ccb) {
5249 if (mpt->outofbeer) {
5250 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5251 mpt->outofbeer = 0;
5252 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5253 }
5254 xpt_done(ccb);
5255 }
5256 break;
5257 }
5258 case TGT_STATE_NIL: /* XXX This Never Happens XXX */
5259 tgt->state = TGT_STATE_LOADED;
5260 break;
5261 default:
5262 mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5263 "Reply Function\n", tgt->state);
5264 }
5265 return (TRUE);
5266 }
5267
5268 status = le16toh(reply_frame->IOCStatus);
5269 if (status != MPI_IOCSTATUS_SUCCESS) {
5270 dbg = MPT_PRT_ERROR;
5271 } else {
5272 dbg = MPT_PRT_DEBUG1;
5273 }
5274
5275 mpt_lprt(mpt, dbg,
5276 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5277 req, req->serno, reply_frame, reply_frame->Function, status);
5278
5279 switch (reply_frame->Function) {
5280 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5281 {
5282 mpt_tgt_state_t *tgt;
5283 #ifdef INVARIANTS
5284 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5285 #endif
5286 if (status != MPI_IOCSTATUS_SUCCESS) {
5287 /*
5288 * XXX What to do?
5289 */
5290 break;
5291 }
5292 tgt = MPT_TGT_STATE(mpt, req);
5293 KASSERT(tgt->state == TGT_STATE_LOADING,
5294 ("bad state 0x%x on reply to buffer post", tgt->state));
5295 mpt_assign_serno(mpt, req);
5296 tgt->state = TGT_STATE_LOADED;
5297 break;
5298 }
5299 case MPI_FUNCTION_TARGET_ASSIST:
5300 #ifdef INVARIANTS
5301 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5302 #endif
5303 mpt_prt(mpt, "target assist completion\n");
5304 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5305 mpt_free_request(mpt, req);
5306 break;
5307 case MPI_FUNCTION_TARGET_STATUS_SEND:
5308 #ifdef INVARIANTS
5309 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5310 #endif
5311 mpt_prt(mpt, "status send completion\n");
5312 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5313 mpt_free_request(mpt, req);
5314 break;
5315 case MPI_FUNCTION_TARGET_MODE_ABORT:
5316 {
5317 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5318 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5319 PTR_MSG_TARGET_MODE_ABORT abtp =
5320 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5321 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5322 #ifdef INVARIANTS
5323 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5324 #endif
5325 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5326 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5327 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5328 mpt_free_request(mpt, req);
5329 break;
5330 }
5331 default:
5332 mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5333 "0x%x\n", reply_frame->Function);
5334 break;
5335 }
5336 return (TRUE);
5337 }
5338