1 /*- 2 * FreeBSD/CAM specific routines for LSI '909 FC adapters. 3 * FreeBSD Version. 4 * 5 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-3-Clause 6 * 7 * Copyright (c) 2000, 2001 by Greg Ansley 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice immediately at the beginning of the file, without modification, 14 * this list of conditions, and the following disclaimer. 15 * 2. The name of the author may not be used to endorse or promote products 16 * derived from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 /*- 31 * Copyright (c) 2002, 2006 by Matthew Jacob 32 * All rights reserved. 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions are 36 * met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 40 * substantially similar to the "NO WARRANTY" disclaimer below 41 * ("Disclaimer") and any redistribution must be conditioned upon including 42 * a substantially similar Disclaimer requirement for further binary 43 * redistribution. 44 * 3. Neither the names of the above listed copyright holders nor the names 45 * of any contributors may be used to endorse or promote products derived 46 * from this software without specific prior written permission. 47 * 48 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 49 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 51 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 52 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 53 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 54 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 55 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 56 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 57 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 58 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 59 * 60 * Support from Chris Ellsworth in order to make SAS adapters work 61 * is gratefully acknowledged. 62 * 63 * Support from LSI-Logic has also gone a great deal toward making this a 64 * workable subsystem and is gratefully acknowledged. 65 */ 66 /*- 67 * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 68 * Copyright (c) 2005, WHEEL Sp. z o.o. 69 * Copyright (c) 2004, 2005 Justin T. Gibbs 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions are 74 * met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 78 * substantially similar to the "NO WARRANTY" disclaimer below 79 * ("Disclaimer") and any redistribution must be conditioned upon including 80 * a substantially similar Disclaimer requirement for further binary 81 * redistribution. 82 * 3. Neither the names of the above listed copyright holders nor the names 83 * of any contributors may be used to endorse or promote products derived 84 * from this software without specific prior written permission. 85 * 86 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 87 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 88 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 89 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 90 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 91 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 92 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 93 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 94 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 95 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 96 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 97 */ 98 #include <sys/cdefs.h> 99 __FBSDID("$FreeBSD$"); 100 101 #include <dev/mpt/mpt.h> 102 #include <dev/mpt/mpt_cam.h> 103 #include <dev/mpt/mpt_raid.h> 104 105 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */ 106 #include "dev/mpt/mpilib/mpi_init.h" 107 #include "dev/mpt/mpilib/mpi_targ.h" 108 #include "dev/mpt/mpilib/mpi_fc.h" 109 #include "dev/mpt/mpilib/mpi_sas.h" 110 111 #include <sys/callout.h> 112 #include <sys/kthread.h> 113 #include <sys/sysctl.h> 114 115 static void mpt_poll(struct cam_sim *); 116 static callout_func_t mpt_timeout; 117 static void mpt_action(struct cam_sim *, union ccb *); 118 static int 119 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *); 120 static void mpt_setwidth(struct mpt_softc *, int, int); 121 static void mpt_setsync(struct mpt_softc *, int, int, int); 122 static int mpt_update_spi_config(struct mpt_softc *, int); 123 124 static mpt_reply_handler_t mpt_scsi_reply_handler; 125 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler; 126 static mpt_reply_handler_t mpt_fc_els_reply_handler; 127 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *, 128 MSG_DEFAULT_REPLY *); 129 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int); 130 static int mpt_fc_reset_link(struct mpt_softc *, int); 131 132 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt); 133 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt); 134 static void mpt_recovery_thread(void *arg); 135 static void mpt_recover_commands(struct mpt_softc *mpt); 136 137 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int, 138 target_id_t, lun_id_t, u_int, int); 139 140 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int); 141 static void mpt_post_target_command(struct mpt_softc *, request_t *, int); 142 static int mpt_add_els_buffers(struct mpt_softc *mpt); 143 static int mpt_add_target_commands(struct mpt_softc *mpt); 144 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t); 145 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t); 146 static void mpt_target_start_io(struct mpt_softc *, union ccb *); 147 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *); 148 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *); 149 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *, 150 uint8_t, uint8_t const *, u_int); 151 static void 152 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t, 153 tgt_resource_t *, int); 154 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *); 155 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *); 156 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler; 157 static mpt_reply_handler_t mpt_sata_pass_reply_handler; 158 159 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE; 160 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE; 161 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE; 162 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE; 163 164 static mpt_probe_handler_t mpt_cam_probe; 165 static mpt_attach_handler_t mpt_cam_attach; 166 static mpt_enable_handler_t mpt_cam_enable; 167 static mpt_ready_handler_t mpt_cam_ready; 168 static mpt_event_handler_t mpt_cam_event; 169 static mpt_reset_handler_t mpt_cam_ioc_reset; 170 static mpt_detach_handler_t mpt_cam_detach; 171 172 static struct mpt_personality mpt_cam_personality = 173 { 174 .name = "mpt_cam", 175 .probe = mpt_cam_probe, 176 .attach = mpt_cam_attach, 177 .enable = mpt_cam_enable, 178 .ready = mpt_cam_ready, 179 .event = mpt_cam_event, 180 .reset = mpt_cam_ioc_reset, 181 .detach = mpt_cam_detach, 182 }; 183 184 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND); 185 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1); 186 187 int mpt_enable_sata_wc = -1; 188 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc); 189 190 static int 191 mpt_cam_probe(struct mpt_softc *mpt) 192 { 193 int role; 194 195 /* 196 * Only attach to nodes that support the initiator or target role 197 * (or want to) or have RAID physical devices that need CAM pass-thru 198 * support. 199 */ 200 if (mpt->do_cfg_role) { 201 role = mpt->cfg_role; 202 } else { 203 role = mpt->role; 204 } 205 if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 || 206 (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) { 207 return (0); 208 } 209 return (ENODEV); 210 } 211 212 static int 213 mpt_cam_attach(struct mpt_softc *mpt) 214 { 215 struct cam_devq *devq; 216 mpt_handler_t handler; 217 int maxq; 218 int error; 219 220 MPT_LOCK(mpt); 221 TAILQ_INIT(&mpt->request_timeout_list); 222 maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))? 223 mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt); 224 225 handler.reply_handler = mpt_scsi_reply_handler; 226 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 227 &scsi_io_handler_id); 228 if (error != 0) { 229 MPT_UNLOCK(mpt); 230 goto cleanup; 231 } 232 233 handler.reply_handler = mpt_scsi_tmf_reply_handler; 234 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 235 &scsi_tmf_handler_id); 236 if (error != 0) { 237 MPT_UNLOCK(mpt); 238 goto cleanup; 239 } 240 241 /* 242 * If we're fibre channel and could support target mode, we register 243 * an ELS reply handler and give it resources. 244 */ 245 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { 246 handler.reply_handler = mpt_fc_els_reply_handler; 247 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 248 &fc_els_handler_id); 249 if (error != 0) { 250 MPT_UNLOCK(mpt); 251 goto cleanup; 252 } 253 if (mpt_add_els_buffers(mpt) == FALSE) { 254 error = ENOMEM; 255 MPT_UNLOCK(mpt); 256 goto cleanup; 257 } 258 maxq -= mpt->els_cmds_allocated; 259 } 260 261 /* 262 * If we support target mode, we register a reply handler for it, 263 * but don't add command resources until we actually enable target 264 * mode. 265 */ 266 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { 267 handler.reply_handler = mpt_scsi_tgt_reply_handler; 268 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 269 &mpt->scsi_tgt_handler_id); 270 if (error != 0) { 271 MPT_UNLOCK(mpt); 272 goto cleanup; 273 } 274 } 275 276 if (mpt->is_sas) { 277 handler.reply_handler = mpt_sata_pass_reply_handler; 278 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 279 &sata_pass_handler_id); 280 if (error != 0) { 281 MPT_UNLOCK(mpt); 282 goto cleanup; 283 } 284 } 285 286 /* 287 * We keep one request reserved for timeout TMF requests. 288 */ 289 mpt->tmf_req = mpt_get_request(mpt, FALSE); 290 if (mpt->tmf_req == NULL) { 291 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n"); 292 error = ENOMEM; 293 MPT_UNLOCK(mpt); 294 goto cleanup; 295 } 296 297 /* 298 * Mark the request as free even though not on the free list. 299 * There is only one TMF request allowed to be outstanding at 300 * a time and the TMF routines perform their own allocation 301 * tracking using the standard state flags. 302 */ 303 mpt->tmf_req->state = REQ_STATE_FREE; 304 maxq--; 305 306 /* 307 * The rest of this is CAM foo, for which we need to drop our lock 308 */ 309 MPT_UNLOCK(mpt); 310 311 if (mpt_spawn_recovery_thread(mpt) != 0) { 312 mpt_prt(mpt, "Unable to spawn recovery thread!\n"); 313 error = ENOMEM; 314 goto cleanup; 315 } 316 317 /* 318 * Create the device queue for our SIM(s). 319 */ 320 devq = cam_simq_alloc(maxq); 321 if (devq == NULL) { 322 mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n"); 323 error = ENOMEM; 324 goto cleanup; 325 } 326 327 /* 328 * Construct our SIM entry. 329 */ 330 mpt->sim = 331 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); 332 if (mpt->sim == NULL) { 333 mpt_prt(mpt, "Unable to allocate CAM SIM!\n"); 334 cam_simq_free(devq); 335 error = ENOMEM; 336 goto cleanup; 337 } 338 339 /* 340 * Register exactly this bus. 341 */ 342 MPT_LOCK(mpt); 343 if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) { 344 mpt_prt(mpt, "Bus registration Failed!\n"); 345 error = ENOMEM; 346 MPT_UNLOCK(mpt); 347 goto cleanup; 348 } 349 350 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim), 351 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 352 mpt_prt(mpt, "Unable to allocate Path!\n"); 353 error = ENOMEM; 354 MPT_UNLOCK(mpt); 355 goto cleanup; 356 } 357 MPT_UNLOCK(mpt); 358 359 /* 360 * Only register a second bus for RAID physical 361 * devices if the controller supports RAID. 362 */ 363 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { 364 return (0); 365 } 366 367 /* 368 * Create a "bus" to export all hidden disks to CAM. 369 */ 370 mpt->phydisk_sim = 371 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); 372 if (mpt->phydisk_sim == NULL) { 373 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n"); 374 error = ENOMEM; 375 goto cleanup; 376 } 377 378 /* 379 * Register this bus. 380 */ 381 MPT_LOCK(mpt); 382 if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) != 383 CAM_SUCCESS) { 384 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); 385 error = ENOMEM; 386 MPT_UNLOCK(mpt); 387 goto cleanup; 388 } 389 390 if (xpt_create_path(&mpt->phydisk_path, NULL, 391 cam_sim_path(mpt->phydisk_sim), 392 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 393 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); 394 error = ENOMEM; 395 MPT_UNLOCK(mpt); 396 goto cleanup; 397 } 398 MPT_UNLOCK(mpt); 399 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); 400 return (0); 401 402 cleanup: 403 mpt_cam_detach(mpt); 404 return (error); 405 } 406 407 /* 408 * Read FC configuration information 409 */ 410 static int 411 mpt_read_config_info_fc(struct mpt_softc *mpt) 412 { 413 struct sysctl_ctx_list *ctx; 414 struct sysctl_oid *tree; 415 char *topology = NULL; 416 int rv; 417 418 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, 419 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); 420 if (rv) { 421 return (-1); 422 } 423 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", 424 mpt->mpt_fcport_page0.Header.PageVersion, 425 mpt->mpt_fcport_page0.Header.PageLength, 426 mpt->mpt_fcport_page0.Header.PageNumber, 427 mpt->mpt_fcport_page0.Header.PageType); 428 429 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, 430 sizeof(mpt->mpt_fcport_page0), FALSE, 5000); 431 if (rv) { 432 mpt_prt(mpt, "failed to read FC Port Page 0\n"); 433 return (-1); 434 } 435 mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0); 436 437 switch (mpt->mpt_fcport_page0.CurrentSpeed) { 438 case MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT: 439 mpt->mpt_fcport_speed = 1; 440 break; 441 case MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT: 442 mpt->mpt_fcport_speed = 2; 443 break; 444 case MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT: 445 mpt->mpt_fcport_speed = 10; 446 break; 447 case MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT: 448 mpt->mpt_fcport_speed = 4; 449 break; 450 default: 451 mpt->mpt_fcport_speed = 0; 452 break; 453 } 454 455 switch (mpt->mpt_fcport_page0.Flags & 456 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { 457 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: 458 mpt->mpt_fcport_speed = 0; 459 topology = "<NO LOOP>"; 460 break; 461 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: 462 topology = "N-Port"; 463 break; 464 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: 465 topology = "NL-Port"; 466 break; 467 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: 468 topology = "F-Port"; 469 break; 470 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: 471 topology = "FL-Port"; 472 break; 473 default: 474 mpt->mpt_fcport_speed = 0; 475 topology = "?"; 476 break; 477 } 478 479 mpt->scinfo.fc.wwnn = ((uint64_t)mpt->mpt_fcport_page0.WWNN.High << 32) 480 | mpt->mpt_fcport_page0.WWNN.Low; 481 mpt->scinfo.fc.wwpn = ((uint64_t)mpt->mpt_fcport_page0.WWPN.High << 32) 482 | mpt->mpt_fcport_page0.WWPN.Low; 483 mpt->scinfo.fc.portid = mpt->mpt_fcport_page0.PortIdentifier; 484 485 mpt_lprt(mpt, MPT_PRT_INFO, 486 "FC Port Page 0: Topology <%s> WWNN 0x%16jx WWPN 0x%16jx " 487 "Speed %u-Gbit\n", topology, 488 (uintmax_t)mpt->scinfo.fc.wwnn, (uintmax_t)mpt->scinfo.fc.wwpn, 489 mpt->mpt_fcport_speed); 490 MPT_UNLOCK(mpt); 491 ctx = device_get_sysctl_ctx(mpt->dev); 492 tree = device_get_sysctl_tree(mpt->dev); 493 494 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 495 "wwnn", CTLFLAG_RD, &mpt->scinfo.fc.wwnn, 496 "World Wide Node Name"); 497 498 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 499 "wwpn", CTLFLAG_RD, &mpt->scinfo.fc.wwpn, 500 "World Wide Port Name"); 501 502 MPT_LOCK(mpt); 503 return (0); 504 } 505 506 /* 507 * Set FC configuration information. 508 */ 509 static int 510 mpt_set_initial_config_fc(struct mpt_softc *mpt) 511 { 512 CONFIG_PAGE_FC_PORT_1 fc; 513 U32 fl; 514 int r, doit = 0; 515 int role; 516 517 r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0, 518 &fc.Header, FALSE, 5000); 519 if (r) { 520 mpt_prt(mpt, "failed to read FC page 1 header\n"); 521 return (mpt_fc_reset_link(mpt, 1)); 522 } 523 524 r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0, 525 &fc.Header, sizeof (fc), FALSE, 5000); 526 if (r) { 527 mpt_prt(mpt, "failed to read FC page 1\n"); 528 return (mpt_fc_reset_link(mpt, 1)); 529 } 530 mpt2host_config_page_fc_port_1(&fc); 531 532 /* 533 * Check our flags to make sure we support the role we want. 534 */ 535 doit = 0; 536 role = 0; 537 fl = fc.Flags; 538 539 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) { 540 role |= MPT_ROLE_INITIATOR; 541 } 542 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { 543 role |= MPT_ROLE_TARGET; 544 } 545 546 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK; 547 548 if (mpt->do_cfg_role == 0) { 549 role = mpt->cfg_role; 550 } else { 551 mpt->do_cfg_role = 0; 552 } 553 554 if (role != mpt->cfg_role) { 555 if (mpt->cfg_role & MPT_ROLE_INITIATOR) { 556 if ((role & MPT_ROLE_INITIATOR) == 0) { 557 mpt_prt(mpt, "adding initiator role\n"); 558 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT; 559 doit++; 560 } else { 561 mpt_prt(mpt, "keeping initiator role\n"); 562 } 563 } else if (role & MPT_ROLE_INITIATOR) { 564 mpt_prt(mpt, "removing initiator role\n"); 565 doit++; 566 } 567 if (mpt->cfg_role & MPT_ROLE_TARGET) { 568 if ((role & MPT_ROLE_TARGET) == 0) { 569 mpt_prt(mpt, "adding target role\n"); 570 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG; 571 doit++; 572 } else { 573 mpt_prt(mpt, "keeping target role\n"); 574 } 575 } else if (role & MPT_ROLE_TARGET) { 576 mpt_prt(mpt, "removing target role\n"); 577 doit++; 578 } 579 mpt->role = mpt->cfg_role; 580 } 581 582 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { 583 if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) { 584 mpt_prt(mpt, "adding OXID option\n"); 585 fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID; 586 doit++; 587 } 588 } 589 590 if (doit) { 591 fc.Flags = fl; 592 host2mpt_config_page_fc_port_1(&fc); 593 r = mpt_write_cfg_page(mpt, 594 MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header, 595 sizeof(fc), FALSE, 5000); 596 if (r != 0) { 597 mpt_prt(mpt, "failed to update NVRAM with changes\n"); 598 return (0); 599 } 600 mpt_prt(mpt, "NOTE: NVRAM changes will not take " 601 "effect until next reboot or IOC reset\n"); 602 } 603 return (0); 604 } 605 606 static int 607 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo) 608 { 609 ConfigExtendedPageHeader_t hdr; 610 struct mptsas_phyinfo *phyinfo; 611 SasIOUnitPage0_t *buffer; 612 int error, len, i; 613 614 error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION, 615 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 616 &hdr, 0, 10000); 617 if (error) 618 goto out; 619 if (hdr.ExtPageLength == 0) { 620 error = ENXIO; 621 goto out; 622 } 623 624 len = hdr.ExtPageLength * 4; 625 buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO); 626 if (buffer == NULL) { 627 error = ENOMEM; 628 goto out; 629 } 630 631 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 632 0, &hdr, buffer, len, 0, 10000); 633 if (error) { 634 free(buffer, M_DEVBUF); 635 goto out; 636 } 637 638 portinfo->num_phys = buffer->NumPhys; 639 portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) * 640 portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO); 641 if (portinfo->phy_info == NULL) { 642 free(buffer, M_DEVBUF); 643 error = ENOMEM; 644 goto out; 645 } 646 647 for (i = 0; i < portinfo->num_phys; i++) { 648 phyinfo = &portinfo->phy_info[i]; 649 phyinfo->phy_num = i; 650 phyinfo->port_id = buffer->PhyData[i].Port; 651 phyinfo->negotiated_link_rate = 652 buffer->PhyData[i].NegotiatedLinkRate; 653 phyinfo->handle = 654 le16toh(buffer->PhyData[i].ControllerDevHandle); 655 } 656 657 free(buffer, M_DEVBUF); 658 out: 659 return (error); 660 } 661 662 static int 663 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info, 664 uint32_t form, uint32_t form_specific) 665 { 666 ConfigExtendedPageHeader_t hdr; 667 SasPhyPage0_t *buffer; 668 int error; 669 670 error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0, 671 MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr, 672 0, 10000); 673 if (error) 674 goto out; 675 if (hdr.ExtPageLength == 0) { 676 error = ENXIO; 677 goto out; 678 } 679 680 buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); 681 if (buffer == NULL) { 682 error = ENOMEM; 683 goto out; 684 } 685 686 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 687 form + form_specific, &hdr, buffer, 688 sizeof(SasPhyPage0_t), 0, 10000); 689 if (error) { 690 free(buffer, M_DEVBUF); 691 goto out; 692 } 693 694 phy_info->hw_link_rate = buffer->HwLinkRate; 695 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate; 696 phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle); 697 phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle); 698 699 free(buffer, M_DEVBUF); 700 out: 701 return (error); 702 } 703 704 static int 705 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info, 706 uint32_t form, uint32_t form_specific) 707 { 708 ConfigExtendedPageHeader_t hdr; 709 SasDevicePage0_t *buffer; 710 uint64_t sas_address; 711 int error = 0; 712 713 bzero(device_info, sizeof(*device_info)); 714 error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0, 715 MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, 716 &hdr, 0, 10000); 717 if (error) 718 goto out; 719 if (hdr.ExtPageLength == 0) { 720 error = ENXIO; 721 goto out; 722 } 723 724 buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); 725 if (buffer == NULL) { 726 error = ENOMEM; 727 goto out; 728 } 729 730 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 731 form + form_specific, &hdr, buffer, 732 sizeof(SasDevicePage0_t), 0, 10000); 733 if (error) { 734 free(buffer, M_DEVBUF); 735 goto out; 736 } 737 738 device_info->dev_handle = le16toh(buffer->DevHandle); 739 device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle); 740 device_info->enclosure_handle = le16toh(buffer->EnclosureHandle); 741 device_info->slot = le16toh(buffer->Slot); 742 device_info->phy_num = buffer->PhyNum; 743 device_info->physical_port = buffer->PhysicalPort; 744 device_info->target_id = buffer->TargetID; 745 device_info->bus = buffer->Bus; 746 bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t)); 747 device_info->sas_address = le64toh(sas_address); 748 device_info->device_info = le32toh(buffer->DeviceInfo); 749 750 free(buffer, M_DEVBUF); 751 out: 752 return (error); 753 } 754 755 /* 756 * Read SAS configuration information. Nothing to do yet. 757 */ 758 static int 759 mpt_read_config_info_sas(struct mpt_softc *mpt) 760 { 761 struct mptsas_portinfo *portinfo; 762 struct mptsas_phyinfo *phyinfo; 763 int error, i; 764 765 portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO); 766 if (portinfo == NULL) 767 return (ENOMEM); 768 769 error = mptsas_sas_io_unit_pg0(mpt, portinfo); 770 if (error) { 771 free(portinfo, M_DEVBUF); 772 return (0); 773 } 774 775 for (i = 0; i < portinfo->num_phys; i++) { 776 phyinfo = &portinfo->phy_info[i]; 777 error = mptsas_sas_phy_pg0(mpt, phyinfo, 778 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << 779 MPI_SAS_PHY_PGAD_FORM_SHIFT), i); 780 if (error) 781 break; 782 error = mptsas_sas_device_pg0(mpt, &phyinfo->identify, 783 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 784 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 785 phyinfo->handle); 786 if (error) 787 break; 788 phyinfo->identify.phy_num = phyinfo->phy_num = i; 789 if (phyinfo->attached.dev_handle) 790 error = mptsas_sas_device_pg0(mpt, 791 &phyinfo->attached, 792 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 793 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 794 phyinfo->attached.dev_handle); 795 if (error) 796 break; 797 } 798 mpt->sas_portinfo = portinfo; 799 return (0); 800 } 801 802 static void 803 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo, 804 int enabled) 805 { 806 SataPassthroughRequest_t *pass; 807 request_t *req; 808 int error, status; 809 810 req = mpt_get_request(mpt, 0); 811 if (req == NULL) 812 return; 813 814 pass = req->req_vbuf; 815 bzero(pass, sizeof(SataPassthroughRequest_t)); 816 pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH; 817 pass->TargetID = devinfo->target_id; 818 pass->Bus = devinfo->bus; 819 pass->PassthroughFlags = 0; 820 pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED; 821 pass->DataLength = 0; 822 pass->MsgContext = htole32(req->index | sata_pass_handler_id); 823 pass->CommandFIS[0] = 0x27; 824 pass->CommandFIS[1] = 0x80; 825 pass->CommandFIS[2] = 0xef; 826 pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82; 827 pass->CommandFIS[7] = 0x40; 828 pass->CommandFIS[15] = 0x08; 829 830 mpt_check_doorbell(mpt); 831 mpt_send_cmd(mpt, req); 832 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0, 833 10 * 1000); 834 if (error) { 835 mpt_free_request(mpt, req); 836 printf("error %d sending passthrough\n", error); 837 return; 838 } 839 840 status = le16toh(req->IOCStatus); 841 if (status != MPI_IOCSTATUS_SUCCESS) { 842 mpt_free_request(mpt, req); 843 printf("IOCSTATUS %d\n", status); 844 return; 845 } 846 847 mpt_free_request(mpt, req); 848 } 849 850 /* 851 * Set SAS configuration information. Nothing to do yet. 852 */ 853 static int 854 mpt_set_initial_config_sas(struct mpt_softc *mpt) 855 { 856 struct mptsas_phyinfo *phyinfo; 857 int i; 858 859 if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) { 860 for (i = 0; i < mpt->sas_portinfo->num_phys; i++) { 861 phyinfo = &mpt->sas_portinfo->phy_info[i]; 862 if (phyinfo->attached.dev_handle == 0) 863 continue; 864 if ((phyinfo->attached.device_info & 865 MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0) 866 continue; 867 if (bootverbose) 868 device_printf(mpt->dev, 869 "%sabling SATA WC on phy %d\n", 870 (mpt_enable_sata_wc) ? "En" : "Dis", i); 871 mptsas_set_sata_wc(mpt, &phyinfo->attached, 872 mpt_enable_sata_wc); 873 } 874 } 875 876 return (0); 877 } 878 879 static int 880 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, 881 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 882 { 883 884 if (req != NULL) { 885 if (reply_frame != NULL) { 886 req->IOCStatus = le16toh(reply_frame->IOCStatus); 887 } 888 req->state &= ~REQ_STATE_QUEUED; 889 req->state |= REQ_STATE_DONE; 890 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 891 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 892 wakeup(req); 893 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { 894 /* 895 * Whew- we can free this request (late completion) 896 */ 897 mpt_free_request(mpt, req); 898 } 899 } 900 901 return (TRUE); 902 } 903 904 /* 905 * Read SCSI configuration information 906 */ 907 static int 908 mpt_read_config_info_spi(struct mpt_softc *mpt) 909 { 910 int rv, i; 911 912 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, 913 &mpt->mpt_port_page0.Header, FALSE, 5000); 914 if (rv) { 915 return (-1); 916 } 917 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", 918 mpt->mpt_port_page0.Header.PageVersion, 919 mpt->mpt_port_page0.Header.PageLength, 920 mpt->mpt_port_page0.Header.PageNumber, 921 mpt->mpt_port_page0.Header.PageType); 922 923 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, 924 &mpt->mpt_port_page1.Header, FALSE, 5000); 925 if (rv) { 926 return (-1); 927 } 928 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", 929 mpt->mpt_port_page1.Header.PageVersion, 930 mpt->mpt_port_page1.Header.PageLength, 931 mpt->mpt_port_page1.Header.PageNumber, 932 mpt->mpt_port_page1.Header.PageType); 933 934 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, 935 &mpt->mpt_port_page2.Header, FALSE, 5000); 936 if (rv) { 937 return (-1); 938 } 939 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", 940 mpt->mpt_port_page2.Header.PageVersion, 941 mpt->mpt_port_page2.Header.PageLength, 942 mpt->mpt_port_page2.Header.PageNumber, 943 mpt->mpt_port_page2.Header.PageType); 944 945 for (i = 0; i < 16; i++) { 946 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 947 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); 948 if (rv) { 949 return (-1); 950 } 951 mpt_lprt(mpt, MPT_PRT_DEBUG, 952 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, 953 mpt->mpt_dev_page0[i].Header.PageVersion, 954 mpt->mpt_dev_page0[i].Header.PageLength, 955 mpt->mpt_dev_page0[i].Header.PageNumber, 956 mpt->mpt_dev_page0[i].Header.PageType); 957 958 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 959 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); 960 if (rv) { 961 return (-1); 962 } 963 mpt_lprt(mpt, MPT_PRT_DEBUG, 964 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, 965 mpt->mpt_dev_page1[i].Header.PageVersion, 966 mpt->mpt_dev_page1[i].Header.PageLength, 967 mpt->mpt_dev_page1[i].Header.PageNumber, 968 mpt->mpt_dev_page1[i].Header.PageType); 969 } 970 971 /* 972 * At this point, we don't *have* to fail. As long as we have 973 * valid config header information, we can (barely) lurch 974 * along. 975 */ 976 977 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header, 978 sizeof(mpt->mpt_port_page0), FALSE, 5000); 979 if (rv) { 980 mpt_prt(mpt, "failed to read SPI Port Page 0\n"); 981 } else { 982 mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0); 983 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 984 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", 985 mpt->mpt_port_page0.Capabilities, 986 mpt->mpt_port_page0.PhysicalInterface); 987 } 988 989 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header, 990 sizeof(mpt->mpt_port_page1), FALSE, 5000); 991 if (rv) { 992 mpt_prt(mpt, "failed to read SPI Port Page 1\n"); 993 } else { 994 mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1); 995 mpt_lprt(mpt, MPT_PRT_DEBUG, 996 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", 997 mpt->mpt_port_page1.Configuration, 998 mpt->mpt_port_page1.OnBusTimerValue); 999 } 1000 1001 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header, 1002 sizeof(mpt->mpt_port_page2), FALSE, 5000); 1003 if (rv) { 1004 mpt_prt(mpt, "failed to read SPI Port Page 2\n"); 1005 } else { 1006 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1007 "Port Page 2: Flags %x Settings %x\n", 1008 mpt->mpt_port_page2.PortFlags, 1009 mpt->mpt_port_page2.PortSettings); 1010 mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2); 1011 for (i = 0; i < 16; i++) { 1012 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1013 " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", 1014 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, 1015 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, 1016 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); 1017 } 1018 } 1019 1020 for (i = 0; i < 16; i++) { 1021 rv = mpt_read_cur_cfg_page(mpt, i, 1022 &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0), 1023 FALSE, 5000); 1024 if (rv) { 1025 mpt_prt(mpt, 1026 "cannot read SPI Target %d Device Page 0\n", i); 1027 continue; 1028 } 1029 mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]); 1030 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1031 "target %d page 0: Negotiated Params %x Information %x\n", 1032 i, mpt->mpt_dev_page0[i].NegotiatedParameters, 1033 mpt->mpt_dev_page0[i].Information); 1034 1035 rv = mpt_read_cur_cfg_page(mpt, i, 1036 &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1), 1037 FALSE, 5000); 1038 if (rv) { 1039 mpt_prt(mpt, 1040 "cannot read SPI Target %d Device Page 1\n", i); 1041 continue; 1042 } 1043 mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]); 1044 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1045 "target %d page 1: Requested Params %x Configuration %x\n", 1046 i, mpt->mpt_dev_page1[i].RequestedParameters, 1047 mpt->mpt_dev_page1[i].Configuration); 1048 } 1049 return (0); 1050 } 1051 1052 /* 1053 * Validate SPI configuration information. 1054 * 1055 * In particular, validate SPI Port Page 1. 1056 */ 1057 static int 1058 mpt_set_initial_config_spi(struct mpt_softc *mpt) 1059 { 1060 int error, i, pp1val; 1061 1062 mpt->mpt_disc_enable = 0xff; 1063 mpt->mpt_tag_enable = 0; 1064 1065 pp1val = ((1 << mpt->mpt_ini_id) << 1066 MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id; 1067 if (mpt->mpt_port_page1.Configuration != pp1val) { 1068 CONFIG_PAGE_SCSI_PORT_1 tmp; 1069 1070 mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should " 1071 "be %x\n", mpt->mpt_port_page1.Configuration, pp1val); 1072 tmp = mpt->mpt_port_page1; 1073 tmp.Configuration = pp1val; 1074 host2mpt_config_page_scsi_port_1(&tmp); 1075 error = mpt_write_cur_cfg_page(mpt, 0, 1076 &tmp.Header, sizeof(tmp), FALSE, 5000); 1077 if (error) { 1078 return (-1); 1079 } 1080 error = mpt_read_cur_cfg_page(mpt, 0, 1081 &tmp.Header, sizeof(tmp), FALSE, 5000); 1082 if (error) { 1083 return (-1); 1084 } 1085 mpt2host_config_page_scsi_port_1(&tmp); 1086 if (tmp.Configuration != pp1val) { 1087 mpt_prt(mpt, 1088 "failed to reset SPI Port Page 1 Config value\n"); 1089 return (-1); 1090 } 1091 mpt->mpt_port_page1 = tmp; 1092 } 1093 1094 /* 1095 * The purpose of this exercise is to get 1096 * all targets back to async/narrow. 1097 * 1098 * We skip this step if the BIOS has already negotiated 1099 * speeds with the targets. 1100 */ 1101 i = mpt->mpt_port_page2.PortSettings & 1102 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 1103 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) { 1104 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1105 "honoring BIOS transfer negotiations\n"); 1106 } else { 1107 for (i = 0; i < 16; i++) { 1108 mpt->mpt_dev_page1[i].RequestedParameters = 0; 1109 mpt->mpt_dev_page1[i].Configuration = 0; 1110 (void) mpt_update_spi_config(mpt, i); 1111 } 1112 } 1113 return (0); 1114 } 1115 1116 static int 1117 mpt_cam_enable(struct mpt_softc *mpt) 1118 { 1119 int error; 1120 1121 MPT_LOCK(mpt); 1122 1123 error = EIO; 1124 if (mpt->is_fc) { 1125 if (mpt_read_config_info_fc(mpt)) { 1126 goto out; 1127 } 1128 if (mpt_set_initial_config_fc(mpt)) { 1129 goto out; 1130 } 1131 } else if (mpt->is_sas) { 1132 if (mpt_read_config_info_sas(mpt)) { 1133 goto out; 1134 } 1135 if (mpt_set_initial_config_sas(mpt)) { 1136 goto out; 1137 } 1138 } else if (mpt->is_spi) { 1139 if (mpt_read_config_info_spi(mpt)) { 1140 goto out; 1141 } 1142 if (mpt_set_initial_config_spi(mpt)) { 1143 goto out; 1144 } 1145 } 1146 error = 0; 1147 1148 out: 1149 MPT_UNLOCK(mpt); 1150 return (error); 1151 } 1152 1153 static void 1154 mpt_cam_ready(struct mpt_softc *mpt) 1155 { 1156 1157 /* 1158 * If we're in target mode, hang out resources now 1159 * so we don't cause the world to hang talking to us. 1160 */ 1161 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 1162 /* 1163 * Try to add some target command resources 1164 */ 1165 MPT_LOCK(mpt); 1166 if (mpt_add_target_commands(mpt) == FALSE) { 1167 mpt_prt(mpt, "failed to add target commands\n"); 1168 } 1169 MPT_UNLOCK(mpt); 1170 } 1171 mpt->ready = 1; 1172 } 1173 1174 static void 1175 mpt_cam_detach(struct mpt_softc *mpt) 1176 { 1177 mpt_handler_t handler; 1178 1179 MPT_LOCK(mpt); 1180 mpt->ready = 0; 1181 mpt_terminate_recovery_thread(mpt); 1182 1183 handler.reply_handler = mpt_scsi_reply_handler; 1184 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1185 scsi_io_handler_id); 1186 handler.reply_handler = mpt_scsi_tmf_reply_handler; 1187 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1188 scsi_tmf_handler_id); 1189 handler.reply_handler = mpt_fc_els_reply_handler; 1190 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1191 fc_els_handler_id); 1192 handler.reply_handler = mpt_scsi_tgt_reply_handler; 1193 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1194 mpt->scsi_tgt_handler_id); 1195 handler.reply_handler = mpt_sata_pass_reply_handler; 1196 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1197 sata_pass_handler_id); 1198 1199 if (mpt->tmf_req != NULL) { 1200 mpt->tmf_req->state = REQ_STATE_ALLOCATED; 1201 mpt_free_request(mpt, mpt->tmf_req); 1202 mpt->tmf_req = NULL; 1203 } 1204 if (mpt->sas_portinfo != NULL) { 1205 free(mpt->sas_portinfo, M_DEVBUF); 1206 mpt->sas_portinfo = NULL; 1207 } 1208 1209 if (mpt->sim != NULL) { 1210 xpt_free_path(mpt->path); 1211 xpt_bus_deregister(cam_sim_path(mpt->sim)); 1212 cam_sim_free(mpt->sim, TRUE); 1213 mpt->sim = NULL; 1214 } 1215 1216 if (mpt->phydisk_sim != NULL) { 1217 xpt_free_path(mpt->phydisk_path); 1218 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim)); 1219 cam_sim_free(mpt->phydisk_sim, TRUE); 1220 mpt->phydisk_sim = NULL; 1221 } 1222 MPT_UNLOCK(mpt); 1223 } 1224 1225 /* This routine is used after a system crash to dump core onto the swap device. 1226 */ 1227 static void 1228 mpt_poll(struct cam_sim *sim) 1229 { 1230 struct mpt_softc *mpt; 1231 1232 mpt = (struct mpt_softc *)cam_sim_softc(sim); 1233 mpt_intr(mpt); 1234 } 1235 1236 /* 1237 * Watchdog timeout routine for SCSI requests. 1238 */ 1239 static void 1240 mpt_timeout(void *arg) 1241 { 1242 union ccb *ccb; 1243 struct mpt_softc *mpt; 1244 request_t *req; 1245 1246 ccb = (union ccb *)arg; 1247 mpt = ccb->ccb_h.ccb_mpt_ptr; 1248 1249 MPT_LOCK_ASSERT(mpt); 1250 req = ccb->ccb_h.ccb_req_ptr; 1251 mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req, 1252 req->serno, ccb, req->ccb); 1253 /* XXX: WHAT ARE WE TRYING TO DO HERE? */ 1254 if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) { 1255 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 1256 TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links); 1257 req->state |= REQ_STATE_TIMEDOUT; 1258 mpt_wakeup_recovery_thread(mpt); 1259 } 1260 } 1261 1262 /* 1263 * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called 1264 * directly. 1265 * 1266 * Takes a list of physical segments and builds the SGL for SCSI IO command 1267 * and forwards the commard to the IOC after one last check that CAM has not 1268 * aborted the transaction. 1269 */ 1270 static void 1271 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1272 { 1273 request_t *req, *trq; 1274 char *mpt_off; 1275 union ccb *ccb; 1276 struct mpt_softc *mpt; 1277 bus_addr_t chain_list_addr; 1278 int first_lim, seg, this_seg_lim; 1279 uint32_t addr, cur_off, flags, nxt_off, tf; 1280 void *sglp = NULL; 1281 MSG_REQUEST_HEADER *hdrp; 1282 SGE_SIMPLE64 *se; 1283 SGE_CHAIN64 *ce; 1284 int istgt = 0; 1285 1286 req = (request_t *)arg; 1287 ccb = req->ccb; 1288 1289 mpt = ccb->ccb_h.ccb_mpt_ptr; 1290 req = ccb->ccb_h.ccb_req_ptr; 1291 1292 hdrp = req->req_vbuf; 1293 mpt_off = req->req_vbuf; 1294 1295 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1296 error = EFBIG; 1297 } 1298 1299 if (error == 0) { 1300 switch (hdrp->Function) { 1301 case MPI_FUNCTION_SCSI_IO_REQUEST: 1302 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 1303 istgt = 0; 1304 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 1305 break; 1306 case MPI_FUNCTION_TARGET_ASSIST: 1307 istgt = 1; 1308 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 1309 break; 1310 default: 1311 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", 1312 hdrp->Function); 1313 error = EINVAL; 1314 break; 1315 } 1316 } 1317 1318 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1319 error = EFBIG; 1320 mpt_prt(mpt, "segment count %d too large (max %u)\n", 1321 nseg, mpt->max_seg_cnt); 1322 } 1323 1324 bad: 1325 if (error != 0) { 1326 if (error != EFBIG && error != ENOMEM) { 1327 mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); 1328 } 1329 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1330 cam_status status; 1331 mpt_freeze_ccb(ccb); 1332 if (error == EFBIG) { 1333 status = CAM_REQ_TOO_BIG; 1334 } else if (error == ENOMEM) { 1335 if (mpt->outofbeer == 0) { 1336 mpt->outofbeer = 1; 1337 xpt_freeze_simq(mpt->sim, 1); 1338 mpt_lprt(mpt, MPT_PRT_DEBUG, 1339 "FREEZEQ\n"); 1340 } 1341 status = CAM_REQUEUE_REQ; 1342 } else { 1343 status = CAM_REQ_CMP_ERR; 1344 } 1345 mpt_set_ccb_status(ccb, status); 1346 } 1347 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1348 request_t *cmd_req = 1349 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1350 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1351 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1352 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1353 } 1354 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1355 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1356 xpt_done(ccb); 1357 mpt_free_request(mpt, req); 1358 return; 1359 } 1360 1361 /* 1362 * No data to transfer? 1363 * Just make a single simple SGL with zero length. 1364 */ 1365 1366 if (mpt->verbose >= MPT_PRT_DEBUG) { 1367 int tidx = ((char *)sglp) - mpt_off; 1368 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1369 } 1370 1371 if (nseg == 0) { 1372 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1373 MPI_pSGE_SET_FLAGS(se1, 1374 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1375 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1376 se1->FlagsLength = htole32(se1->FlagsLength); 1377 goto out; 1378 } 1379 1380 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1381 if (istgt == 0) { 1382 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1383 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1384 } 1385 } else { 1386 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1387 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1388 } 1389 } 1390 1391 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1392 bus_dmasync_op_t op; 1393 if (istgt == 0) { 1394 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1395 op = BUS_DMASYNC_PREREAD; 1396 } else { 1397 op = BUS_DMASYNC_PREWRITE; 1398 } 1399 } else { 1400 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1401 op = BUS_DMASYNC_PREWRITE; 1402 } else { 1403 op = BUS_DMASYNC_PREREAD; 1404 } 1405 } 1406 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1407 } 1408 1409 /* 1410 * Okay, fill in what we can at the end of the command frame. 1411 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1412 * the command frame. 1413 * 1414 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1415 * SIMPLE64 pointers and start doing CHAIN64 entries after 1416 * that. 1417 */ 1418 1419 if (nseg < MPT_NSGL_FIRST(mpt)) { 1420 first_lim = nseg; 1421 } else { 1422 /* 1423 * Leave room for CHAIN element 1424 */ 1425 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1426 } 1427 1428 se = (SGE_SIMPLE64 *) sglp; 1429 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1430 tf = flags; 1431 memset(se, 0, sizeof (*se)); 1432 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1433 se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); 1434 if (sizeof(bus_addr_t) > 4) { 1435 addr = ((uint64_t)dm_segs->ds_addr) >> 32; 1436 /* SAS1078 36GB limitation WAR */ 1437 if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + 1438 MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { 1439 addr |= (1U << 31); 1440 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; 1441 } 1442 se->Address.High = htole32(addr); 1443 } 1444 if (seg == first_lim - 1) { 1445 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1446 } 1447 if (seg == nseg - 1) { 1448 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1449 MPI_SGE_FLAGS_END_OF_BUFFER; 1450 } 1451 MPI_pSGE_SET_FLAGS(se, tf); 1452 se->FlagsLength = htole32(se->FlagsLength); 1453 } 1454 1455 if (seg == nseg) { 1456 goto out; 1457 } 1458 1459 /* 1460 * Tell the IOC where to find the first chain element. 1461 */ 1462 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1463 nxt_off = MPT_RQSL(mpt); 1464 trq = req; 1465 1466 /* 1467 * Make up the rest of the data segments out of a chain element 1468 * (contained in the current request frame) which points to 1469 * SIMPLE64 elements in the next request frame, possibly ending 1470 * with *another* chain element (if there's more). 1471 */ 1472 while (seg < nseg) { 1473 /* 1474 * Point to the chain descriptor. Note that the chain 1475 * descriptor is at the end of the *previous* list (whether 1476 * chain or simple). 1477 */ 1478 ce = (SGE_CHAIN64 *) se; 1479 1480 /* 1481 * Before we change our current pointer, make sure we won't 1482 * overflow the request area with this frame. Note that we 1483 * test against 'greater than' here as it's okay in this case 1484 * to have next offset be just outside the request area. 1485 */ 1486 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1487 nxt_off = MPT_REQUEST_AREA; 1488 goto next_chain; 1489 } 1490 1491 /* 1492 * Set our SGE element pointer to the beginning of the chain 1493 * list and update our next chain list offset. 1494 */ 1495 se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; 1496 cur_off = nxt_off; 1497 nxt_off += MPT_RQSL(mpt); 1498 1499 /* 1500 * Now initialize the chain descriptor. 1501 */ 1502 memset(ce, 0, sizeof (*ce)); 1503 1504 /* 1505 * Get the physical address of the chain list. 1506 */ 1507 chain_list_addr = trq->req_pbuf; 1508 chain_list_addr += cur_off; 1509 if (sizeof (bus_addr_t) > 4) { 1510 ce->Address.High = 1511 htole32(((uint64_t)chain_list_addr) >> 32); 1512 } 1513 ce->Address.Low = htole32(chain_list_addr & 0xffffffff); 1514 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | 1515 MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1516 1517 /* 1518 * If we have more than a frame's worth of segments left, 1519 * set up the chain list to have the last element be another 1520 * chain descriptor. 1521 */ 1522 if ((nseg - seg) > MPT_NSGL(mpt)) { 1523 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1524 /* 1525 * The length of the chain is the length in bytes of the 1526 * number of segments plus the next chain element. 1527 * 1528 * The next chain descriptor offset is the length, 1529 * in words, of the number of segments. 1530 */ 1531 ce->Length = (this_seg_lim - seg) * 1532 sizeof (SGE_SIMPLE64); 1533 ce->NextChainOffset = ce->Length >> 2; 1534 ce->Length += sizeof (SGE_CHAIN64); 1535 } else { 1536 this_seg_lim = nseg; 1537 ce->Length = (this_seg_lim - seg) * 1538 sizeof (SGE_SIMPLE64); 1539 } 1540 ce->Length = htole16(ce->Length); 1541 1542 /* 1543 * Fill in the chain list SGE elements with our segment data. 1544 * 1545 * If we're the last element in this chain list, set the last 1546 * element flag. If we're the completely last element period, 1547 * set the end of list and end of buffer flags. 1548 */ 1549 while (seg < this_seg_lim) { 1550 tf = flags; 1551 memset(se, 0, sizeof (*se)); 1552 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1553 se->Address.Low = htole32(dm_segs->ds_addr & 1554 0xffffffff); 1555 if (sizeof (bus_addr_t) > 4) { 1556 addr = ((uint64_t)dm_segs->ds_addr) >> 32; 1557 /* SAS1078 36GB limitation WAR */ 1558 if (mpt->is_1078 && 1559 (((uint64_t)dm_segs->ds_addr + 1560 MPI_SGE_LENGTH(se->FlagsLength)) >> 1561 32) == 9) { 1562 addr |= (1U << 31); 1563 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; 1564 } 1565 se->Address.High = htole32(addr); 1566 } 1567 if (seg == this_seg_lim - 1) { 1568 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1569 } 1570 if (seg == nseg - 1) { 1571 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1572 MPI_SGE_FLAGS_END_OF_BUFFER; 1573 } 1574 MPI_pSGE_SET_FLAGS(se, tf); 1575 se->FlagsLength = htole32(se->FlagsLength); 1576 se++; 1577 seg++; 1578 dm_segs++; 1579 } 1580 1581 next_chain: 1582 /* 1583 * If we have more segments to do and we've used up all of 1584 * the space in a request area, go allocate another one 1585 * and chain to that. 1586 */ 1587 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1588 request_t *nrq; 1589 1590 nrq = mpt_get_request(mpt, FALSE); 1591 1592 if (nrq == NULL) { 1593 error = ENOMEM; 1594 goto bad; 1595 } 1596 1597 /* 1598 * Append the new request area on the tail of our list. 1599 */ 1600 if ((trq = req->chain) == NULL) { 1601 req->chain = nrq; 1602 } else { 1603 while (trq->chain != NULL) { 1604 trq = trq->chain; 1605 } 1606 trq->chain = nrq; 1607 } 1608 trq = nrq; 1609 mpt_off = trq->req_vbuf; 1610 if (mpt->verbose >= MPT_PRT_DEBUG) { 1611 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 1612 } 1613 nxt_off = 0; 1614 } 1615 } 1616 out: 1617 1618 /* 1619 * Last time we need to check if this CCB needs to be aborted. 1620 */ 1621 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1622 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1623 request_t *cmd_req = 1624 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1625 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1626 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1627 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1628 } 1629 mpt_prt(mpt, 1630 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", 1631 ccb->ccb_h.status & CAM_STATUS_MASK); 1632 if (nseg) { 1633 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 1634 } 1635 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1636 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1637 xpt_done(ccb); 1638 mpt_free_request(mpt, req); 1639 return; 1640 } 1641 1642 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1643 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 1644 mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout, 1645 mpt_timeout, ccb); 1646 } 1647 if (mpt->verbose > MPT_PRT_DEBUG) { 1648 int nc = 0; 1649 mpt_print_request(req->req_vbuf); 1650 for (trq = req->chain; trq; trq = trq->chain) { 1651 printf(" Additional Chain Area %d\n", nc++); 1652 mpt_dump_sgl(trq->req_vbuf, 0); 1653 } 1654 } 1655 1656 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1657 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1658 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 1659 #ifdef WE_TRUST_AUTO_GOOD_STATUS 1660 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 1661 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 1662 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 1663 } else { 1664 tgt->state = TGT_STATE_MOVING_DATA; 1665 } 1666 #else 1667 tgt->state = TGT_STATE_MOVING_DATA; 1668 #endif 1669 } 1670 mpt_send_cmd(mpt, req); 1671 } 1672 1673 static void 1674 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1675 { 1676 request_t *req, *trq; 1677 char *mpt_off; 1678 union ccb *ccb; 1679 struct mpt_softc *mpt; 1680 int seg, first_lim; 1681 uint32_t flags, nxt_off; 1682 void *sglp = NULL; 1683 MSG_REQUEST_HEADER *hdrp; 1684 SGE_SIMPLE32 *se; 1685 SGE_CHAIN32 *ce; 1686 int istgt = 0; 1687 1688 req = (request_t *)arg; 1689 ccb = req->ccb; 1690 1691 mpt = ccb->ccb_h.ccb_mpt_ptr; 1692 req = ccb->ccb_h.ccb_req_ptr; 1693 1694 hdrp = req->req_vbuf; 1695 mpt_off = req->req_vbuf; 1696 1697 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1698 error = EFBIG; 1699 } 1700 1701 if (error == 0) { 1702 switch (hdrp->Function) { 1703 case MPI_FUNCTION_SCSI_IO_REQUEST: 1704 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 1705 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 1706 break; 1707 case MPI_FUNCTION_TARGET_ASSIST: 1708 istgt = 1; 1709 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 1710 break; 1711 default: 1712 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n", 1713 hdrp->Function); 1714 error = EINVAL; 1715 break; 1716 } 1717 } 1718 1719 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1720 error = EFBIG; 1721 mpt_prt(mpt, "segment count %d too large (max %u)\n", 1722 nseg, mpt->max_seg_cnt); 1723 } 1724 1725 bad: 1726 if (error != 0) { 1727 if (error != EFBIG && error != ENOMEM) { 1728 mpt_prt(mpt, "mpt_execute_req: err %d\n", error); 1729 } 1730 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1731 cam_status status; 1732 mpt_freeze_ccb(ccb); 1733 if (error == EFBIG) { 1734 status = CAM_REQ_TOO_BIG; 1735 } else if (error == ENOMEM) { 1736 if (mpt->outofbeer == 0) { 1737 mpt->outofbeer = 1; 1738 xpt_freeze_simq(mpt->sim, 1); 1739 mpt_lprt(mpt, MPT_PRT_DEBUG, 1740 "FREEZEQ\n"); 1741 } 1742 status = CAM_REQUEUE_REQ; 1743 } else { 1744 status = CAM_REQ_CMP_ERR; 1745 } 1746 mpt_set_ccb_status(ccb, status); 1747 } 1748 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1749 request_t *cmd_req = 1750 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1751 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1752 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1753 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1754 } 1755 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1756 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1757 xpt_done(ccb); 1758 mpt_free_request(mpt, req); 1759 return; 1760 } 1761 1762 /* 1763 * No data to transfer? 1764 * Just make a single simple SGL with zero length. 1765 */ 1766 1767 if (mpt->verbose >= MPT_PRT_DEBUG) { 1768 int tidx = ((char *)sglp) - mpt_off; 1769 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1770 } 1771 1772 if (nseg == 0) { 1773 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1774 MPI_pSGE_SET_FLAGS(se1, 1775 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1776 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1777 se1->FlagsLength = htole32(se1->FlagsLength); 1778 goto out; 1779 } 1780 1781 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 1782 if (istgt == 0) { 1783 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1784 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1785 } 1786 } else { 1787 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1788 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1789 } 1790 } 1791 1792 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1793 bus_dmasync_op_t op; 1794 if (istgt) { 1795 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1796 op = BUS_DMASYNC_PREREAD; 1797 } else { 1798 op = BUS_DMASYNC_PREWRITE; 1799 } 1800 } else { 1801 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1802 op = BUS_DMASYNC_PREWRITE; 1803 } else { 1804 op = BUS_DMASYNC_PREREAD; 1805 } 1806 } 1807 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1808 } 1809 1810 /* 1811 * Okay, fill in what we can at the end of the command frame. 1812 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1813 * the command frame. 1814 * 1815 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1816 * SIMPLE32 pointers and start doing CHAIN32 entries after 1817 * that. 1818 */ 1819 1820 if (nseg < MPT_NSGL_FIRST(mpt)) { 1821 first_lim = nseg; 1822 } else { 1823 /* 1824 * Leave room for CHAIN element 1825 */ 1826 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1827 } 1828 1829 se = (SGE_SIMPLE32 *) sglp; 1830 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1831 uint32_t tf; 1832 1833 memset(se, 0,sizeof (*se)); 1834 se->Address = htole32(dm_segs->ds_addr); 1835 1836 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1837 tf = flags; 1838 if (seg == first_lim - 1) { 1839 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1840 } 1841 if (seg == nseg - 1) { 1842 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1843 MPI_SGE_FLAGS_END_OF_BUFFER; 1844 } 1845 MPI_pSGE_SET_FLAGS(se, tf); 1846 se->FlagsLength = htole32(se->FlagsLength); 1847 } 1848 1849 if (seg == nseg) { 1850 goto out; 1851 } 1852 1853 /* 1854 * Tell the IOC where to find the first chain element. 1855 */ 1856 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1857 nxt_off = MPT_RQSL(mpt); 1858 trq = req; 1859 1860 /* 1861 * Make up the rest of the data segments out of a chain element 1862 * (contained in the current request frame) which points to 1863 * SIMPLE32 elements in the next request frame, possibly ending 1864 * with *another* chain element (if there's more). 1865 */ 1866 while (seg < nseg) { 1867 int this_seg_lim; 1868 uint32_t tf, cur_off; 1869 bus_addr_t chain_list_addr; 1870 1871 /* 1872 * Point to the chain descriptor. Note that the chain 1873 * descriptor is at the end of the *previous* list (whether 1874 * chain or simple). 1875 */ 1876 ce = (SGE_CHAIN32 *) se; 1877 1878 /* 1879 * Before we change our current pointer, make sure we won't 1880 * overflow the request area with this frame. Note that we 1881 * test against 'greater than' here as it's okay in this case 1882 * to have next offset be just outside the request area. 1883 */ 1884 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1885 nxt_off = MPT_REQUEST_AREA; 1886 goto next_chain; 1887 } 1888 1889 /* 1890 * Set our SGE element pointer to the beginning of the chain 1891 * list and update our next chain list offset. 1892 */ 1893 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; 1894 cur_off = nxt_off; 1895 nxt_off += MPT_RQSL(mpt); 1896 1897 /* 1898 * Now initialize the chain descriptor. 1899 */ 1900 memset(ce, 0, sizeof (*ce)); 1901 1902 /* 1903 * Get the physical address of the chain list. 1904 */ 1905 chain_list_addr = trq->req_pbuf; 1906 chain_list_addr += cur_off; 1907 1908 ce->Address = htole32(chain_list_addr); 1909 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; 1910 1911 /* 1912 * If we have more than a frame's worth of segments left, 1913 * set up the chain list to have the last element be another 1914 * chain descriptor. 1915 */ 1916 if ((nseg - seg) > MPT_NSGL(mpt)) { 1917 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1918 /* 1919 * The length of the chain is the length in bytes of the 1920 * number of segments plus the next chain element. 1921 * 1922 * The next chain descriptor offset is the length, 1923 * in words, of the number of segments. 1924 */ 1925 ce->Length = (this_seg_lim - seg) * 1926 sizeof (SGE_SIMPLE32); 1927 ce->NextChainOffset = ce->Length >> 2; 1928 ce->Length += sizeof (SGE_CHAIN32); 1929 } else { 1930 this_seg_lim = nseg; 1931 ce->Length = (this_seg_lim - seg) * 1932 sizeof (SGE_SIMPLE32); 1933 } 1934 ce->Length = htole16(ce->Length); 1935 1936 /* 1937 * Fill in the chain list SGE elements with our segment data. 1938 * 1939 * If we're the last element in this chain list, set the last 1940 * element flag. If we're the completely last element period, 1941 * set the end of list and end of buffer flags. 1942 */ 1943 while (seg < this_seg_lim) { 1944 memset(se, 0, sizeof (*se)); 1945 se->Address = htole32(dm_segs->ds_addr); 1946 1947 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1948 tf = flags; 1949 if (seg == this_seg_lim - 1) { 1950 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1951 } 1952 if (seg == nseg - 1) { 1953 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1954 MPI_SGE_FLAGS_END_OF_BUFFER; 1955 } 1956 MPI_pSGE_SET_FLAGS(se, tf); 1957 se->FlagsLength = htole32(se->FlagsLength); 1958 se++; 1959 seg++; 1960 dm_segs++; 1961 } 1962 1963 next_chain: 1964 /* 1965 * If we have more segments to do and we've used up all of 1966 * the space in a request area, go allocate another one 1967 * and chain to that. 1968 */ 1969 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1970 request_t *nrq; 1971 1972 nrq = mpt_get_request(mpt, FALSE); 1973 1974 if (nrq == NULL) { 1975 error = ENOMEM; 1976 goto bad; 1977 } 1978 1979 /* 1980 * Append the new request area on the tail of our list. 1981 */ 1982 if ((trq = req->chain) == NULL) { 1983 req->chain = nrq; 1984 } else { 1985 while (trq->chain != NULL) { 1986 trq = trq->chain; 1987 } 1988 trq->chain = nrq; 1989 } 1990 trq = nrq; 1991 mpt_off = trq->req_vbuf; 1992 if (mpt->verbose >= MPT_PRT_DEBUG) { 1993 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 1994 } 1995 nxt_off = 0; 1996 } 1997 } 1998 out: 1999 2000 /* 2001 * Last time we need to check if this CCB needs to be aborted. 2002 */ 2003 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2004 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2005 request_t *cmd_req = 2006 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2007 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 2008 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 2009 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 2010 } 2011 mpt_prt(mpt, 2012 "mpt_execute_req: I/O cancelled (status 0x%x)\n", 2013 ccb->ccb_h.status & CAM_STATUS_MASK); 2014 if (nseg) { 2015 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2016 } 2017 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2018 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 2019 xpt_done(ccb); 2020 mpt_free_request(mpt, req); 2021 return; 2022 } 2023 2024 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2025 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2026 mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout, 2027 mpt_timeout, ccb); 2028 } 2029 if (mpt->verbose > MPT_PRT_DEBUG) { 2030 int nc = 0; 2031 mpt_print_request(req->req_vbuf); 2032 for (trq = req->chain; trq; trq = trq->chain) { 2033 printf(" Additional Chain Area %d\n", nc++); 2034 mpt_dump_sgl(trq->req_vbuf, 0); 2035 } 2036 } 2037 2038 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2039 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2040 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 2041 #ifdef WE_TRUST_AUTO_GOOD_STATUS 2042 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 2043 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 2044 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 2045 } else { 2046 tgt->state = TGT_STATE_MOVING_DATA; 2047 } 2048 #else 2049 tgt->state = TGT_STATE_MOVING_DATA; 2050 #endif 2051 } 2052 mpt_send_cmd(mpt, req); 2053 } 2054 2055 static void 2056 mpt_start(struct cam_sim *sim, union ccb *ccb) 2057 { 2058 request_t *req; 2059 struct mpt_softc *mpt; 2060 MSG_SCSI_IO_REQUEST *mpt_req; 2061 struct ccb_scsiio *csio = &ccb->csio; 2062 struct ccb_hdr *ccbh = &ccb->ccb_h; 2063 bus_dmamap_callback_t *cb; 2064 target_id_t tgt; 2065 int raid_passthru; 2066 int error; 2067 2068 /* Get the pointer for the physical addapter */ 2069 mpt = ccb->ccb_h.ccb_mpt_ptr; 2070 raid_passthru = (sim == mpt->phydisk_sim); 2071 2072 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 2073 if (mpt->outofbeer == 0) { 2074 mpt->outofbeer = 1; 2075 xpt_freeze_simq(mpt->sim, 1); 2076 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 2077 } 2078 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2079 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 2080 xpt_done(ccb); 2081 return; 2082 } 2083 #ifdef INVARIANTS 2084 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); 2085 #endif 2086 2087 if (sizeof (bus_addr_t) > 4) { 2088 cb = mpt_execute_req_a64; 2089 } else { 2090 cb = mpt_execute_req; 2091 } 2092 2093 /* 2094 * Link the ccb and the request structure so we can find 2095 * the other knowing either the request or the ccb 2096 */ 2097 req->ccb = ccb; 2098 ccb->ccb_h.ccb_req_ptr = req; 2099 2100 /* Now we build the command for the IOC */ 2101 mpt_req = req->req_vbuf; 2102 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); 2103 2104 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; 2105 if (raid_passthru) { 2106 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 2107 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 2108 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2109 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 2110 xpt_done(ccb); 2111 return; 2112 } 2113 mpt_req->Bus = 0; /* we never set bus here */ 2114 } else { 2115 tgt = ccb->ccb_h.target_id; 2116 mpt_req->Bus = 0; /* XXX */ 2117 2118 } 2119 mpt_req->SenseBufferLength = 2120 (csio->sense_len < MPT_SENSE_SIZE) ? 2121 csio->sense_len : MPT_SENSE_SIZE; 2122 2123 /* 2124 * We use the message context to find the request structure when we 2125 * Get the command completion interrupt from the IOC. 2126 */ 2127 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); 2128 2129 /* Which physical device to do the I/O on */ 2130 mpt_req->TargetID = tgt; 2131 2132 be64enc(mpt_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); 2133 2134 /* Set the direction of the transfer */ 2135 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2136 mpt_req->Control = MPI_SCSIIO_CONTROL_READ; 2137 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 2138 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; 2139 } else { 2140 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; 2141 } 2142 2143 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 2144 switch(ccb->csio.tag_action) { 2145 case MSG_HEAD_OF_Q_TAG: 2146 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; 2147 break; 2148 case MSG_ACA_TASK: 2149 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; 2150 break; 2151 case MSG_ORDERED_Q_TAG: 2152 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; 2153 break; 2154 case MSG_SIMPLE_Q_TAG: 2155 default: 2156 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2157 break; 2158 } 2159 } else { 2160 if (mpt->is_fc || mpt->is_sas) { 2161 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2162 } else { 2163 /* XXX No such thing for a target doing packetized. */ 2164 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; 2165 } 2166 } 2167 2168 if (mpt->is_spi) { 2169 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 2170 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; 2171 } 2172 } 2173 mpt_req->Control = htole32(mpt_req->Control); 2174 2175 /* Copy the scsi command block into place */ 2176 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2177 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); 2178 } else { 2179 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); 2180 } 2181 2182 mpt_req->CDBLength = csio->cdb_len; 2183 mpt_req->DataLength = htole32(csio->dxfer_len); 2184 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); 2185 2186 /* 2187 * Do a *short* print here if we're set to MPT_PRT_DEBUG 2188 */ 2189 if (mpt->verbose == MPT_PRT_DEBUG) { 2190 U32 df; 2191 mpt_prt(mpt, "mpt_start: %s op 0x%x ", 2192 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? 2193 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); 2194 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; 2195 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { 2196 mpt_prtc(mpt, "(%s %u byte%s ", 2197 (df == MPI_SCSIIO_CONTROL_READ)? 2198 "read" : "write", csio->dxfer_len, 2199 (csio->dxfer_len == 1)? ")" : "s)"); 2200 } 2201 mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt, 2202 (uintmax_t)ccb->ccb_h.target_lun, req, req->serno); 2203 } 2204 2205 error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb, 2206 req, 0); 2207 if (error == EINPROGRESS) { 2208 /* 2209 * So as to maintain ordering, freeze the controller queue 2210 * until our mapping is returned. 2211 */ 2212 xpt_freeze_simq(mpt->sim, 1); 2213 ccbh->status |= CAM_RELEASE_SIMQ; 2214 } 2215 } 2216 2217 static int 2218 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, 2219 int sleep_ok) 2220 { 2221 int error; 2222 uint16_t status; 2223 uint8_t response; 2224 2225 error = mpt_scsi_send_tmf(mpt, 2226 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? 2227 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : 2228 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 2229 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 2230 0, /* XXX How do I get the channel ID? */ 2231 tgt != CAM_TARGET_WILDCARD ? tgt : 0, 2232 lun != CAM_LUN_WILDCARD ? lun : 0, 2233 0, sleep_ok); 2234 2235 if (error != 0) { 2236 /* 2237 * mpt_scsi_send_tmf hard resets on failure, so no 2238 * need to do so here. 2239 */ 2240 mpt_prt(mpt, 2241 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); 2242 return (EIO); 2243 } 2244 2245 /* Wait for bus reset to be processed by the IOC. */ 2246 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 2247 REQ_STATE_DONE, sleep_ok, 5000); 2248 2249 status = le16toh(mpt->tmf_req->IOCStatus); 2250 response = mpt->tmf_req->ResponseCode; 2251 mpt->tmf_req->state = REQ_STATE_FREE; 2252 2253 if (error) { 2254 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " 2255 "Resetting controller.\n"); 2256 mpt_reset(mpt, TRUE); 2257 return (ETIMEDOUT); 2258 } 2259 2260 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 2261 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " 2262 "Resetting controller.\n", status); 2263 mpt_reset(mpt, TRUE); 2264 return (EIO); 2265 } 2266 2267 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 2268 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 2269 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. " 2270 "Resetting controller.\n", response); 2271 mpt_reset(mpt, TRUE); 2272 return (EIO); 2273 } 2274 return (0); 2275 } 2276 2277 static int 2278 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) 2279 { 2280 int r = 0; 2281 request_t *req; 2282 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; 2283 2284 req = mpt_get_request(mpt, FALSE); 2285 if (req == NULL) { 2286 return (ENOMEM); 2287 } 2288 fc = req->req_vbuf; 2289 memset(fc, 0, sizeof(*fc)); 2290 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; 2291 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; 2292 fc->MsgContext = htole32(req->index | fc_els_handler_id); 2293 mpt_send_cmd(mpt, req); 2294 if (dowait) { 2295 r = mpt_wait_req(mpt, req, REQ_STATE_DONE, 2296 REQ_STATE_DONE, FALSE, 60 * 1000); 2297 if (r == 0) { 2298 mpt_free_request(mpt, req); 2299 } 2300 } 2301 return (r); 2302 } 2303 2304 static int 2305 mpt_cam_event(struct mpt_softc *mpt, request_t *req, 2306 MSG_EVENT_NOTIFY_REPLY *msg) 2307 { 2308 uint32_t data0, data1; 2309 2310 data0 = le32toh(msg->Data[0]); 2311 data1 = le32toh(msg->Data[1]); 2312 switch(msg->Event & 0xFF) { 2313 case MPI_EVENT_UNIT_ATTENTION: 2314 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", 2315 (data0 >> 8) & 0xff, data0 & 0xff); 2316 break; 2317 2318 case MPI_EVENT_IOC_BUS_RESET: 2319 /* We generated a bus reset */ 2320 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", 2321 (data0 >> 8) & 0xff); 2322 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2323 break; 2324 2325 case MPI_EVENT_EXT_BUS_RESET: 2326 /* Someone else generated a bus reset */ 2327 mpt_prt(mpt, "External Bus Reset Detected\n"); 2328 /* 2329 * These replies don't return EventData like the MPI 2330 * spec says they do 2331 */ 2332 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2333 break; 2334 2335 case MPI_EVENT_RESCAN: 2336 { 2337 union ccb *ccb; 2338 uint32_t pathid; 2339 /* 2340 * In general this means a device has been added to the loop. 2341 */ 2342 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2343 if (mpt->ready == 0) { 2344 break; 2345 } 2346 if (mpt->phydisk_sim) { 2347 pathid = cam_sim_path(mpt->phydisk_sim); 2348 } else { 2349 pathid = cam_sim_path(mpt->sim); 2350 } 2351 /* 2352 * Allocate a CCB, create a wildcard path for this bus, 2353 * and schedule a rescan. 2354 */ 2355 ccb = xpt_alloc_ccb_nowait(); 2356 if (ccb == NULL) { 2357 mpt_prt(mpt, "unable to alloc CCB for rescan\n"); 2358 break; 2359 } 2360 2361 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, 2362 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2363 mpt_prt(mpt, "unable to create path for rescan\n"); 2364 xpt_free_ccb(ccb); 2365 break; 2366 } 2367 xpt_rescan(ccb); 2368 break; 2369 } 2370 2371 case MPI_EVENT_LINK_STATUS_CHANGE: 2372 mpt_prt(mpt, "Port %d: LinkState: %s\n", 2373 (data1 >> 8) & 0xff, 2374 ((data0 & 0xff) == 0)? "Failed" : "Active"); 2375 break; 2376 2377 case MPI_EVENT_LOOP_STATE_CHANGE: 2378 switch ((data0 >> 16) & 0xff) { 2379 case 0x01: 2380 mpt_prt(mpt, 2381 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " 2382 "(Loop Initialization)\n", 2383 (data1 >> 8) & 0xff, 2384 (data0 >> 8) & 0xff, 2385 (data0 ) & 0xff); 2386 switch ((data0 >> 8) & 0xff) { 2387 case 0xF7: 2388 if ((data0 & 0xff) == 0xF7) { 2389 mpt_prt(mpt, "Device needs AL_PA\n"); 2390 } else { 2391 mpt_prt(mpt, "Device %02x doesn't like " 2392 "FC performance\n", 2393 data0 & 0xFF); 2394 } 2395 break; 2396 case 0xF8: 2397 if ((data0 & 0xff) == 0xF7) { 2398 mpt_prt(mpt, "Device had loop failure " 2399 "at its receiver prior to acquiring" 2400 " AL_PA\n"); 2401 } else { 2402 mpt_prt(mpt, "Device %02x detected loop" 2403 " failure at its receiver\n", 2404 data0 & 0xFF); 2405 } 2406 break; 2407 default: 2408 mpt_prt(mpt, "Device %02x requests that device " 2409 "%02x reset itself\n", 2410 data0 & 0xFF, 2411 (data0 >> 8) & 0xFF); 2412 break; 2413 } 2414 break; 2415 case 0x02: 2416 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2417 "LPE(%02x,%02x) (Loop Port Enable)\n", 2418 (data1 >> 8) & 0xff, /* Port */ 2419 (data0 >> 8) & 0xff, /* Character 3 */ 2420 (data0 ) & 0xff /* Character 4 */); 2421 break; 2422 case 0x03: 2423 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2424 "LPB(%02x,%02x) (Loop Port Bypass)\n", 2425 (data1 >> 8) & 0xff, /* Port */ 2426 (data0 >> 8) & 0xff, /* Character 3 */ 2427 (data0 ) & 0xff /* Character 4 */); 2428 break; 2429 default: 2430 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " 2431 "FC event (%02x %02x %02x)\n", 2432 (data1 >> 8) & 0xff, /* Port */ 2433 (data0 >> 16) & 0xff, /* Event */ 2434 (data0 >> 8) & 0xff, /* Character 3 */ 2435 (data0 ) & 0xff /* Character 4 */); 2436 } 2437 break; 2438 2439 case MPI_EVENT_LOGOUT: 2440 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", 2441 (data1 >> 8) & 0xff, data0); 2442 break; 2443 case MPI_EVENT_QUEUE_FULL: 2444 { 2445 struct cam_sim *sim; 2446 struct cam_path *tmppath; 2447 struct ccb_relsim crs; 2448 PTR_EVENT_DATA_QUEUE_FULL pqf; 2449 lun_id_t lun_id; 2450 2451 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; 2452 pqf->CurrentDepth = le16toh(pqf->CurrentDepth); 2453 if (bootverbose) { 2454 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x " 2455 "Depth %d\n", 2456 pqf->Bus, pqf->TargetID, pqf->CurrentDepth); 2457 } 2458 if (mpt->phydisk_sim && mpt_is_raid_member(mpt, 2459 pqf->TargetID) != 0) { 2460 sim = mpt->phydisk_sim; 2461 } else { 2462 sim = mpt->sim; 2463 } 2464 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { 2465 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2466 pqf->TargetID, lun_id) != CAM_REQ_CMP) { 2467 mpt_prt(mpt, "unable to create a path to send " 2468 "XPT_REL_SIMQ"); 2469 break; 2470 } 2471 xpt_setup_ccb(&crs.ccb_h, tmppath, 5); 2472 crs.ccb_h.func_code = XPT_REL_SIMQ; 2473 crs.ccb_h.flags = CAM_DEV_QFREEZE; 2474 crs.release_flags = RELSIM_ADJUST_OPENINGS; 2475 crs.openings = pqf->CurrentDepth - 1; 2476 xpt_action((union ccb *)&crs); 2477 if (crs.ccb_h.status != CAM_REQ_CMP) { 2478 mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); 2479 } 2480 xpt_free_path(tmppath); 2481 } 2482 break; 2483 } 2484 case MPI_EVENT_IR_RESYNC_UPDATE: 2485 mpt_prt(mpt, "IR resync update %d completed\n", 2486 (data0 >> 16) & 0xff); 2487 break; 2488 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2489 { 2490 union ccb *ccb; 2491 struct cam_sim *sim; 2492 struct cam_path *tmppath; 2493 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc; 2494 2495 psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data; 2496 if (mpt->phydisk_sim && mpt_is_raid_member(mpt, 2497 psdsc->TargetID) != 0) 2498 sim = mpt->phydisk_sim; 2499 else 2500 sim = mpt->sim; 2501 switch(psdsc->ReasonCode) { 2502 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 2503 ccb = xpt_alloc_ccb_nowait(); 2504 if (ccb == NULL) { 2505 mpt_prt(mpt, 2506 "unable to alloc CCB for rescan\n"); 2507 break; 2508 } 2509 if (xpt_create_path(&ccb->ccb_h.path, NULL, 2510 cam_sim_path(sim), psdsc->TargetID, 2511 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2512 mpt_prt(mpt, 2513 "unable to create path for rescan\n"); 2514 xpt_free_ccb(ccb); 2515 break; 2516 } 2517 xpt_rescan(ccb); 2518 break; 2519 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 2520 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2521 psdsc->TargetID, CAM_LUN_WILDCARD) != 2522 CAM_REQ_CMP) { 2523 mpt_prt(mpt, 2524 "unable to create path for async event"); 2525 break; 2526 } 2527 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 2528 xpt_free_path(tmppath); 2529 break; 2530 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET: 2531 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL: 2532 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 2533 break; 2534 default: 2535 mpt_lprt(mpt, MPT_PRT_WARN, 2536 "SAS device status change: Bus: 0x%02x TargetID: " 2537 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus, 2538 psdsc->TargetID, psdsc->ReasonCode); 2539 break; 2540 } 2541 break; 2542 } 2543 case MPI_EVENT_SAS_DISCOVERY_ERROR: 2544 { 2545 PTR_EVENT_DATA_DISCOVERY_ERROR pde; 2546 2547 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data; 2548 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus); 2549 mpt_lprt(mpt, MPT_PRT_WARN, 2550 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n", 2551 pde->Port, pde->DiscoveryStatus); 2552 break; 2553 } 2554 case MPI_EVENT_EVENT_CHANGE: 2555 case MPI_EVENT_INTEGRATED_RAID: 2556 case MPI_EVENT_IR2: 2557 case MPI_EVENT_LOG_ENTRY_ADDED: 2558 case MPI_EVENT_SAS_DISCOVERY: 2559 case MPI_EVENT_SAS_PHY_LINK_STATUS: 2560 case MPI_EVENT_SAS_SES: 2561 break; 2562 default: 2563 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", 2564 msg->Event & 0xFF); 2565 return (0); 2566 } 2567 return (1); 2568 } 2569 2570 /* 2571 * Reply path for all SCSI I/O requests, called from our 2572 * interrupt handler by extracting our handler index from 2573 * the MsgContext field of the reply from the IOC. 2574 * 2575 * This routine is optimized for the common case of a 2576 * completion without error. All exception handling is 2577 * offloaded to non-inlined helper routines to minimize 2578 * cache footprint. 2579 */ 2580 static int 2581 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, 2582 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2583 { 2584 MSG_SCSI_IO_REQUEST *scsi_req; 2585 union ccb *ccb; 2586 2587 if (req->state == REQ_STATE_FREE) { 2588 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); 2589 return (TRUE); 2590 } 2591 2592 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; 2593 ccb = req->ccb; 2594 if (ccb == NULL) { 2595 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", 2596 req, req->serno); 2597 return (TRUE); 2598 } 2599 2600 mpt_req_untimeout(req, mpt_timeout, ccb); 2601 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2602 2603 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2604 bus_dmasync_op_t op; 2605 2606 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 2607 op = BUS_DMASYNC_POSTREAD; 2608 else 2609 op = BUS_DMASYNC_POSTWRITE; 2610 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 2611 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2612 } 2613 2614 if (reply_frame == NULL) { 2615 /* 2616 * Context only reply, completion without error status. 2617 */ 2618 ccb->csio.resid = 0; 2619 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2620 ccb->csio.scsi_status = SCSI_STATUS_OK; 2621 } else { 2622 mpt_scsi_reply_frame_handler(mpt, req, reply_frame); 2623 } 2624 2625 if (mpt->outofbeer) { 2626 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2627 mpt->outofbeer = 0; 2628 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 2629 } 2630 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { 2631 struct scsi_inquiry_data *iq = 2632 (struct scsi_inquiry_data *)ccb->csio.data_ptr; 2633 if (scsi_req->Function == 2634 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 2635 /* 2636 * Fake out the device type so that only the 2637 * pass-thru device will attach. 2638 */ 2639 iq->device &= ~0x1F; 2640 iq->device |= T_NODEVICE; 2641 } 2642 } 2643 if (mpt->verbose == MPT_PRT_DEBUG) { 2644 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", 2645 req, req->serno); 2646 } 2647 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 2648 xpt_done(ccb); 2649 if ((req->state & REQ_STATE_TIMEDOUT) == 0) { 2650 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2651 } else { 2652 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", 2653 req, req->serno); 2654 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 2655 } 2656 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, 2657 ("CCB req needed wakeup")); 2658 #ifdef INVARIANTS 2659 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); 2660 #endif 2661 mpt_free_request(mpt, req); 2662 return (TRUE); 2663 } 2664 2665 static int 2666 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, 2667 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2668 { 2669 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; 2670 2671 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); 2672 #ifdef INVARIANTS 2673 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); 2674 #endif 2675 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; 2676 /* Record IOC Status and Response Code of TMF for any waiters. */ 2677 req->IOCStatus = le16toh(tmf_reply->IOCStatus); 2678 req->ResponseCode = tmf_reply->ResponseCode; 2679 2680 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", 2681 req, req->serno, le16toh(tmf_reply->IOCStatus)); 2682 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2683 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 2684 req->state |= REQ_STATE_DONE; 2685 wakeup(req); 2686 } else { 2687 mpt->tmf_req->state = REQ_STATE_FREE; 2688 } 2689 return (TRUE); 2690 } 2691 2692 /* 2693 * XXX: Move to definitions file 2694 */ 2695 #define ELS 0x22 2696 #define FC4LS 0x32 2697 #define ABTS 0x81 2698 #define BA_ACC 0x84 2699 2700 #define LS_RJT 0x01 2701 #define LS_ACC 0x02 2702 #define PLOGI 0x03 2703 #define LOGO 0x05 2704 #define SRR 0x14 2705 #define PRLI 0x20 2706 #define PRLO 0x21 2707 #define ADISC 0x52 2708 #define RSCN 0x61 2709 2710 static void 2711 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, 2712 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) 2713 { 2714 uint32_t fl; 2715 MSG_LINK_SERVICE_RSP_REQUEST tmp; 2716 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; 2717 2718 /* 2719 * We are going to reuse the ELS request to send this response back. 2720 */ 2721 rsp = &tmp; 2722 memset(rsp, 0, sizeof(*rsp)); 2723 2724 #ifdef USE_IMMEDIATE_LINK_DATA 2725 /* 2726 * Apparently the IMMEDIATE stuff doesn't seem to work. 2727 */ 2728 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; 2729 #endif 2730 rsp->RspLength = length; 2731 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; 2732 rsp->MsgContext = htole32(req->index | fc_els_handler_id); 2733 2734 /* 2735 * Copy over information from the original reply frame to 2736 * it's correct place in the response. 2737 */ 2738 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); 2739 2740 /* 2741 * And now copy back the temporary area to the original frame. 2742 */ 2743 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); 2744 rsp = req->req_vbuf; 2745 2746 #ifdef USE_IMMEDIATE_LINK_DATA 2747 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); 2748 #else 2749 { 2750 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; 2751 bus_addr_t paddr = req->req_pbuf; 2752 paddr += MPT_RQSL(mpt); 2753 2754 fl = 2755 MPI_SGE_FLAGS_HOST_TO_IOC | 2756 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2757 MPI_SGE_FLAGS_LAST_ELEMENT | 2758 MPI_SGE_FLAGS_END_OF_LIST | 2759 MPI_SGE_FLAGS_END_OF_BUFFER; 2760 fl <<= MPI_SGE_FLAGS_SHIFT; 2761 fl |= (length); 2762 se->FlagsLength = htole32(fl); 2763 se->Address = htole32((uint32_t) paddr); 2764 } 2765 #endif 2766 2767 /* 2768 * Send it on... 2769 */ 2770 mpt_send_cmd(mpt, req); 2771 } 2772 2773 static int 2774 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, 2775 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2776 { 2777 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = 2778 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; 2779 U8 rctl; 2780 U8 type; 2781 U8 cmd; 2782 U16 status = le16toh(reply_frame->IOCStatus); 2783 U32 *elsbuf; 2784 int ioindex; 2785 int do_refresh = TRUE; 2786 2787 #ifdef INVARIANTS 2788 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 2789 ("fc_els_reply_handler: req %p:%u for function %x on freelist!", 2790 req, req->serno, rp->Function)); 2791 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2792 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2793 } else { 2794 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2795 } 2796 #endif 2797 mpt_lprt(mpt, MPT_PRT_DEBUG, 2798 "FC_ELS Complete: req %p:%u, reply %p function %x\n", 2799 req, req->serno, reply_frame, reply_frame->Function); 2800 2801 if (status != MPI_IOCSTATUS_SUCCESS) { 2802 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", 2803 status, reply_frame->Function); 2804 if (status == MPI_IOCSTATUS_INVALID_STATE) { 2805 /* 2806 * XXX: to get around shutdown issue 2807 */ 2808 mpt->disabled = 1; 2809 return (TRUE); 2810 } 2811 return (TRUE); 2812 } 2813 2814 /* 2815 * If the function of a link service response, we recycle the 2816 * response to be a refresh for a new link service request. 2817 * 2818 * The request pointer is bogus in this case and we have to fetch 2819 * it based upon the TransactionContext. 2820 */ 2821 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { 2822 /* Freddie Uncle Charlie Katie */ 2823 /* We don't get the IOINDEX as part of the Link Svc Rsp */ 2824 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) 2825 if (mpt->els_cmd_ptrs[ioindex] == req) { 2826 break; 2827 } 2828 2829 KASSERT(ioindex < mpt->els_cmds_allocated, 2830 ("can't find my mommie!")); 2831 2832 /* remove from active list as we're going to re-post it */ 2833 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2834 req->state &= ~REQ_STATE_QUEUED; 2835 req->state |= REQ_STATE_DONE; 2836 mpt_fc_post_els(mpt, req, ioindex); 2837 return (TRUE); 2838 } 2839 2840 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2841 /* remove from active list as we're done */ 2842 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2843 req->state &= ~REQ_STATE_QUEUED; 2844 req->state |= REQ_STATE_DONE; 2845 if (req->state & REQ_STATE_TIMEDOUT) { 2846 mpt_lprt(mpt, MPT_PRT_DEBUG, 2847 "Sync Primitive Send Completed After Timeout\n"); 2848 mpt_free_request(mpt, req); 2849 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { 2850 mpt_lprt(mpt, MPT_PRT_DEBUG, 2851 "Async Primitive Send Complete\n"); 2852 mpt_free_request(mpt, req); 2853 } else { 2854 mpt_lprt(mpt, MPT_PRT_DEBUG, 2855 "Sync Primitive Send Complete- Waking Waiter\n"); 2856 wakeup(req); 2857 } 2858 return (TRUE); 2859 } 2860 2861 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { 2862 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " 2863 "Length %d Message Flags %x\n", rp->Function, rp->Flags, 2864 rp->MsgLength, rp->MsgFlags); 2865 return (TRUE); 2866 } 2867 2868 if (rp->MsgLength <= 5) { 2869 /* 2870 * This is just a ack of an original ELS buffer post 2871 */ 2872 mpt_lprt(mpt, MPT_PRT_DEBUG, 2873 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); 2874 return (TRUE); 2875 } 2876 2877 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; 2878 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; 2879 2880 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; 2881 cmd = be32toh(elsbuf[0]) >> 24; 2882 2883 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { 2884 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); 2885 return (TRUE); 2886 } 2887 2888 ioindex = le32toh(rp->TransactionContext); 2889 req = mpt->els_cmd_ptrs[ioindex]; 2890 2891 if (rctl == ELS && type == 1) { 2892 switch (cmd) { 2893 case PRLI: 2894 /* 2895 * Send back a PRLI ACC 2896 */ 2897 mpt_prt(mpt, "PRLI from 0x%08x%08x\n", 2898 le32toh(rp->Wwn.PortNameHigh), 2899 le32toh(rp->Wwn.PortNameLow)); 2900 elsbuf[0] = htobe32(0x02100014); 2901 elsbuf[1] |= htobe32(0x00000100); 2902 elsbuf[4] = htobe32(0x00000002); 2903 if (mpt->role & MPT_ROLE_TARGET) 2904 elsbuf[4] |= htobe32(0x00000010); 2905 if (mpt->role & MPT_ROLE_INITIATOR) 2906 elsbuf[4] |= htobe32(0x00000020); 2907 /* remove from active list as we're done */ 2908 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2909 req->state &= ~REQ_STATE_QUEUED; 2910 req->state |= REQ_STATE_DONE; 2911 mpt_fc_els_send_response(mpt, req, rp, 20); 2912 do_refresh = FALSE; 2913 break; 2914 case PRLO: 2915 memset(elsbuf, 0, 5 * (sizeof (U32))); 2916 elsbuf[0] = htobe32(0x02100014); 2917 elsbuf[1] = htobe32(0x08000100); 2918 mpt_prt(mpt, "PRLO from 0x%08x%08x\n", 2919 le32toh(rp->Wwn.PortNameHigh), 2920 le32toh(rp->Wwn.PortNameLow)); 2921 /* remove from active list as we're done */ 2922 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2923 req->state &= ~REQ_STATE_QUEUED; 2924 req->state |= REQ_STATE_DONE; 2925 mpt_fc_els_send_response(mpt, req, rp, 20); 2926 do_refresh = FALSE; 2927 break; 2928 default: 2929 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); 2930 break; 2931 } 2932 } else if (rctl == ABTS && type == 0) { 2933 uint16_t rx_id = le16toh(rp->Rxid); 2934 uint16_t ox_id = le16toh(rp->Oxid); 2935 mpt_tgt_state_t *tgt; 2936 request_t *tgt_req = NULL; 2937 union ccb *ccb; 2938 uint32_t ct_id; 2939 2940 mpt_prt(mpt, 2941 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", 2942 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), 2943 le32toh(rp->Wwn.PortNameLow)); 2944 if (rx_id >= mpt->mpt_max_tgtcmds) { 2945 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); 2946 } else if (mpt->tgt_cmd_ptrs == NULL) { 2947 mpt_prt(mpt, "No TGT CMD PTRS\n"); 2948 } else { 2949 tgt_req = mpt->tgt_cmd_ptrs[rx_id]; 2950 } 2951 if (tgt_req == NULL) { 2952 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); 2953 goto skip; 2954 } 2955 tgt = MPT_TGT_STATE(mpt, tgt_req); 2956 2957 /* Check to make sure we have the correct command. */ 2958 ct_id = GET_IO_INDEX(tgt->reply_desc); 2959 if (ct_id != rx_id) { 2960 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " 2961 "RX_ID received=0x%x, in cmd=0x%x\n", rx_id, ct_id); 2962 goto skip; 2963 } 2964 if (tgt->itag != ox_id) { 2965 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " 2966 "OX_ID received=0x%x, in cmd=0x%x\n", ox_id, tgt->itag); 2967 goto skip; 2968 } 2969 2970 if ((ccb = tgt->ccb) != NULL) { 2971 mpt_prt(mpt, "CCB (%p): lun %jx flags %x status %x\n", 2972 ccb, (uintmax_t)ccb->ccb_h.target_lun, 2973 ccb->ccb_h.flags, ccb->ccb_h.status); 2974 } 2975 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " 2976 "%x nxfers %x\n", tgt->state, tgt->resid, 2977 tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers); 2978 if (mpt_abort_target_cmd(mpt, tgt_req)) 2979 mpt_prt(mpt, "unable to start TargetAbort\n"); 2980 2981 skip: 2982 memset(elsbuf, 0, 5 * (sizeof (U32))); 2983 elsbuf[0] = htobe32(0); 2984 elsbuf[1] = htobe32((ox_id << 16) | rx_id); 2985 elsbuf[2] = htobe32(0x000ffff); 2986 /* 2987 * Dork with the reply frame so that the response to it 2988 * will be correct. 2989 */ 2990 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); 2991 /* remove from active list as we're done */ 2992 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2993 req->state &= ~REQ_STATE_QUEUED; 2994 req->state |= REQ_STATE_DONE; 2995 mpt_fc_els_send_response(mpt, req, rp, 12); 2996 do_refresh = FALSE; 2997 } else { 2998 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); 2999 } 3000 if (do_refresh == TRUE) { 3001 /* remove from active list as we're done */ 3002 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3003 req->state &= ~REQ_STATE_QUEUED; 3004 req->state |= REQ_STATE_DONE; 3005 mpt_fc_post_els(mpt, req, ioindex); 3006 } 3007 return (TRUE); 3008 } 3009 3010 /* 3011 * Clean up all SCSI Initiator personality state in response 3012 * to a controller reset. 3013 */ 3014 static void 3015 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) 3016 { 3017 3018 /* 3019 * The pending list is already run down by 3020 * the generic handler. Perform the same 3021 * operation on the timed out request list. 3022 */ 3023 mpt_complete_request_chain(mpt, &mpt->request_timeout_list, 3024 MPI_IOCSTATUS_INVALID_STATE); 3025 3026 /* 3027 * XXX: We need to repost ELS and Target Command Buffers? 3028 */ 3029 3030 /* 3031 * Inform the XPT that a bus reset has occurred. 3032 */ 3033 xpt_async(AC_BUS_RESET, mpt->path, NULL); 3034 } 3035 3036 /* 3037 * Parse additional completion information in the reply 3038 * frame for SCSI I/O requests. 3039 */ 3040 static int 3041 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, 3042 MSG_DEFAULT_REPLY *reply_frame) 3043 { 3044 union ccb *ccb; 3045 MSG_SCSI_IO_REPLY *scsi_io_reply; 3046 u_int ioc_status; 3047 u_int sstate; 3048 3049 MPT_DUMP_REPLY_FRAME(mpt, reply_frame); 3050 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST 3051 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, 3052 ("MPT SCSI I/O Handler called with incorrect reply type")); 3053 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, 3054 ("MPT SCSI I/O Handler called with continuation reply")); 3055 3056 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; 3057 ioc_status = le16toh(scsi_io_reply->IOCStatus); 3058 ioc_status &= MPI_IOCSTATUS_MASK; 3059 sstate = scsi_io_reply->SCSIState; 3060 3061 ccb = req->ccb; 3062 ccb->csio.resid = 3063 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); 3064 3065 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 3066 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { 3067 uint32_t sense_returned; 3068 3069 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 3070 3071 sense_returned = le32toh(scsi_io_reply->SenseCount); 3072 if (sense_returned < ccb->csio.sense_len) 3073 ccb->csio.sense_resid = ccb->csio.sense_len - 3074 sense_returned; 3075 else 3076 ccb->csio.sense_resid = 0; 3077 3078 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); 3079 bcopy(req->sense_vbuf, &ccb->csio.sense_data, 3080 min(ccb->csio.sense_len, sense_returned)); 3081 } 3082 3083 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { 3084 /* 3085 * Tag messages rejected, but non-tagged retry 3086 * was successful. 3087 XXXX 3088 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); 3089 */ 3090 } 3091 3092 switch(ioc_status) { 3093 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3094 /* 3095 * XXX 3096 * Linux driver indicates that a zero 3097 * transfer length with this error code 3098 * indicates a CRC error. 3099 * 3100 * No need to swap the bytes for checking 3101 * against zero. 3102 */ 3103 if (scsi_io_reply->TransferCount == 0) { 3104 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3105 break; 3106 } 3107 /* FALLTHROUGH */ 3108 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 3109 case MPI_IOCSTATUS_SUCCESS: 3110 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 3111 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { 3112 /* 3113 * Status was never returned for this transaction. 3114 */ 3115 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); 3116 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { 3117 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; 3118 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); 3119 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) 3120 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); 3121 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { 3122 /* XXX Handle SPI-Packet and FCP-2 response info. */ 3123 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3124 } else 3125 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3126 break; 3127 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: 3128 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); 3129 break; 3130 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: 3131 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3132 break; 3133 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3134 /* 3135 * Since selection timeouts and "device really not 3136 * there" are grouped into this error code, report 3137 * selection timeout. Selection timeouts are 3138 * typically retried before giving up on the device 3139 * whereas "device not there" errors are considered 3140 * unretryable. 3141 */ 3142 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3143 break; 3144 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3145 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); 3146 break; 3147 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 3148 mpt_set_ccb_status(ccb, CAM_PATH_INVALID); 3149 break; 3150 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 3151 mpt_set_ccb_status(ccb, CAM_TID_INVALID); 3152 break; 3153 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3154 ccb->ccb_h.status = CAM_UA_TERMIO; 3155 break; 3156 case MPI_IOCSTATUS_INVALID_STATE: 3157 /* 3158 * The IOC has been reset. Emulate a bus reset. 3159 */ 3160 /* FALLTHROUGH */ 3161 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: 3162 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 3163 break; 3164 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: 3165 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: 3166 /* 3167 * Don't clobber any timeout status that has 3168 * already been set for this transaction. We 3169 * want the SCSI layer to be able to differentiate 3170 * between the command we aborted due to timeout 3171 * and any innocent bystanders. 3172 */ 3173 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) 3174 break; 3175 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); 3176 break; 3177 3178 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 3179 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); 3180 break; 3181 case MPI_IOCSTATUS_BUSY: 3182 mpt_set_ccb_status(ccb, CAM_BUSY); 3183 break; 3184 case MPI_IOCSTATUS_INVALID_FUNCTION: 3185 case MPI_IOCSTATUS_INVALID_SGL: 3186 case MPI_IOCSTATUS_INTERNAL_ERROR: 3187 case MPI_IOCSTATUS_INVALID_FIELD: 3188 default: 3189 /* XXX 3190 * Some of the above may need to kick 3191 * of a recovery action!!!! 3192 */ 3193 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; 3194 break; 3195 } 3196 3197 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3198 mpt_freeze_ccb(ccb); 3199 } 3200 3201 return (TRUE); 3202 } 3203 3204 static void 3205 mpt_action(struct cam_sim *sim, union ccb *ccb) 3206 { 3207 struct mpt_softc *mpt; 3208 struct ccb_trans_settings *cts; 3209 target_id_t tgt; 3210 lun_id_t lun; 3211 int raid_passthru; 3212 3213 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); 3214 3215 mpt = (struct mpt_softc *)cam_sim_softc(sim); 3216 raid_passthru = (sim == mpt->phydisk_sim); 3217 MPT_LOCK_ASSERT(mpt); 3218 3219 tgt = ccb->ccb_h.target_id; 3220 lun = ccb->ccb_h.target_lun; 3221 if (raid_passthru && 3222 ccb->ccb_h.func_code != XPT_PATH_INQ && 3223 ccb->ccb_h.func_code != XPT_RESET_BUS && 3224 ccb->ccb_h.func_code != XPT_RESET_DEV) { 3225 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 3226 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3227 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 3228 xpt_done(ccb); 3229 return; 3230 } 3231 } 3232 ccb->ccb_h.ccb_mpt_ptr = mpt; 3233 3234 switch (ccb->ccb_h.func_code) { 3235 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 3236 /* 3237 * Do a couple of preliminary checks... 3238 */ 3239 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 3240 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 3241 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3242 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3243 break; 3244 } 3245 } 3246 /* Max supported CDB length is 16 bytes */ 3247 /* XXX Unless we implement the new 32byte message type */ 3248 if (ccb->csio.cdb_len > 3249 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { 3250 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3251 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3252 break; 3253 } 3254 #ifdef MPT_TEST_MULTIPATH 3255 if (mpt->failure_id == ccb->ccb_h.target_id) { 3256 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3257 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3258 break; 3259 } 3260 #endif 3261 ccb->csio.scsi_status = SCSI_STATUS_OK; 3262 mpt_start(sim, ccb); 3263 return; 3264 3265 case XPT_RESET_BUS: 3266 if (raid_passthru) { 3267 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3268 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3269 break; 3270 } 3271 case XPT_RESET_DEV: 3272 if (ccb->ccb_h.func_code == XPT_RESET_BUS) { 3273 if (bootverbose) { 3274 xpt_print(ccb->ccb_h.path, "reset bus\n"); 3275 } 3276 } else { 3277 xpt_print(ccb->ccb_h.path, "reset device\n"); 3278 } 3279 (void) mpt_bus_reset(mpt, tgt, lun, FALSE); 3280 3281 /* 3282 * mpt_bus_reset is always successful in that it 3283 * will fall back to a hard reset should a bus 3284 * reset attempt fail. 3285 */ 3286 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3287 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3288 break; 3289 3290 case XPT_ABORT: 3291 { 3292 union ccb *accb = ccb->cab.abort_ccb; 3293 switch (accb->ccb_h.func_code) { 3294 case XPT_ACCEPT_TARGET_IO: 3295 case XPT_IMMEDIATE_NOTIFY: 3296 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); 3297 break; 3298 case XPT_CONT_TARGET_IO: 3299 mpt_prt(mpt, "cannot abort active CTIOs yet\n"); 3300 ccb->ccb_h.status = CAM_UA_ABORT; 3301 break; 3302 case XPT_SCSI_IO: 3303 ccb->ccb_h.status = CAM_UA_ABORT; 3304 break; 3305 default: 3306 ccb->ccb_h.status = CAM_REQ_INVALID; 3307 break; 3308 } 3309 break; 3310 } 3311 3312 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) 3313 3314 #define DP_DISC_ENABLE 0x1 3315 #define DP_DISC_DISABL 0x2 3316 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) 3317 3318 #define DP_TQING_ENABLE 0x4 3319 #define DP_TQING_DISABL 0x8 3320 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) 3321 3322 #define DP_WIDE 0x10 3323 #define DP_NARROW 0x20 3324 #define DP_WIDTH (DP_WIDE|DP_NARROW) 3325 3326 #define DP_SYNC 0x40 3327 3328 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 3329 { 3330 struct ccb_trans_settings_scsi *scsi; 3331 struct ccb_trans_settings_spi *spi; 3332 uint8_t dval; 3333 u_int period; 3334 u_int offset; 3335 int i, j; 3336 3337 cts = &ccb->cts; 3338 3339 if (mpt->is_fc || mpt->is_sas) { 3340 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3341 break; 3342 } 3343 3344 scsi = &cts->proto_specific.scsi; 3345 spi = &cts->xport_specific.spi; 3346 3347 /* 3348 * We can be called just to valid transport and proto versions 3349 */ 3350 if (scsi->valid == 0 && spi->valid == 0) { 3351 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3352 break; 3353 } 3354 3355 /* 3356 * Skip attempting settings on RAID volume disks. 3357 * Other devices on the bus get the normal treatment. 3358 */ 3359 if (mpt->phydisk_sim && raid_passthru == 0 && 3360 mpt_is_raid_volume(mpt, tgt) != 0) { 3361 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3362 "no transfer settings for RAID vols\n"); 3363 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3364 break; 3365 } 3366 3367 i = mpt->mpt_port_page2.PortSettings & 3368 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 3369 j = mpt->mpt_port_page2.PortFlags & 3370 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 3371 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && 3372 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { 3373 mpt_lprt(mpt, MPT_PRT_ALWAYS, 3374 "honoring BIOS transfer negotiations\n"); 3375 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3376 break; 3377 } 3378 3379 dval = 0; 3380 period = 0; 3381 offset = 0; 3382 3383 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 3384 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? 3385 DP_DISC_ENABLE : DP_DISC_DISABL; 3386 } 3387 3388 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 3389 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 3390 DP_TQING_ENABLE : DP_TQING_DISABL; 3391 } 3392 3393 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 3394 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? 3395 DP_WIDE : DP_NARROW; 3396 } 3397 3398 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { 3399 dval |= DP_SYNC; 3400 offset = spi->sync_offset; 3401 } else { 3402 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3403 &mpt->mpt_dev_page1[tgt]; 3404 offset = ptr->RequestedParameters; 3405 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3406 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3407 } 3408 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { 3409 dval |= DP_SYNC; 3410 period = spi->sync_period; 3411 } else { 3412 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3413 &mpt->mpt_dev_page1[tgt]; 3414 period = ptr->RequestedParameters; 3415 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3416 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3417 } 3418 3419 if (dval & DP_DISC_ENABLE) { 3420 mpt->mpt_disc_enable |= (1 << tgt); 3421 } else if (dval & DP_DISC_DISABL) { 3422 mpt->mpt_disc_enable &= ~(1 << tgt); 3423 } 3424 if (dval & DP_TQING_ENABLE) { 3425 mpt->mpt_tag_enable |= (1 << tgt); 3426 } else if (dval & DP_TQING_DISABL) { 3427 mpt->mpt_tag_enable &= ~(1 << tgt); 3428 } 3429 if (dval & DP_WIDTH) { 3430 mpt_setwidth(mpt, tgt, 1); 3431 } 3432 if (dval & DP_SYNC) { 3433 mpt_setsync(mpt, tgt, period, offset); 3434 } 3435 if (dval == 0) { 3436 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3437 break; 3438 } 3439 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3440 "set [%d]: 0x%x period 0x%x offset %d\n", 3441 tgt, dval, period, offset); 3442 if (mpt_update_spi_config(mpt, tgt)) { 3443 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3444 } else { 3445 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3446 } 3447 break; 3448 } 3449 case XPT_GET_TRAN_SETTINGS: 3450 { 3451 struct ccb_trans_settings_scsi *scsi; 3452 cts = &ccb->cts; 3453 cts->protocol = PROTO_SCSI; 3454 if (mpt->is_fc) { 3455 struct ccb_trans_settings_fc *fc = 3456 &cts->xport_specific.fc; 3457 cts->protocol_version = SCSI_REV_SPC; 3458 cts->transport = XPORT_FC; 3459 cts->transport_version = 0; 3460 if (mpt->mpt_fcport_speed != 0) { 3461 fc->valid = CTS_FC_VALID_SPEED; 3462 fc->bitrate = 100000 * mpt->mpt_fcport_speed; 3463 } 3464 } else if (mpt->is_sas) { 3465 struct ccb_trans_settings_sas *sas = 3466 &cts->xport_specific.sas; 3467 cts->protocol_version = SCSI_REV_SPC2; 3468 cts->transport = XPORT_SAS; 3469 cts->transport_version = 0; 3470 sas->valid = CTS_SAS_VALID_SPEED; 3471 sas->bitrate = 300000; 3472 } else { 3473 cts->protocol_version = SCSI_REV_2; 3474 cts->transport = XPORT_SPI; 3475 cts->transport_version = 2; 3476 if (mpt_get_spi_settings(mpt, cts) != 0) { 3477 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3478 break; 3479 } 3480 } 3481 scsi = &cts->proto_specific.scsi; 3482 scsi->valid = CTS_SCSI_VALID_TQ; 3483 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 3484 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3485 break; 3486 } 3487 case XPT_CALC_GEOMETRY: 3488 { 3489 struct ccb_calc_geometry *ccg; 3490 3491 ccg = &ccb->ccg; 3492 if (ccg->block_size == 0) { 3493 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3494 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3495 break; 3496 } 3497 cam_calc_geometry(ccg, /* extended */ 1); 3498 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 3499 break; 3500 } 3501 case XPT_GET_SIM_KNOB: 3502 { 3503 struct ccb_sim_knob *kp = &ccb->knob; 3504 3505 if (mpt->is_fc) { 3506 kp->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn; 3507 kp->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn; 3508 switch (mpt->role) { 3509 case MPT_ROLE_NONE: 3510 kp->xport_specific.fc.role = KNOB_ROLE_NONE; 3511 break; 3512 case MPT_ROLE_INITIATOR: 3513 kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR; 3514 break; 3515 case MPT_ROLE_TARGET: 3516 kp->xport_specific.fc.role = KNOB_ROLE_TARGET; 3517 break; 3518 case MPT_ROLE_BOTH: 3519 kp->xport_specific.fc.role = KNOB_ROLE_BOTH; 3520 break; 3521 } 3522 kp->xport_specific.fc.valid = 3523 KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; 3524 ccb->ccb_h.status = CAM_REQ_CMP; 3525 } else { 3526 ccb->ccb_h.status = CAM_REQ_INVALID; 3527 } 3528 xpt_done(ccb); 3529 break; 3530 } 3531 case XPT_PATH_INQ: /* Path routing inquiry */ 3532 { 3533 struct ccb_pathinq *cpi = &ccb->cpi; 3534 3535 cpi->version_num = 1; 3536 cpi->target_sprt = 0; 3537 cpi->hba_eng_cnt = 0; 3538 cpi->max_target = mpt->port_facts[0].MaxDevices - 1; 3539 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE; 3540 /* 3541 * FC cards report MAX_DEVICES of 512, but 3542 * the MSG_SCSI_IO_REQUEST target id field 3543 * is only 8 bits. Until we fix the driver 3544 * to support 'channels' for bus overflow, 3545 * just limit it. 3546 */ 3547 if (cpi->max_target > 255) { 3548 cpi->max_target = 255; 3549 } 3550 3551 /* 3552 * VMware ESX reports > 16 devices and then dies when we probe. 3553 */ 3554 if (mpt->is_spi && cpi->max_target > 15) { 3555 cpi->max_target = 15; 3556 } 3557 if (mpt->is_spi) 3558 cpi->max_lun = 7; 3559 else 3560 cpi->max_lun = MPT_MAX_LUNS; 3561 cpi->initiator_id = mpt->mpt_ini_id; 3562 cpi->bus_id = cam_sim_bus(sim); 3563 3564 /* 3565 * The base speed is the speed of the underlying connection. 3566 */ 3567 cpi->protocol = PROTO_SCSI; 3568 if (mpt->is_fc) { 3569 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | 3570 PIM_EXTLUNS; 3571 cpi->base_transfer_speed = 100000; 3572 cpi->hba_inquiry = PI_TAG_ABLE; 3573 cpi->transport = XPORT_FC; 3574 cpi->transport_version = 0; 3575 cpi->protocol_version = SCSI_REV_SPC; 3576 cpi->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn; 3577 cpi->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn; 3578 cpi->xport_specific.fc.port = mpt->scinfo.fc.portid; 3579 cpi->xport_specific.fc.bitrate = 3580 100000 * mpt->mpt_fcport_speed; 3581 } else if (mpt->is_sas) { 3582 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | 3583 PIM_EXTLUNS; 3584 cpi->base_transfer_speed = 300000; 3585 cpi->hba_inquiry = PI_TAG_ABLE; 3586 cpi->transport = XPORT_SAS; 3587 cpi->transport_version = 0; 3588 cpi->protocol_version = SCSI_REV_SPC2; 3589 } else { 3590 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED | 3591 PIM_EXTLUNS; 3592 cpi->base_transfer_speed = 3300; 3593 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3594 cpi->transport = XPORT_SPI; 3595 cpi->transport_version = 2; 3596 cpi->protocol_version = SCSI_REV_2; 3597 } 3598 3599 /* 3600 * We give our fake RAID passhtru bus a width that is MaxVolumes 3601 * wide and restrict it to one lun. 3602 */ 3603 if (raid_passthru) { 3604 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; 3605 cpi->initiator_id = cpi->max_target + 1; 3606 cpi->max_lun = 0; 3607 } 3608 3609 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { 3610 cpi->hba_misc |= PIM_NOINITIATOR; 3611 } 3612 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 3613 cpi->target_sprt = 3614 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3615 } else { 3616 cpi->target_sprt = 0; 3617 } 3618 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3619 strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN); 3620 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3621 cpi->unit_number = cam_sim_unit(sim); 3622 cpi->ccb_h.status = CAM_REQ_CMP; 3623 break; 3624 } 3625 case XPT_EN_LUN: /* Enable LUN as a target */ 3626 { 3627 int result; 3628 3629 if (ccb->cel.enable) 3630 result = mpt_enable_lun(mpt, 3631 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3632 else 3633 result = mpt_disable_lun(mpt, 3634 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3635 if (result == 0) { 3636 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3637 } else { 3638 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3639 } 3640 break; 3641 } 3642 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ 3643 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 3644 { 3645 tgt_resource_t *trtp; 3646 lun_id_t lun = ccb->ccb_h.target_lun; 3647 ccb->ccb_h.sim_priv.entries[0].field = 0; 3648 ccb->ccb_h.sim_priv.entries[1].ptr = mpt; 3649 3650 if (lun == CAM_LUN_WILDCARD) { 3651 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 3652 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3653 break; 3654 } 3655 trtp = &mpt->trt_wildcard; 3656 } else if (lun >= MPT_MAX_LUNS) { 3657 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3658 break; 3659 } else { 3660 trtp = &mpt->trt[lun]; 3661 } 3662 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 3663 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3664 "Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun); 3665 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, 3666 sim_links.stqe); 3667 } else { 3668 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3669 "Put FREE INOT lun %jx\n", (uintmax_t)lun); 3670 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, 3671 sim_links.stqe); 3672 } 3673 mpt_set_ccb_status(ccb, CAM_REQ_INPROG); 3674 return; 3675 } 3676 case XPT_NOTIFY_ACKNOWLEDGE: /* Task management request done. */ 3677 { 3678 request_t *req = MPT_TAG_2_REQ(mpt, ccb->cna2.tag_id); 3679 3680 mpt_lprt(mpt, MPT_PRT_DEBUG, "Got Notify ACK\n"); 3681 mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL, 0); 3682 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3683 break; 3684 } 3685 case XPT_CONT_TARGET_IO: 3686 mpt_target_start_io(mpt, ccb); 3687 return; 3688 3689 default: 3690 ccb->ccb_h.status = CAM_REQ_INVALID; 3691 break; 3692 } 3693 xpt_done(ccb); 3694 } 3695 3696 static int 3697 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) 3698 { 3699 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 3700 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 3701 target_id_t tgt; 3702 uint32_t dval, pval, oval; 3703 int rv; 3704 3705 if (IS_CURRENT_SETTINGS(cts) == 0) { 3706 tgt = cts->ccb_h.target_id; 3707 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { 3708 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { 3709 return (-1); 3710 } 3711 } else { 3712 tgt = cts->ccb_h.target_id; 3713 } 3714 3715 /* 3716 * We aren't looking at Port Page 2 BIOS settings here- 3717 * sometimes these have been known to be bogus XXX. 3718 * 3719 * For user settings, we pick the max from port page 0 3720 * 3721 * For current settings we read the current settings out from 3722 * device page 0 for that target. 3723 */ 3724 if (IS_CURRENT_SETTINGS(cts)) { 3725 CONFIG_PAGE_SCSI_DEVICE_0 tmp; 3726 dval = 0; 3727 3728 tmp = mpt->mpt_dev_page0[tgt]; 3729 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, 3730 sizeof(tmp), FALSE, 5000); 3731 if (rv) { 3732 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); 3733 return (rv); 3734 } 3735 mpt2host_config_page_scsi_device_0(&tmp); 3736 3737 mpt_lprt(mpt, MPT_PRT_DEBUG, 3738 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, 3739 tmp.NegotiatedParameters, tmp.Information); 3740 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? 3741 DP_WIDE : DP_NARROW; 3742 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? 3743 DP_DISC_ENABLE : DP_DISC_DISABL; 3744 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? 3745 DP_TQING_ENABLE : DP_TQING_DISABL; 3746 oval = tmp.NegotiatedParameters; 3747 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; 3748 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; 3749 pval = tmp.NegotiatedParameters; 3750 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; 3751 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; 3752 mpt->mpt_dev_page0[tgt] = tmp; 3753 } else { 3754 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; 3755 oval = mpt->mpt_port_page0.Capabilities; 3756 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); 3757 pval = mpt->mpt_port_page0.Capabilities; 3758 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); 3759 } 3760 3761 spi->valid = 0; 3762 scsi->valid = 0; 3763 spi->flags = 0; 3764 scsi->flags = 0; 3765 spi->sync_offset = oval; 3766 spi->sync_period = pval; 3767 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3768 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3769 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3770 if (dval & DP_WIDE) { 3771 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3772 } else { 3773 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3774 } 3775 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3776 scsi->valid = CTS_SCSI_VALID_TQ; 3777 if (dval & DP_TQING_ENABLE) { 3778 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3779 } 3780 spi->valid |= CTS_SPI_VALID_DISC; 3781 if (dval & DP_DISC_ENABLE) { 3782 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3783 } 3784 } 3785 3786 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3787 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, 3788 IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval); 3789 return (0); 3790 } 3791 3792 static void 3793 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 3794 { 3795 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3796 3797 ptr = &mpt->mpt_dev_page1[tgt]; 3798 if (onoff) { 3799 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 3800 } else { 3801 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 3802 } 3803 } 3804 3805 static void 3806 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 3807 { 3808 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3809 3810 ptr = &mpt->mpt_dev_page1[tgt]; 3811 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3812 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3813 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 3814 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 3815 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 3816 if (period == 0) { 3817 return; 3818 } 3819 ptr->RequestedParameters |= 3820 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3821 ptr->RequestedParameters |= 3822 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3823 if (period < 0xa) { 3824 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 3825 } 3826 if (period < 0x9) { 3827 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 3828 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 3829 } 3830 } 3831 3832 static int 3833 mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 3834 { 3835 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 3836 int rv; 3837 3838 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3839 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 3840 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); 3841 tmp = mpt->mpt_dev_page1[tgt]; 3842 host2mpt_config_page_scsi_device_1(&tmp); 3843 rv = mpt_write_cur_cfg_page(mpt, tgt, 3844 &tmp.Header, sizeof(tmp), FALSE, 5000); 3845 if (rv) { 3846 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 3847 return (-1); 3848 } 3849 return (0); 3850 } 3851 3852 /****************************** Timeout Recovery ******************************/ 3853 static int 3854 mpt_spawn_recovery_thread(struct mpt_softc *mpt) 3855 { 3856 int error; 3857 3858 error = kproc_create(mpt_recovery_thread, mpt, 3859 &mpt->recovery_thread, /*flags*/0, 3860 /*altstack*/0, "mpt_recovery%d", mpt->unit); 3861 return (error); 3862 } 3863 3864 static void 3865 mpt_terminate_recovery_thread(struct mpt_softc *mpt) 3866 { 3867 3868 if (mpt->recovery_thread == NULL) { 3869 return; 3870 } 3871 mpt->shutdwn_recovery = 1; 3872 wakeup(mpt); 3873 /* 3874 * Sleep on a slightly different location 3875 * for this interlock just for added safety. 3876 */ 3877 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); 3878 } 3879 3880 static void 3881 mpt_recovery_thread(void *arg) 3882 { 3883 struct mpt_softc *mpt; 3884 3885 mpt = (struct mpt_softc *)arg; 3886 MPT_LOCK(mpt); 3887 for (;;) { 3888 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3889 if (mpt->shutdwn_recovery == 0) { 3890 mpt_sleep(mpt, mpt, PUSER, "idle", 0); 3891 } 3892 } 3893 if (mpt->shutdwn_recovery != 0) { 3894 break; 3895 } 3896 mpt_recover_commands(mpt); 3897 } 3898 mpt->recovery_thread = NULL; 3899 wakeup(&mpt->recovery_thread); 3900 MPT_UNLOCK(mpt); 3901 kproc_exit(0); 3902 } 3903 3904 static int 3905 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, 3906 u_int channel, target_id_t target, lun_id_t lun, u_int abort_ctx, 3907 int sleep_ok) 3908 { 3909 MSG_SCSI_TASK_MGMT *tmf_req; 3910 int error; 3911 3912 /* 3913 * Wait for any current TMF request to complete. 3914 * We're only allowed to issue one TMF at a time. 3915 */ 3916 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, 3917 sleep_ok, MPT_TMF_MAX_TIMEOUT); 3918 if (error != 0) { 3919 mpt_reset(mpt, TRUE); 3920 return (ETIMEDOUT); 3921 } 3922 3923 mpt_assign_serno(mpt, mpt->tmf_req); 3924 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; 3925 3926 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; 3927 memset(tmf_req, 0, sizeof(*tmf_req)); 3928 tmf_req->TargetID = target; 3929 tmf_req->Bus = channel; 3930 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 3931 tmf_req->TaskType = type; 3932 tmf_req->MsgFlags = flags; 3933 tmf_req->MsgContext = 3934 htole32(mpt->tmf_req->index | scsi_tmf_handler_id); 3935 be64enc(tmf_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun)); 3936 tmf_req->TaskMsgContext = abort_ctx; 3937 3938 mpt_lprt(mpt, MPT_PRT_DEBUG, 3939 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, 3940 mpt->tmf_req->serno, tmf_req->MsgContext); 3941 if (mpt->verbose > MPT_PRT_DEBUG) { 3942 mpt_print_request(tmf_req); 3943 } 3944 3945 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, 3946 ("mpt_scsi_send_tmf: tmf_req already on pending list")); 3947 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); 3948 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); 3949 if (error != MPT_OK) { 3950 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); 3951 mpt->tmf_req->state = REQ_STATE_FREE; 3952 mpt_reset(mpt, TRUE); 3953 } 3954 return (error); 3955 } 3956 3957 /* 3958 * When a command times out, it is placed on the requeust_timeout_list 3959 * and we wake our recovery thread. The MPT-Fusion architecture supports 3960 * only a single TMF operation at a time, so we serially abort/bdr, etc, 3961 * the timedout transactions. The next TMF is issued either by the 3962 * completion handler of the current TMF waking our recovery thread, 3963 * or the TMF timeout handler causing a hard reset sequence. 3964 */ 3965 static void 3966 mpt_recover_commands(struct mpt_softc *mpt) 3967 { 3968 request_t *req; 3969 union ccb *ccb; 3970 int error; 3971 3972 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3973 /* 3974 * No work to do- leave. 3975 */ 3976 mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); 3977 return; 3978 } 3979 3980 /* 3981 * Flush any commands whose completion coincides with their timeout. 3982 */ 3983 mpt_intr(mpt); 3984 3985 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3986 /* 3987 * The timedout commands have already 3988 * completed. This typically means 3989 * that either the timeout value was on 3990 * the hairy edge of what the device 3991 * requires or - more likely - interrupts 3992 * are not happening. 3993 */ 3994 mpt_prt(mpt, "Timedout requests already complete. " 3995 "Interrupts may not be functioning.\n"); 3996 mpt_enable_ints(mpt); 3997 return; 3998 } 3999 4000 /* 4001 * We have no visibility into the current state of the 4002 * controller, so attempt to abort the commands in the 4003 * order they timed-out. For initiator commands, we 4004 * depend on the reply handler pulling requests off 4005 * the timeout list. 4006 */ 4007 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { 4008 uint16_t status; 4009 uint8_t response; 4010 MSG_REQUEST_HEADER *hdrp = req->req_vbuf; 4011 4012 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", 4013 req, req->serno, hdrp->Function); 4014 ccb = req->ccb; 4015 if (ccb == NULL) { 4016 mpt_prt(mpt, "null ccb in timed out request. " 4017 "Resetting Controller.\n"); 4018 mpt_reset(mpt, TRUE); 4019 continue; 4020 } 4021 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); 4022 4023 /* 4024 * Check to see if this is not an initiator command and 4025 * deal with it differently if it is. 4026 */ 4027 switch (hdrp->Function) { 4028 case MPI_FUNCTION_SCSI_IO_REQUEST: 4029 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 4030 break; 4031 default: 4032 /* 4033 * XXX: FIX ME: need to abort target assists... 4034 */ 4035 mpt_prt(mpt, "just putting it back on the pend q\n"); 4036 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 4037 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, 4038 links); 4039 continue; 4040 } 4041 4042 error = mpt_scsi_send_tmf(mpt, 4043 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 4044 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 4045 htole32(req->index | scsi_io_handler_id), TRUE); 4046 4047 if (error != 0) { 4048 /* 4049 * mpt_scsi_send_tmf hard resets on failure, so no 4050 * need to do so here. Our queue should be emptied 4051 * by the hard reset. 4052 */ 4053 continue; 4054 } 4055 4056 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 4057 REQ_STATE_DONE, TRUE, 500); 4058 4059 status = le16toh(mpt->tmf_req->IOCStatus); 4060 response = mpt->tmf_req->ResponseCode; 4061 mpt->tmf_req->state = REQ_STATE_FREE; 4062 4063 if (error != 0) { 4064 /* 4065 * If we've errored out,, reset the controller. 4066 */ 4067 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " 4068 "Resetting controller\n"); 4069 mpt_reset(mpt, TRUE); 4070 continue; 4071 } 4072 4073 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 4074 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " 4075 "Resetting controller.\n", status); 4076 mpt_reset(mpt, TRUE); 4077 continue; 4078 } 4079 4080 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 4081 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 4082 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. " 4083 "Resetting controller.\n", response); 4084 mpt_reset(mpt, TRUE); 4085 continue; 4086 } 4087 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 4088 } 4089 } 4090 4091 /************************ Target Mode Support ****************************/ 4092 static void 4093 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 4094 { 4095 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 4096 PTR_SGE_TRANSACTION32 tep; 4097 PTR_SGE_SIMPLE32 se; 4098 bus_addr_t paddr; 4099 uint32_t fl; 4100 4101 paddr = req->req_pbuf; 4102 paddr += MPT_RQSL(mpt); 4103 4104 fc = req->req_vbuf; 4105 memset(fc, 0, MPT_REQUEST_AREA); 4106 fc->BufferCount = 1; 4107 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 4108 fc->MsgContext = htole32(req->index | fc_els_handler_id); 4109 4110 /* 4111 * Okay, set up ELS buffer pointers. ELS buffer pointers 4112 * consist of a TE SGL element (with details length of zero) 4113 * followed by a SIMPLE SGL element which holds the address 4114 * of the buffer. 4115 */ 4116 4117 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 4118 4119 tep->ContextSize = 4; 4120 tep->Flags = 0; 4121 tep->TransactionContext[0] = htole32(ioindex); 4122 4123 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 4124 fl = 4125 MPI_SGE_FLAGS_HOST_TO_IOC | 4126 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4127 MPI_SGE_FLAGS_LAST_ELEMENT | 4128 MPI_SGE_FLAGS_END_OF_LIST | 4129 MPI_SGE_FLAGS_END_OF_BUFFER; 4130 fl <<= MPI_SGE_FLAGS_SHIFT; 4131 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 4132 se->FlagsLength = htole32(fl); 4133 se->Address = htole32((uint32_t) paddr); 4134 mpt_lprt(mpt, MPT_PRT_DEBUG, 4135 "add ELS index %d ioindex %d for %p:%u\n", 4136 req->index, ioindex, req, req->serno); 4137 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 4138 ("mpt_fc_post_els: request not locked")); 4139 mpt_send_cmd(mpt, req); 4140 } 4141 4142 static void 4143 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 4144 { 4145 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 4146 PTR_CMD_BUFFER_DESCRIPTOR cb; 4147 bus_addr_t paddr; 4148 4149 paddr = req->req_pbuf; 4150 paddr += MPT_RQSL(mpt); 4151 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 4152 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 4153 4154 fc = req->req_vbuf; 4155 fc->BufferCount = 1; 4156 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 4157 fc->BufferLength = MIN(MPT_REQUEST_AREA - MPT_RQSL(mpt), UINT8_MAX); 4158 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4159 4160 cb = &fc->Buffer[0]; 4161 cb->IoIndex = htole16(ioindex); 4162 cb->u.PhysicalAddress32 = htole32((U32) paddr); 4163 4164 mpt_check_doorbell(mpt); 4165 mpt_send_cmd(mpt, req); 4166 } 4167 4168 static int 4169 mpt_add_els_buffers(struct mpt_softc *mpt) 4170 { 4171 int i; 4172 4173 if (mpt->is_fc == 0) { 4174 return (TRUE); 4175 } 4176 4177 if (mpt->els_cmds_allocated) { 4178 return (TRUE); 4179 } 4180 4181 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), 4182 M_DEVBUF, M_NOWAIT | M_ZERO); 4183 4184 if (mpt->els_cmd_ptrs == NULL) { 4185 return (FALSE); 4186 } 4187 4188 /* 4189 * Feed the chip some ELS buffer resources 4190 */ 4191 for (i = 0; i < MPT_MAX_ELS; i++) { 4192 request_t *req = mpt_get_request(mpt, FALSE); 4193 if (req == NULL) { 4194 break; 4195 } 4196 req->state |= REQ_STATE_LOCKED; 4197 mpt->els_cmd_ptrs[i] = req; 4198 mpt_fc_post_els(mpt, req, i); 4199 } 4200 4201 if (i == 0) { 4202 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 4203 free(mpt->els_cmd_ptrs, M_DEVBUF); 4204 mpt->els_cmd_ptrs = NULL; 4205 return (FALSE); 4206 } 4207 if (i != MPT_MAX_ELS) { 4208 mpt_lprt(mpt, MPT_PRT_INFO, 4209 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 4210 } 4211 mpt->els_cmds_allocated = i; 4212 return(TRUE); 4213 } 4214 4215 static int 4216 mpt_add_target_commands(struct mpt_softc *mpt) 4217 { 4218 int i, max; 4219 4220 if (mpt->tgt_cmd_ptrs) { 4221 return (TRUE); 4222 } 4223 4224 max = MPT_MAX_REQUESTS(mpt) >> 1; 4225 if (max > mpt->mpt_max_tgtcmds) { 4226 max = mpt->mpt_max_tgtcmds; 4227 } 4228 mpt->tgt_cmd_ptrs = 4229 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 4230 if (mpt->tgt_cmd_ptrs == NULL) { 4231 mpt_prt(mpt, 4232 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 4233 return (FALSE); 4234 } 4235 4236 for (i = 0; i < max; i++) { 4237 request_t *req; 4238 4239 req = mpt_get_request(mpt, FALSE); 4240 if (req == NULL) { 4241 break; 4242 } 4243 req->state |= REQ_STATE_LOCKED; 4244 mpt->tgt_cmd_ptrs[i] = req; 4245 mpt_post_target_command(mpt, req, i); 4246 } 4247 4248 if (i == 0) { 4249 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 4250 free(mpt->tgt_cmd_ptrs, M_DEVBUF); 4251 mpt->tgt_cmd_ptrs = NULL; 4252 return (FALSE); 4253 } 4254 4255 mpt->tgt_cmds_allocated = i; 4256 4257 if (i < max) { 4258 mpt_lprt(mpt, MPT_PRT_INFO, 4259 "added %d of %d target bufs\n", i, max); 4260 } 4261 return (i); 4262 } 4263 4264 static int 4265 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4266 { 4267 4268 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4269 mpt->twildcard = 1; 4270 } else if (lun >= MPT_MAX_LUNS) { 4271 return (EINVAL); 4272 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4273 return (EINVAL); 4274 } 4275 if (mpt->tenabled == 0) { 4276 if (mpt->is_fc) { 4277 (void) mpt_fc_reset_link(mpt, 0); 4278 } 4279 mpt->tenabled = 1; 4280 } 4281 if (lun == CAM_LUN_WILDCARD) { 4282 mpt->trt_wildcard.enabled = 1; 4283 } else { 4284 mpt->trt[lun].enabled = 1; 4285 } 4286 return (0); 4287 } 4288 4289 static int 4290 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4291 { 4292 int i; 4293 4294 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4295 mpt->twildcard = 0; 4296 } else if (lun >= MPT_MAX_LUNS) { 4297 return (EINVAL); 4298 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4299 return (EINVAL); 4300 } 4301 if (lun == CAM_LUN_WILDCARD) { 4302 mpt->trt_wildcard.enabled = 0; 4303 } else { 4304 mpt->trt[lun].enabled = 0; 4305 } 4306 for (i = 0; i < MPT_MAX_LUNS; i++) { 4307 if (mpt->trt[i].enabled) { 4308 break; 4309 } 4310 } 4311 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4312 if (mpt->is_fc) { 4313 (void) mpt_fc_reset_link(mpt, 0); 4314 } 4315 mpt->tenabled = 0; 4316 } 4317 return (0); 4318 } 4319 4320 /* 4321 * Called with MPT lock held 4322 */ 4323 static void 4324 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4325 { 4326 struct ccb_scsiio *csio = &ccb->csio; 4327 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4328 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4329 4330 switch (tgt->state) { 4331 case TGT_STATE_IN_CAM: 4332 break; 4333 case TGT_STATE_MOVING_DATA: 4334 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4335 xpt_freeze_simq(mpt->sim, 1); 4336 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4337 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4338 xpt_done(ccb); 4339 return; 4340 default: 4341 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4342 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4343 mpt_tgt_dump_req_state(mpt, cmd_req); 4344 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 4345 xpt_done(ccb); 4346 return; 4347 } 4348 4349 if (csio->dxfer_len) { 4350 bus_dmamap_callback_t *cb; 4351 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4352 request_t *req; 4353 int error; 4354 4355 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, 4356 ("dxfer_len %u but direction is NONE", csio->dxfer_len)); 4357 4358 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4359 if (mpt->outofbeer == 0) { 4360 mpt->outofbeer = 1; 4361 xpt_freeze_simq(mpt->sim, 1); 4362 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4363 } 4364 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4365 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4366 xpt_done(ccb); 4367 return; 4368 } 4369 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4370 if (sizeof (bus_addr_t) > 4) { 4371 cb = mpt_execute_req_a64; 4372 } else { 4373 cb = mpt_execute_req; 4374 } 4375 4376 req->ccb = ccb; 4377 ccb->ccb_h.ccb_req_ptr = req; 4378 4379 /* 4380 * Record the currently active ccb and the 4381 * request for it in our target state area. 4382 */ 4383 tgt->ccb = ccb; 4384 tgt->req = req; 4385 4386 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4387 ta = req->req_vbuf; 4388 4389 if (mpt->is_sas) { 4390 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4391 cmd_req->req_vbuf; 4392 ta->QueueTag = ssp->InitiatorTag; 4393 } else if (mpt->is_spi) { 4394 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4395 cmd_req->req_vbuf; 4396 ta->QueueTag = sp->Tag; 4397 } 4398 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4399 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4400 ta->ReplyWord = htole32(tgt->reply_desc); 4401 be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(csio->ccb_h.target_lun)); 4402 4403 ta->RelativeOffset = tgt->bytes_xfered; 4404 ta->DataLength = ccb->csio.dxfer_len; 4405 if (ta->DataLength > tgt->resid) { 4406 ta->DataLength = tgt->resid; 4407 } 4408 4409 /* 4410 * XXX Should be done after data transfer completes? 4411 */ 4412 csio->resid = csio->dxfer_len - ta->DataLength; 4413 tgt->resid -= csio->dxfer_len; 4414 tgt->bytes_xfered += csio->dxfer_len; 4415 4416 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 4417 ta->TargetAssistFlags |= 4418 TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4419 } 4420 4421 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4422 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 4423 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 4424 ta->TargetAssistFlags |= 4425 TARGET_ASSIST_FLAGS_AUTO_STATUS; 4426 } 4427 #endif 4428 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; 4429 4430 mpt_lprt(mpt, MPT_PRT_DEBUG, 4431 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " 4432 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, 4433 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); 4434 4435 error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, 4436 cb, req, 0); 4437 if (error == EINPROGRESS) { 4438 xpt_freeze_simq(mpt->sim, 1); 4439 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4440 } 4441 } else { 4442 /* 4443 * XXX: I don't know why this seems to happen, but 4444 * XXX: completing the CCB seems to make things happy. 4445 * XXX: This seems to happen if the initiator requests 4446 * XXX: enough data that we have to do multiple CTIOs. 4447 */ 4448 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4449 mpt_lprt(mpt, MPT_PRT_DEBUG, 4450 "Meaningless STATUS CCB (%p): flags %x status %x " 4451 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 4452 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 4453 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4454 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4455 xpt_done(ccb); 4456 return; 4457 } 4458 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, 4459 (void *)&csio->sense_data, 4460 (ccb->ccb_h.flags & CAM_SEND_SENSE) ? 4461 csio->sense_len : 0); 4462 } 4463 } 4464 4465 static void 4466 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 4467 lun_id_t lun, int send, uint8_t *data, size_t length) 4468 { 4469 mpt_tgt_state_t *tgt; 4470 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4471 SGE_SIMPLE32 *se; 4472 uint32_t flags; 4473 uint8_t *dptr; 4474 bus_addr_t pptr; 4475 request_t *req; 4476 4477 /* 4478 * We enter with resid set to the data load for the command. 4479 */ 4480 tgt = MPT_TGT_STATE(mpt, cmd_req); 4481 if (length == 0 || tgt->resid == 0) { 4482 tgt->resid = 0; 4483 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL, 0); 4484 return; 4485 } 4486 4487 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4488 mpt_prt(mpt, "out of resources- dropping local response\n"); 4489 return; 4490 } 4491 tgt->is_local = 1; 4492 4493 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4494 ta = req->req_vbuf; 4495 4496 if (mpt->is_sas) { 4497 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 4498 ta->QueueTag = ssp->InitiatorTag; 4499 } else if (mpt->is_spi) { 4500 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 4501 ta->QueueTag = sp->Tag; 4502 } 4503 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4504 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4505 ta->ReplyWord = htole32(tgt->reply_desc); 4506 be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun)); 4507 ta->RelativeOffset = 0; 4508 ta->DataLength = length; 4509 4510 dptr = req->req_vbuf; 4511 dptr += MPT_RQSL(mpt); 4512 pptr = req->req_pbuf; 4513 pptr += MPT_RQSL(mpt); 4514 memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 4515 4516 se = (SGE_SIMPLE32 *) &ta->SGL[0]; 4517 memset(se, 0,sizeof (*se)); 4518 4519 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 4520 if (send) { 4521 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4522 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 4523 } 4524 se->Address = pptr; 4525 MPI_pSGE_SET_LENGTH(se, length); 4526 flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 4527 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 4528 MPI_pSGE_SET_FLAGS(se, flags); 4529 4530 tgt->ccb = NULL; 4531 tgt->req = req; 4532 tgt->resid -= length; 4533 tgt->bytes_xfered = length; 4534 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4535 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 4536 #else 4537 tgt->state = TGT_STATE_MOVING_DATA; 4538 #endif 4539 mpt_send_cmd(mpt, req); 4540 } 4541 4542 /* 4543 * Abort queued up CCBs 4544 */ 4545 static cam_status 4546 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 4547 { 4548 struct mpt_hdr_stailq *lp; 4549 struct ccb_hdr *srch; 4550 union ccb *accb = ccb->cab.abort_ccb; 4551 tgt_resource_t *trtp; 4552 mpt_tgt_state_t *tgt; 4553 request_t *req; 4554 uint32_t tag; 4555 4556 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 4557 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) 4558 trtp = &mpt->trt_wildcard; 4559 else 4560 trtp = &mpt->trt[ccb->ccb_h.target_lun]; 4561 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4562 lp = &trtp->atios; 4563 tag = accb->atio.tag_id; 4564 } else { 4565 lp = &trtp->inots; 4566 tag = accb->cin1.tag_id; 4567 } 4568 4569 /* Search the CCB among queued. */ 4570 STAILQ_FOREACH(srch, lp, sim_links.stqe) { 4571 if (srch != &accb->ccb_h) 4572 continue; 4573 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 4574 accb->ccb_h.status = CAM_REQ_ABORTED; 4575 xpt_done(accb); 4576 return (CAM_REQ_CMP); 4577 } 4578 4579 /* Search the CCB among running. */ 4580 req = MPT_TAG_2_REQ(mpt, tag); 4581 tgt = MPT_TGT_STATE(mpt, req); 4582 if (tgt->tag_id == tag) { 4583 mpt_abort_target_cmd(mpt, req); 4584 return (CAM_REQ_CMP); 4585 } 4586 4587 return (CAM_UA_ABORT); 4588 } 4589 4590 /* 4591 * Ask the MPT to abort the current target command 4592 */ 4593 static int 4594 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 4595 { 4596 int error; 4597 request_t *req; 4598 PTR_MSG_TARGET_MODE_ABORT abtp; 4599 4600 req = mpt_get_request(mpt, FALSE); 4601 if (req == NULL) { 4602 return (-1); 4603 } 4604 abtp = req->req_vbuf; 4605 memset(abtp, 0, sizeof (*abtp)); 4606 4607 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4608 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 4609 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 4610 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 4611 error = 0; 4612 if (mpt->is_fc || mpt->is_sas) { 4613 mpt_send_cmd(mpt, req); 4614 } else { 4615 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); 4616 } 4617 return (error); 4618 } 4619 4620 /* 4621 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 4622 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 4623 * FC929 to set bogus FC_RSP fields (nonzero residuals 4624 * but w/o RESID fields set). This causes QLogic initiators 4625 * to think maybe that a frame was lost. 4626 * 4627 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 4628 * we use allocated requests to do TARGET_ASSIST and we 4629 * need to know when to release them. 4630 */ 4631 4632 static void 4633 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 4634 uint8_t status, uint8_t const *sense_data, u_int sense_len) 4635 { 4636 uint8_t *cmd_vbuf; 4637 mpt_tgt_state_t *tgt; 4638 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 4639 request_t *req; 4640 bus_addr_t paddr; 4641 int resplen = 0; 4642 uint32_t fl; 4643 4644 cmd_vbuf = cmd_req->req_vbuf; 4645 cmd_vbuf += MPT_RQSL(mpt); 4646 tgt = MPT_TGT_STATE(mpt, cmd_req); 4647 4648 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4649 if (mpt->outofbeer == 0) { 4650 mpt->outofbeer = 1; 4651 xpt_freeze_simq(mpt->sim, 1); 4652 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4653 } 4654 if (ccb) { 4655 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4656 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4657 xpt_done(ccb); 4658 } else { 4659 mpt_prt(mpt, 4660 "could not allocate status request- dropping\n"); 4661 } 4662 return; 4663 } 4664 req->ccb = ccb; 4665 if (ccb) { 4666 ccb->ccb_h.ccb_mpt_ptr = mpt; 4667 ccb->ccb_h.ccb_req_ptr = req; 4668 } 4669 4670 /* 4671 * Record the currently active ccb, if any, and the 4672 * request for it in our target state area. 4673 */ 4674 tgt->ccb = ccb; 4675 tgt->req = req; 4676 tgt->state = TGT_STATE_SENDING_STATUS; 4677 4678 tp = req->req_vbuf; 4679 paddr = req->req_pbuf; 4680 paddr += MPT_RQSL(mpt); 4681 4682 memset(tp, 0, sizeof (*tp)); 4683 tp->StatusCode = status; 4684 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 4685 if (mpt->is_fc) { 4686 PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 4687 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 4688 uint8_t *sts_vbuf; 4689 uint32_t *rsp; 4690 4691 sts_vbuf = req->req_vbuf; 4692 sts_vbuf += MPT_RQSL(mpt); 4693 rsp = (uint32_t *) sts_vbuf; 4694 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 4695 4696 /* 4697 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 4698 * It has to be big-endian in memory and is organized 4699 * in 32 bit words, which are much easier to deal with 4700 * as words which are swizzled as needed. 4701 * 4702 * All we're filling here is the FC_RSP payload. 4703 * We may just have the chip synthesize it if 4704 * we have no residual and an OK status. 4705 * 4706 */ 4707 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 4708 4709 rsp[2] = htobe32(status); 4710 #define MIN_FCP_RESPONSE_SIZE 24 4711 #ifndef WE_TRUST_AUTO_GOOD_STATUS 4712 resplen = MIN_FCP_RESPONSE_SIZE; 4713 #endif 4714 if (tgt->resid < 0) { 4715 rsp[2] |= htobe32(0x400); /* XXXX NEED MNEMONIC!!!! */ 4716 rsp[3] = htobe32(-tgt->resid); 4717 resplen = MIN_FCP_RESPONSE_SIZE; 4718 } else if (tgt->resid > 0) { 4719 rsp[2] |= htobe32(0x800); /* XXXX NEED MNEMONIC!!!! */ 4720 rsp[3] = htobe32(tgt->resid); 4721 resplen = MIN_FCP_RESPONSE_SIZE; 4722 } 4723 if (sense_len > 0) { 4724 rsp[2] |= htobe32(0x200); /* XXXX NEED MNEMONIC!!!! */ 4725 rsp[4] = htobe32(sense_len); 4726 memcpy(&rsp[6], sense_data, sense_len); 4727 resplen = MIN_FCP_RESPONSE_SIZE + sense_len; 4728 } 4729 } else if (mpt->is_sas) { 4730 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4731 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; 4732 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); 4733 } else { 4734 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4735 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; 4736 tp->QueueTag = htole16(sp->Tag); 4737 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); 4738 } 4739 4740 tp->ReplyWord = htole32(tgt->reply_desc); 4741 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4742 4743 #ifdef WE_CAN_USE_AUTO_REPOST 4744 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; 4745 #endif 4746 if (status == SCSI_STATUS_OK && resplen == 0) { 4747 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; 4748 } else { 4749 tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); 4750 fl = MPI_SGE_FLAGS_HOST_TO_IOC | 4751 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4752 MPI_SGE_FLAGS_LAST_ELEMENT | 4753 MPI_SGE_FLAGS_END_OF_LIST | 4754 MPI_SGE_FLAGS_END_OF_BUFFER; 4755 fl <<= MPI_SGE_FLAGS_SHIFT; 4756 fl |= resplen; 4757 tp->StatusDataSGE.FlagsLength = htole32(fl); 4758 } 4759 4760 mpt_lprt(mpt, MPT_PRT_DEBUG, 4761 "STATUS_CCB %p (with%s sense) tag %x req %p:%u resid %u\n", 4762 ccb, sense_len > 0 ? "" : "out", tgt->tag_id, 4763 req, req->serno, tgt->resid); 4764 if (mpt->verbose > MPT_PRT_DEBUG) 4765 mpt_print_request(req->req_vbuf); 4766 if (ccb) { 4767 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4768 mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb); 4769 } 4770 mpt_send_cmd(mpt, req); 4771 } 4772 4773 static void 4774 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, 4775 tgt_resource_t *trtp, int init_id) 4776 { 4777 struct ccb_immediate_notify *inot; 4778 mpt_tgt_state_t *tgt; 4779 4780 tgt = MPT_TGT_STATE(mpt, req); 4781 inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots); 4782 if (inot == NULL) { 4783 mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n"); 4784 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL, 0); 4785 return; 4786 } 4787 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); 4788 mpt_lprt(mpt, MPT_PRT_DEBUG1, 4789 "Get FREE INOT %p lun %jx\n", inot, 4790 (uintmax_t)inot->ccb_h.target_lun); 4791 4792 inot->initiator_id = init_id; /* XXX */ 4793 inot->tag_id = tgt->tag_id; 4794 inot->seq_id = 0; 4795 /* 4796 * This is a somewhat grotesque attempt to map from task management 4797 * to old style SCSI messages. God help us all. 4798 */ 4799 switch (fc) { 4800 case MPT_QUERY_TASK_SET: 4801 inot->arg = MSG_QUERY_TASK_SET; 4802 break; 4803 case MPT_ABORT_TASK_SET: 4804 inot->arg = MSG_ABORT_TASK_SET; 4805 break; 4806 case MPT_CLEAR_TASK_SET: 4807 inot->arg = MSG_CLEAR_TASK_SET; 4808 break; 4809 case MPT_QUERY_ASYNC_EVENT: 4810 inot->arg = MSG_QUERY_ASYNC_EVENT; 4811 break; 4812 case MPT_LOGICAL_UNIT_RESET: 4813 inot->arg = MSG_LOGICAL_UNIT_RESET; 4814 break; 4815 case MPT_TARGET_RESET: 4816 inot->arg = MSG_TARGET_RESET; 4817 break; 4818 case MPT_CLEAR_ACA: 4819 inot->arg = MSG_CLEAR_ACA; 4820 break; 4821 default: 4822 inot->arg = MSG_NOOP; 4823 break; 4824 } 4825 tgt->ccb = (union ccb *) inot; 4826 inot->ccb_h.status = CAM_MESSAGE_RECV; 4827 xpt_done((union ccb *)inot); 4828 } 4829 4830 static void 4831 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 4832 { 4833 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 4834 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 4835 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 4836 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 4837 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 4838 '0', '0', '0', '1' 4839 }; 4840 struct ccb_accept_tio *atiop; 4841 lun_id_t lun; 4842 int tag_action = 0; 4843 mpt_tgt_state_t *tgt; 4844 tgt_resource_t *trtp = NULL; 4845 U8 *lunptr; 4846 U8 *vbuf; 4847 U16 ioindex; 4848 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 4849 uint8_t *cdbp; 4850 4851 /* 4852 * Stash info for the current command where we can get at it later. 4853 */ 4854 vbuf = req->req_vbuf; 4855 vbuf += MPT_RQSL(mpt); 4856 if (mpt->verbose >= MPT_PRT_DEBUG) { 4857 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 4858 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 4859 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 4860 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 4861 } 4862 4863 /* 4864 * Get our state pointer set up. 4865 */ 4866 tgt = MPT_TGT_STATE(mpt, req); 4867 if (tgt->state != TGT_STATE_LOADED) { 4868 mpt_tgt_dump_req_state(mpt, req); 4869 panic("bad target state in mpt_scsi_tgt_atio"); 4870 } 4871 memset(tgt, 0, sizeof (mpt_tgt_state_t)); 4872 tgt->state = TGT_STATE_IN_CAM; 4873 tgt->reply_desc = reply_desc; 4874 ioindex = GET_IO_INDEX(reply_desc); 4875 4876 /* 4877 * The tag we construct here allows us to find the 4878 * original request that the command came in with. 4879 * 4880 * This way we don't have to depend on anything but the 4881 * tag to find things when CCBs show back up from CAM. 4882 */ 4883 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 4884 4885 if (mpt->is_fc) { 4886 PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 4887 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 4888 if (fc->FcpCntl[2]) { 4889 /* 4890 * Task Management Request 4891 */ 4892 switch (fc->FcpCntl[2]) { 4893 case 0x1: 4894 fct = MPT_QUERY_TASK_SET; 4895 break; 4896 case 0x2: 4897 fct = MPT_ABORT_TASK_SET; 4898 break; 4899 case 0x4: 4900 fct = MPT_CLEAR_TASK_SET; 4901 break; 4902 case 0x8: 4903 fct = MPT_QUERY_ASYNC_EVENT; 4904 break; 4905 case 0x10: 4906 fct = MPT_LOGICAL_UNIT_RESET; 4907 break; 4908 case 0x20: 4909 fct = MPT_TARGET_RESET; 4910 break; 4911 case 0x40: 4912 fct = MPT_CLEAR_ACA; 4913 break; 4914 default: 4915 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 4916 fc->FcpCntl[2]); 4917 mpt_scsi_tgt_status(mpt, NULL, req, 4918 SCSI_STATUS_OK, NULL, 0); 4919 return; 4920 } 4921 } else { 4922 switch (fc->FcpCntl[1]) { 4923 case 0: 4924 tag_action = MSG_SIMPLE_Q_TAG; 4925 break; 4926 case 1: 4927 tag_action = MSG_HEAD_OF_Q_TAG; 4928 break; 4929 case 2: 4930 tag_action = MSG_ORDERED_Q_TAG; 4931 break; 4932 default: 4933 /* 4934 * Bah. Ignore Untagged Queing and ACA 4935 */ 4936 tag_action = MSG_SIMPLE_Q_TAG; 4937 break; 4938 } 4939 } 4940 tgt->resid = be32toh(fc->FcpDl); 4941 cdbp = fc->FcpCdb; 4942 lunptr = fc->FcpLun; 4943 tgt->itag = fc->OptionalOxid; 4944 } else if (mpt->is_sas) { 4945 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; 4946 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; 4947 cdbp = ssp->CDB; 4948 lunptr = ssp->LogicalUnitNumber; 4949 tgt->itag = ssp->InitiatorTag; 4950 } else { 4951 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; 4952 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; 4953 cdbp = sp->CDB; 4954 lunptr = sp->LogicalUnitNumber; 4955 tgt->itag = sp->Tag; 4956 } 4957 4958 lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(lunptr)); 4959 4960 /* 4961 * Deal with non-enabled or bad luns here. 4962 */ 4963 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || 4964 mpt->trt[lun].enabled == 0) { 4965 if (mpt->twildcard) { 4966 trtp = &mpt->trt_wildcard; 4967 } else if (fct == MPT_NIL_TMT_VALUE) { 4968 /* 4969 * In this case, we haven't got an upstream listener 4970 * for either a specific lun or wildcard luns. We 4971 * have to make some sensible response. For regular 4972 * inquiry, just return some NOT HERE inquiry data. 4973 * For VPD inquiry, report illegal field in cdb. 4974 * For REQUEST SENSE, just return NO SENSE data. 4975 * REPORT LUNS gets illegal command. 4976 * All other commands get 'no such device'. 4977 */ 4978 uint8_t sense[MPT_SENSE_SIZE]; 4979 size_t len; 4980 4981 memset(sense, 0, sizeof(sense)); 4982 sense[0] = 0xf0; 4983 sense[2] = 0x5; 4984 sense[7] = 0x8; 4985 4986 switch (cdbp[0]) { 4987 case INQUIRY: 4988 { 4989 if (cdbp[1] != 0) { 4990 sense[12] = 0x26; 4991 sense[13] = 0x01; 4992 break; 4993 } 4994 len = min(tgt->resid, cdbp[4]); 4995 len = min(len, sizeof (null_iqd)); 4996 mpt_lprt(mpt, MPT_PRT_DEBUG, 4997 "local inquiry %ld bytes\n", (long) len); 4998 mpt_scsi_tgt_local(mpt, req, lun, 1, 4999 null_iqd, len); 5000 return; 5001 } 5002 case REQUEST_SENSE: 5003 { 5004 sense[2] = 0x0; 5005 len = min(tgt->resid, cdbp[4]); 5006 len = min(len, sizeof (sense)); 5007 mpt_lprt(mpt, MPT_PRT_DEBUG, 5008 "local reqsense %ld bytes\n", (long) len); 5009 mpt_scsi_tgt_local(mpt, req, lun, 1, 5010 sense, len); 5011 return; 5012 } 5013 case REPORT_LUNS: 5014 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); 5015 sense[12] = 0x26; 5016 return; 5017 default: 5018 mpt_lprt(mpt, MPT_PRT_DEBUG, 5019 "CMD 0x%x to unmanaged lun %jx\n", 5020 cdbp[0], (uintmax_t)lun); 5021 sense[12] = 0x25; 5022 break; 5023 } 5024 mpt_scsi_tgt_status(mpt, NULL, req, 5025 SCSI_STATUS_CHECK_COND, sense, sizeof(sense)); 5026 return; 5027 } 5028 /* otherwise, leave trtp NULL */ 5029 } else { 5030 trtp = &mpt->trt[lun]; 5031 } 5032 5033 /* 5034 * Deal with any task management 5035 */ 5036 if (fct != MPT_NIL_TMT_VALUE) { 5037 if (trtp == NULL) { 5038 mpt_prt(mpt, "task mgmt function %x but no listener\n", 5039 fct); 5040 mpt_scsi_tgt_status(mpt, NULL, req, 5041 SCSI_STATUS_OK, NULL, 0); 5042 } else { 5043 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, 5044 GET_INITIATOR_INDEX(reply_desc)); 5045 } 5046 return; 5047 } 5048 5049 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); 5050 if (atiop == NULL) { 5051 mpt_lprt(mpt, MPT_PRT_WARN, 5052 "no ATIOs for lun %jx- sending back %s\n", (uintmax_t)lun, 5053 mpt->tenabled? "QUEUE FULL" : "BUSY"); 5054 mpt_scsi_tgt_status(mpt, NULL, req, 5055 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 5056 NULL, 0); 5057 return; 5058 } 5059 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 5060 mpt_lprt(mpt, MPT_PRT_DEBUG1, 5061 "Get FREE ATIO %p lun %jx\n", atiop, 5062 (uintmax_t)atiop->ccb_h.target_lun); 5063 atiop->ccb_h.ccb_mpt_ptr = mpt; 5064 atiop->ccb_h.status = CAM_CDB_RECVD; 5065 atiop->ccb_h.target_lun = lun; 5066 atiop->sense_len = 0; 5067 atiop->tag_id = tgt->tag_id; 5068 atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 5069 atiop->cdb_len = 16; 5070 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 5071 if (tag_action) { 5072 atiop->tag_action = tag_action; 5073 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 5074 } 5075 if (mpt->verbose >= MPT_PRT_DEBUG) { 5076 int i; 5077 mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop, 5078 (uintmax_t)atiop->ccb_h.target_lun); 5079 for (i = 0; i < atiop->cdb_len; i++) { 5080 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 5081 (i == (atiop->cdb_len - 1))? '>' : ' '); 5082 } 5083 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 5084 tgt->itag, tgt->tag_id, tgt->reply_desc, tgt->resid); 5085 } 5086 5087 xpt_done((union ccb *)atiop); 5088 } 5089 5090 static void 5091 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 5092 { 5093 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5094 5095 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 5096 "nx %d tag 0x%08x itag 0x%04x state=%d\n", req, req->serno, 5097 tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb, 5098 tgt->req, tgt->nxfers, tgt->tag_id, tgt->itag, tgt->state); 5099 } 5100 5101 static void 5102 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 5103 { 5104 5105 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 5106 req->index, req->index, req->state); 5107 mpt_tgt_dump_tgt_state(mpt, req); 5108 } 5109 5110 static int 5111 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 5112 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 5113 { 5114 int dbg; 5115 union ccb *ccb; 5116 U16 status; 5117 5118 if (reply_frame == NULL) { 5119 /* 5120 * Figure out what the state of the command is. 5121 */ 5122 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5123 5124 #ifdef INVARIANTS 5125 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 5126 if (tgt->req) { 5127 mpt_req_not_spcl(mpt, tgt->req, 5128 "turbo scsi_tgt_reply associated req", __LINE__); 5129 } 5130 #endif 5131 switch(tgt->state) { 5132 case TGT_STATE_LOADED: 5133 /* 5134 * This is a new command starting. 5135 */ 5136 mpt_scsi_tgt_atio(mpt, req, reply_desc); 5137 break; 5138 case TGT_STATE_MOVING_DATA: 5139 { 5140 ccb = tgt->ccb; 5141 if (tgt->req == NULL) { 5142 panic("mpt: turbo target reply with null " 5143 "associated request moving data"); 5144 /* NOTREACHED */ 5145 } 5146 if (ccb == NULL) { 5147 if (tgt->is_local == 0) { 5148 panic("mpt: turbo target reply with " 5149 "null associated ccb moving data"); 5150 /* NOTREACHED */ 5151 } 5152 mpt_lprt(mpt, MPT_PRT_DEBUG, 5153 "TARGET_ASSIST local done\n"); 5154 TAILQ_REMOVE(&mpt->request_pending_list, 5155 tgt->req, links); 5156 mpt_free_request(mpt, tgt->req); 5157 tgt->req = NULL; 5158 mpt_scsi_tgt_status(mpt, NULL, req, 5159 0, NULL, 0); 5160 return (TRUE); 5161 } 5162 tgt->ccb = NULL; 5163 tgt->nxfers++; 5164 mpt_req_untimeout(tgt->req, mpt_timeout, ccb); 5165 mpt_lprt(mpt, MPT_PRT_DEBUG, 5166 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 5167 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 5168 /* 5169 * Free the Target Assist Request 5170 */ 5171 KASSERT(tgt->req->ccb == ccb, 5172 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 5173 tgt->req->serno, tgt->req->ccb)); 5174 TAILQ_REMOVE(&mpt->request_pending_list, 5175 tgt->req, links); 5176 mpt_free_request(mpt, tgt->req); 5177 tgt->req = NULL; 5178 5179 /* 5180 * Do we need to send status now? That is, are 5181 * we done with all our data transfers? 5182 */ 5183 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 5184 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5185 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5186 KASSERT(ccb->ccb_h.status, 5187 ("zero ccb sts at %d", __LINE__)); 5188 tgt->state = TGT_STATE_IN_CAM; 5189 if (mpt->outofbeer) { 5190 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5191 mpt->outofbeer = 0; 5192 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5193 } 5194 xpt_done(ccb); 5195 break; 5196 } 5197 /* 5198 * Otherwise, send status (and sense) 5199 */ 5200 mpt_scsi_tgt_status(mpt, ccb, req, 5201 ccb->csio.scsi_status, 5202 (void *)&ccb->csio.sense_data, 5203 (ccb->ccb_h.flags & CAM_SEND_SENSE) ? 5204 ccb->csio.sense_len : 0); 5205 break; 5206 } 5207 case TGT_STATE_SENDING_STATUS: 5208 case TGT_STATE_MOVING_DATA_AND_STATUS: 5209 { 5210 int ioindex; 5211 ccb = tgt->ccb; 5212 5213 if (tgt->req == NULL) { 5214 panic("mpt: turbo target reply with null " 5215 "associated request sending status"); 5216 /* NOTREACHED */ 5217 } 5218 5219 if (ccb) { 5220 tgt->ccb = NULL; 5221 if (tgt->state == 5222 TGT_STATE_MOVING_DATA_AND_STATUS) { 5223 tgt->nxfers++; 5224 } 5225 mpt_req_untimeout(tgt->req, mpt_timeout, ccb); 5226 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5227 ccb->ccb_h.status |= CAM_SENT_SENSE; 5228 } 5229 mpt_lprt(mpt, MPT_PRT_DEBUG, 5230 "TARGET_STATUS tag %x sts %x flgs %x req " 5231 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 5232 ccb->ccb_h.flags, tgt->req); 5233 /* 5234 * Free the Target Send Status Request 5235 */ 5236 KASSERT(tgt->req->ccb == ccb, 5237 ("tgt->req %p:%u tgt->req->ccb %p", 5238 tgt->req, tgt->req->serno, tgt->req->ccb)); 5239 /* 5240 * Notify CAM that we're done 5241 */ 5242 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5243 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5244 KASSERT(ccb->ccb_h.status, 5245 ("ZERO ccb sts at %d", __LINE__)); 5246 tgt->ccb = NULL; 5247 } else { 5248 mpt_lprt(mpt, MPT_PRT_DEBUG, 5249 "TARGET_STATUS non-CAM for req %p:%u\n", 5250 tgt->req, tgt->req->serno); 5251 } 5252 TAILQ_REMOVE(&mpt->request_pending_list, 5253 tgt->req, links); 5254 mpt_free_request(mpt, tgt->req); 5255 tgt->req = NULL; 5256 5257 /* 5258 * And re-post the Command Buffer. 5259 * This will reset the state. 5260 */ 5261 ioindex = GET_IO_INDEX(reply_desc); 5262 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5263 tgt->is_local = 0; 5264 mpt_post_target_command(mpt, req, ioindex); 5265 5266 /* 5267 * And post a done for anyone who cares 5268 */ 5269 if (ccb) { 5270 if (mpt->outofbeer) { 5271 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5272 mpt->outofbeer = 0; 5273 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5274 } 5275 xpt_done(ccb); 5276 } 5277 break; 5278 } 5279 case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 5280 tgt->state = TGT_STATE_LOADED; 5281 break; 5282 default: 5283 mpt_prt(mpt, "Unknown Target State 0x%x in Context " 5284 "Reply Function\n", tgt->state); 5285 } 5286 return (TRUE); 5287 } 5288 5289 status = le16toh(reply_frame->IOCStatus); 5290 if (status != MPI_IOCSTATUS_SUCCESS) { 5291 dbg = MPT_PRT_ERROR; 5292 } else { 5293 dbg = MPT_PRT_DEBUG1; 5294 } 5295 5296 mpt_lprt(mpt, dbg, 5297 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 5298 req, req->serno, reply_frame, reply_frame->Function, status); 5299 5300 switch (reply_frame->Function) { 5301 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 5302 { 5303 mpt_tgt_state_t *tgt; 5304 #ifdef INVARIANTS 5305 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 5306 #endif 5307 if (status != MPI_IOCSTATUS_SUCCESS) { 5308 /* 5309 * XXX What to do? 5310 */ 5311 break; 5312 } 5313 tgt = MPT_TGT_STATE(mpt, req); 5314 KASSERT(tgt->state == TGT_STATE_LOADING, 5315 ("bad state 0x%x on reply to buffer post", tgt->state)); 5316 mpt_assign_serno(mpt, req); 5317 tgt->state = TGT_STATE_LOADED; 5318 break; 5319 } 5320 case MPI_FUNCTION_TARGET_ASSIST: 5321 #ifdef INVARIANTS 5322 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 5323 #endif 5324 mpt_prt(mpt, "target assist completion\n"); 5325 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5326 mpt_free_request(mpt, req); 5327 break; 5328 case MPI_FUNCTION_TARGET_STATUS_SEND: 5329 #ifdef INVARIANTS 5330 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 5331 #endif 5332 mpt_prt(mpt, "status send completion\n"); 5333 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5334 mpt_free_request(mpt, req); 5335 break; 5336 case MPI_FUNCTION_TARGET_MODE_ABORT: 5337 { 5338 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 5339 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 5340 PTR_MSG_TARGET_MODE_ABORT abtp = 5341 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 5342 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 5343 #ifdef INVARIANTS 5344 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 5345 #endif 5346 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 5347 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 5348 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5349 mpt_free_request(mpt, req); 5350 break; 5351 } 5352 default: 5353 mpt_prt(mpt, "Unknown Target Address Reply Function code: " 5354 "0x%x\n", reply_frame->Function); 5355 break; 5356 } 5357 return (TRUE); 5358 } 5359