1 /*- 2 * FreeBSD/CAM specific routines for LSI '909 FC adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 2000, 2001 by Greg Ansley 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /*- 29 * Copyright (c) 2002, 2006 by Matthew Jacob 30 * All rights reserved. 31 * 32 * Redistribution and use in source and binary forms, with or without 33 * modification, are permitted provided that the following conditions are 34 * met: 35 * 1. Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 38 * substantially similar to the "NO WARRANTY" disclaimer below 39 * ("Disclaimer") and any redistribution must be conditioned upon including 40 * a substantially similar Disclaimer requirement for further binary 41 * redistribution. 42 * 3. Neither the names of the above listed copyright holders nor the names 43 * of any contributors may be used to endorse or promote products derived 44 * from this software without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 * 58 * Support from Chris Ellsworth in order to make SAS adapters work 59 * is gratefully acknowledged. 60 * 61 * Support from LSI-Logic has also gone a great deal toward making this a 62 * workable subsystem and is gratefully acknowledged. 63 */ 64 /*- 65 * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 66 * Copyright (c) 2005, WHEEL Sp. z o.o. 67 * Copyright (c) 2004, 2005 Justin T. Gibbs 68 * All rights reserved. 69 * 70 * Redistribution and use in source and binary forms, with or without 71 * modification, are permitted provided that the following conditions are 72 * met: 73 * 1. Redistributions of source code must retain the above copyright 74 * notice, this list of conditions and the following disclaimer. 75 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 76 * substantially similar to the "NO WARRANTY" disclaimer below 77 * ("Disclaimer") and any redistribution must be conditioned upon including 78 * a substantially similar Disclaimer requirement for further binary 79 * redistribution. 80 * 3. Neither the names of the above listed copyright holders nor the names 81 * of any contributors may be used to endorse or promote products derived 82 * from this software without specific prior written permission. 83 * 84 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 85 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 86 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 87 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 88 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 89 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 90 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 91 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 92 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 93 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 94 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 95 */ 96 #include <sys/cdefs.h> 97 __FBSDID("$FreeBSD$"); 98 99 #include <dev/mpt/mpt.h> 100 #include <dev/mpt/mpt_cam.h> 101 #include <dev/mpt/mpt_raid.h> 102 103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */ 104 #include "dev/mpt/mpilib/mpi_init.h" 105 #include "dev/mpt/mpilib/mpi_targ.h" 106 #include "dev/mpt/mpilib/mpi_fc.h" 107 #include "dev/mpt/mpilib/mpi_sas.h" 108 109 #include <sys/callout.h> 110 #include <sys/kthread.h> 111 #include <sys/sysctl.h> 112 113 #if __FreeBSD_version >= 700025 114 #ifndef CAM_NEW_TRAN_CODE 115 #define CAM_NEW_TRAN_CODE 1 116 #endif 117 #endif 118 119 static void mpt_poll(struct cam_sim *); 120 static timeout_t mpt_timeout; 121 static void mpt_action(struct cam_sim *, union ccb *); 122 static int 123 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *); 124 static void mpt_setwidth(struct mpt_softc *, int, int); 125 static void mpt_setsync(struct mpt_softc *, int, int, int); 126 static int mpt_update_spi_config(struct mpt_softc *, int); 127 128 static mpt_reply_handler_t mpt_scsi_reply_handler; 129 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler; 130 static mpt_reply_handler_t mpt_fc_els_reply_handler; 131 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *, 132 MSG_DEFAULT_REPLY *); 133 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int); 134 static int mpt_fc_reset_link(struct mpt_softc *, int); 135 136 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt); 137 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt); 138 static void mpt_recovery_thread(void *arg); 139 static void mpt_recover_commands(struct mpt_softc *mpt); 140 141 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int, 142 u_int, u_int, u_int, int); 143 144 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int); 145 static void mpt_post_target_command(struct mpt_softc *, request_t *, int); 146 static int mpt_add_els_buffers(struct mpt_softc *mpt); 147 static int mpt_add_target_commands(struct mpt_softc *mpt); 148 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t); 149 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t); 150 static void mpt_target_start_io(struct mpt_softc *, union ccb *); 151 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *); 152 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *); 153 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *, 154 uint8_t, uint8_t const *); 155 static void 156 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t, 157 tgt_resource_t *, int); 158 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *); 159 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *); 160 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler; 161 static mpt_reply_handler_t mpt_sata_pass_reply_handler; 162 163 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE; 164 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE; 165 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE; 166 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE; 167 168 static mpt_probe_handler_t mpt_cam_probe; 169 static mpt_attach_handler_t mpt_cam_attach; 170 static mpt_enable_handler_t mpt_cam_enable; 171 static mpt_ready_handler_t mpt_cam_ready; 172 static mpt_event_handler_t mpt_cam_event; 173 static mpt_reset_handler_t mpt_cam_ioc_reset; 174 static mpt_detach_handler_t mpt_cam_detach; 175 176 static struct mpt_personality mpt_cam_personality = 177 { 178 .name = "mpt_cam", 179 .probe = mpt_cam_probe, 180 .attach = mpt_cam_attach, 181 .enable = mpt_cam_enable, 182 .ready = mpt_cam_ready, 183 .event = mpt_cam_event, 184 .reset = mpt_cam_ioc_reset, 185 .detach = mpt_cam_detach, 186 }; 187 188 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND); 189 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1); 190 191 int mpt_enable_sata_wc = -1; 192 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc); 193 194 static int 195 mpt_cam_probe(struct mpt_softc *mpt) 196 { 197 int role; 198 199 /* 200 * Only attach to nodes that support the initiator or target role 201 * (or want to) or have RAID physical devices that need CAM pass-thru 202 * support. 203 */ 204 if (mpt->do_cfg_role) { 205 role = mpt->cfg_role; 206 } else { 207 role = mpt->role; 208 } 209 if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 || 210 (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) { 211 return (0); 212 } 213 return (ENODEV); 214 } 215 216 static int 217 mpt_cam_attach(struct mpt_softc *mpt) 218 { 219 struct cam_devq *devq; 220 mpt_handler_t handler; 221 int maxq; 222 int error; 223 224 MPT_LOCK(mpt); 225 TAILQ_INIT(&mpt->request_timeout_list); 226 maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))? 227 mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt); 228 229 handler.reply_handler = mpt_scsi_reply_handler; 230 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 231 &scsi_io_handler_id); 232 if (error != 0) { 233 MPT_UNLOCK(mpt); 234 goto cleanup; 235 } 236 237 handler.reply_handler = mpt_scsi_tmf_reply_handler; 238 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 239 &scsi_tmf_handler_id); 240 if (error != 0) { 241 MPT_UNLOCK(mpt); 242 goto cleanup; 243 } 244 245 /* 246 * If we're fibre channel and could support target mode, we register 247 * an ELS reply handler and give it resources. 248 */ 249 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { 250 handler.reply_handler = mpt_fc_els_reply_handler; 251 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 252 &fc_els_handler_id); 253 if (error != 0) { 254 MPT_UNLOCK(mpt); 255 goto cleanup; 256 } 257 if (mpt_add_els_buffers(mpt) == FALSE) { 258 error = ENOMEM; 259 MPT_UNLOCK(mpt); 260 goto cleanup; 261 } 262 maxq -= mpt->els_cmds_allocated; 263 } 264 265 /* 266 * If we support target mode, we register a reply handler for it, 267 * but don't add command resources until we actually enable target 268 * mode. 269 */ 270 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { 271 handler.reply_handler = mpt_scsi_tgt_reply_handler; 272 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 273 &mpt->scsi_tgt_handler_id); 274 if (error != 0) { 275 MPT_UNLOCK(mpt); 276 goto cleanup; 277 } 278 } 279 280 if (mpt->is_sas) { 281 handler.reply_handler = mpt_sata_pass_reply_handler; 282 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 283 &sata_pass_handler_id); 284 if (error != 0) { 285 MPT_UNLOCK(mpt); 286 goto cleanup; 287 } 288 } 289 290 /* 291 * We keep one request reserved for timeout TMF requests. 292 */ 293 mpt->tmf_req = mpt_get_request(mpt, FALSE); 294 if (mpt->tmf_req == NULL) { 295 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n"); 296 error = ENOMEM; 297 MPT_UNLOCK(mpt); 298 goto cleanup; 299 } 300 301 /* 302 * Mark the request as free even though not on the free list. 303 * There is only one TMF request allowed to be outstanding at 304 * a time and the TMF routines perform their own allocation 305 * tracking using the standard state flags. 306 */ 307 mpt->tmf_req->state = REQ_STATE_FREE; 308 maxq--; 309 310 /* 311 * The rest of this is CAM foo, for which we need to drop our lock 312 */ 313 MPT_UNLOCK(mpt); 314 315 if (mpt_spawn_recovery_thread(mpt) != 0) { 316 mpt_prt(mpt, "Unable to spawn recovery thread!\n"); 317 error = ENOMEM; 318 goto cleanup; 319 } 320 321 /* 322 * Create the device queue for our SIM(s). 323 */ 324 devq = cam_simq_alloc(maxq); 325 if (devq == NULL) { 326 mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n"); 327 error = ENOMEM; 328 goto cleanup; 329 } 330 331 /* 332 * Construct our SIM entry. 333 */ 334 mpt->sim = 335 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); 336 if (mpt->sim == NULL) { 337 mpt_prt(mpt, "Unable to allocate CAM SIM!\n"); 338 cam_simq_free(devq); 339 error = ENOMEM; 340 goto cleanup; 341 } 342 343 /* 344 * Register exactly this bus. 345 */ 346 MPT_LOCK(mpt); 347 if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) { 348 mpt_prt(mpt, "Bus registration Failed!\n"); 349 error = ENOMEM; 350 MPT_UNLOCK(mpt); 351 goto cleanup; 352 } 353 354 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim), 355 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 356 mpt_prt(mpt, "Unable to allocate Path!\n"); 357 error = ENOMEM; 358 MPT_UNLOCK(mpt); 359 goto cleanup; 360 } 361 MPT_UNLOCK(mpt); 362 363 /* 364 * Only register a second bus for RAID physical 365 * devices if the controller supports RAID. 366 */ 367 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { 368 return (0); 369 } 370 371 /* 372 * Create a "bus" to export all hidden disks to CAM. 373 */ 374 mpt->phydisk_sim = 375 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); 376 if (mpt->phydisk_sim == NULL) { 377 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n"); 378 error = ENOMEM; 379 goto cleanup; 380 } 381 382 /* 383 * Register this bus. 384 */ 385 MPT_LOCK(mpt); 386 if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) != 387 CAM_SUCCESS) { 388 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); 389 error = ENOMEM; 390 MPT_UNLOCK(mpt); 391 goto cleanup; 392 } 393 394 if (xpt_create_path(&mpt->phydisk_path, NULL, 395 cam_sim_path(mpt->phydisk_sim), 396 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 397 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); 398 error = ENOMEM; 399 MPT_UNLOCK(mpt); 400 goto cleanup; 401 } 402 MPT_UNLOCK(mpt); 403 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); 404 return (0); 405 406 cleanup: 407 mpt_cam_detach(mpt); 408 return (error); 409 } 410 411 /* 412 * Read FC configuration information 413 */ 414 static int 415 mpt_read_config_info_fc(struct mpt_softc *mpt) 416 { 417 struct sysctl_ctx_list *ctx; 418 struct sysctl_oid *tree; 419 char *topology = NULL; 420 int rv; 421 422 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, 423 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); 424 if (rv) { 425 return (-1); 426 } 427 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", 428 mpt->mpt_fcport_page0.Header.PageVersion, 429 mpt->mpt_fcport_page0.Header.PageLength, 430 mpt->mpt_fcport_page0.Header.PageNumber, 431 mpt->mpt_fcport_page0.Header.PageType); 432 433 434 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, 435 sizeof(mpt->mpt_fcport_page0), FALSE, 5000); 436 if (rv) { 437 mpt_prt(mpt, "failed to read FC Port Page 0\n"); 438 return (-1); 439 } 440 mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0); 441 442 mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed; 443 444 switch (mpt->mpt_fcport_page0.Flags & 445 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { 446 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: 447 mpt->mpt_fcport_speed = 0; 448 topology = "<NO LOOP>"; 449 break; 450 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: 451 topology = "N-Port"; 452 break; 453 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: 454 topology = "NL-Port"; 455 break; 456 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: 457 topology = "F-Port"; 458 break; 459 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: 460 topology = "FL-Port"; 461 break; 462 default: 463 mpt->mpt_fcport_speed = 0; 464 topology = "?"; 465 break; 466 } 467 468 mpt_lprt(mpt, MPT_PRT_INFO, 469 "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x " 470 "Speed %u-Gbit\n", topology, 471 mpt->mpt_fcport_page0.WWNN.High, 472 mpt->mpt_fcport_page0.WWNN.Low, 473 mpt->mpt_fcport_page0.WWPN.High, 474 mpt->mpt_fcport_page0.WWPN.Low, 475 mpt->mpt_fcport_speed); 476 MPT_UNLOCK(mpt); 477 ctx = device_get_sysctl_ctx(mpt->dev); 478 tree = device_get_sysctl_tree(mpt->dev); 479 480 snprintf(mpt->scinfo.fc.wwnn, sizeof (mpt->scinfo.fc.wwnn), 481 "0x%08x%08x", mpt->mpt_fcport_page0.WWNN.High, 482 mpt->mpt_fcport_page0.WWNN.Low); 483 484 snprintf(mpt->scinfo.fc.wwpn, sizeof (mpt->scinfo.fc.wwpn), 485 "0x%08x%08x", mpt->mpt_fcport_page0.WWPN.High, 486 mpt->mpt_fcport_page0.WWPN.Low); 487 488 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 489 "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0, 490 "World Wide Node Name"); 491 492 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 493 "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0, 494 "World Wide Port Name"); 495 496 MPT_LOCK(mpt); 497 return (0); 498 } 499 500 /* 501 * Set FC configuration information. 502 */ 503 static int 504 mpt_set_initial_config_fc(struct mpt_softc *mpt) 505 { 506 CONFIG_PAGE_FC_PORT_1 fc; 507 U32 fl; 508 int r, doit = 0; 509 int role; 510 511 r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0, 512 &fc.Header, FALSE, 5000); 513 if (r) { 514 mpt_prt(mpt, "failed to read FC page 1 header\n"); 515 return (mpt_fc_reset_link(mpt, 1)); 516 } 517 518 r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0, 519 &fc.Header, sizeof (fc), FALSE, 5000); 520 if (r) { 521 mpt_prt(mpt, "failed to read FC page 1\n"); 522 return (mpt_fc_reset_link(mpt, 1)); 523 } 524 mpt2host_config_page_fc_port_1(&fc); 525 526 /* 527 * Check our flags to make sure we support the role we want. 528 */ 529 doit = 0; 530 role = 0; 531 fl = fc.Flags; 532 533 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) { 534 role |= MPT_ROLE_INITIATOR; 535 } 536 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { 537 role |= MPT_ROLE_TARGET; 538 } 539 540 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK; 541 542 if (mpt->do_cfg_role == 0) { 543 role = mpt->cfg_role; 544 } else { 545 mpt->do_cfg_role = 0; 546 } 547 548 if (role != mpt->cfg_role) { 549 if (mpt->cfg_role & MPT_ROLE_INITIATOR) { 550 if ((role & MPT_ROLE_INITIATOR) == 0) { 551 mpt_prt(mpt, "adding initiator role\n"); 552 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT; 553 doit++; 554 } else { 555 mpt_prt(mpt, "keeping initiator role\n"); 556 } 557 } else if (role & MPT_ROLE_INITIATOR) { 558 mpt_prt(mpt, "removing initiator role\n"); 559 doit++; 560 } 561 if (mpt->cfg_role & MPT_ROLE_TARGET) { 562 if ((role & MPT_ROLE_TARGET) == 0) { 563 mpt_prt(mpt, "adding target role\n"); 564 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG; 565 doit++; 566 } else { 567 mpt_prt(mpt, "keeping target role\n"); 568 } 569 } else if (role & MPT_ROLE_TARGET) { 570 mpt_prt(mpt, "removing target role\n"); 571 doit++; 572 } 573 mpt->role = mpt->cfg_role; 574 } 575 576 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { 577 if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) { 578 mpt_prt(mpt, "adding OXID option\n"); 579 fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID; 580 doit++; 581 } 582 } 583 584 if (doit) { 585 fc.Flags = fl; 586 host2mpt_config_page_fc_port_1(&fc); 587 r = mpt_write_cfg_page(mpt, 588 MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header, 589 sizeof(fc), FALSE, 5000); 590 if (r != 0) { 591 mpt_prt(mpt, "failed to update NVRAM with changes\n"); 592 return (0); 593 } 594 mpt_prt(mpt, "NOTE: NVRAM changes will not take " 595 "effect until next reboot or IOC reset\n"); 596 } 597 return (0); 598 } 599 600 static int 601 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo) 602 { 603 ConfigExtendedPageHeader_t hdr; 604 struct mptsas_phyinfo *phyinfo; 605 SasIOUnitPage0_t *buffer; 606 int error, len, i; 607 608 error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION, 609 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 610 &hdr, 0, 10000); 611 if (error) 612 goto out; 613 if (hdr.ExtPageLength == 0) { 614 error = ENXIO; 615 goto out; 616 } 617 618 len = hdr.ExtPageLength * 4; 619 buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO); 620 if (buffer == NULL) { 621 error = ENOMEM; 622 goto out; 623 } 624 625 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 626 0, &hdr, buffer, len, 0, 10000); 627 if (error) { 628 free(buffer, M_DEVBUF); 629 goto out; 630 } 631 632 portinfo->num_phys = buffer->NumPhys; 633 portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) * 634 portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO); 635 if (portinfo->phy_info == NULL) { 636 free(buffer, M_DEVBUF); 637 error = ENOMEM; 638 goto out; 639 } 640 641 for (i = 0; i < portinfo->num_phys; i++) { 642 phyinfo = &portinfo->phy_info[i]; 643 phyinfo->phy_num = i; 644 phyinfo->port_id = buffer->PhyData[i].Port; 645 phyinfo->negotiated_link_rate = 646 buffer->PhyData[i].NegotiatedLinkRate; 647 phyinfo->handle = 648 le16toh(buffer->PhyData[i].ControllerDevHandle); 649 } 650 651 free(buffer, M_DEVBUF); 652 out: 653 return (error); 654 } 655 656 static int 657 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info, 658 uint32_t form, uint32_t form_specific) 659 { 660 ConfigExtendedPageHeader_t hdr; 661 SasPhyPage0_t *buffer; 662 int error; 663 664 error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0, 665 MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr, 666 0, 10000); 667 if (error) 668 goto out; 669 if (hdr.ExtPageLength == 0) { 670 error = ENXIO; 671 goto out; 672 } 673 674 buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); 675 if (buffer == NULL) { 676 error = ENOMEM; 677 goto out; 678 } 679 680 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 681 form + form_specific, &hdr, buffer, 682 sizeof(SasPhyPage0_t), 0, 10000); 683 if (error) { 684 free(buffer, M_DEVBUF); 685 goto out; 686 } 687 688 phy_info->hw_link_rate = buffer->HwLinkRate; 689 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate; 690 phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle); 691 phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle); 692 693 free(buffer, M_DEVBUF); 694 out: 695 return (error); 696 } 697 698 static int 699 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info, 700 uint32_t form, uint32_t form_specific) 701 { 702 ConfigExtendedPageHeader_t hdr; 703 SasDevicePage0_t *buffer; 704 uint64_t sas_address; 705 int error = 0; 706 707 bzero(device_info, sizeof(*device_info)); 708 error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0, 709 MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, 710 &hdr, 0, 10000); 711 if (error) 712 goto out; 713 if (hdr.ExtPageLength == 0) { 714 error = ENXIO; 715 goto out; 716 } 717 718 buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); 719 if (buffer == NULL) { 720 error = ENOMEM; 721 goto out; 722 } 723 724 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 725 form + form_specific, &hdr, buffer, 726 sizeof(SasDevicePage0_t), 0, 10000); 727 if (error) { 728 free(buffer, M_DEVBUF); 729 goto out; 730 } 731 732 device_info->dev_handle = le16toh(buffer->DevHandle); 733 device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle); 734 device_info->enclosure_handle = le16toh(buffer->EnclosureHandle); 735 device_info->slot = le16toh(buffer->Slot); 736 device_info->phy_num = buffer->PhyNum; 737 device_info->physical_port = buffer->PhysicalPort; 738 device_info->target_id = buffer->TargetID; 739 device_info->bus = buffer->Bus; 740 bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t)); 741 device_info->sas_address = le64toh(sas_address); 742 device_info->device_info = le32toh(buffer->DeviceInfo); 743 744 free(buffer, M_DEVBUF); 745 out: 746 return (error); 747 } 748 749 /* 750 * Read SAS configuration information. Nothing to do yet. 751 */ 752 static int 753 mpt_read_config_info_sas(struct mpt_softc *mpt) 754 { 755 struct mptsas_portinfo *portinfo; 756 struct mptsas_phyinfo *phyinfo; 757 int error, i; 758 759 portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO); 760 if (portinfo == NULL) 761 return (ENOMEM); 762 763 error = mptsas_sas_io_unit_pg0(mpt, portinfo); 764 if (error) { 765 free(portinfo, M_DEVBUF); 766 return (0); 767 } 768 769 for (i = 0; i < portinfo->num_phys; i++) { 770 phyinfo = &portinfo->phy_info[i]; 771 error = mptsas_sas_phy_pg0(mpt, phyinfo, 772 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << 773 MPI_SAS_PHY_PGAD_FORM_SHIFT), i); 774 if (error) 775 break; 776 error = mptsas_sas_device_pg0(mpt, &phyinfo->identify, 777 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 778 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 779 phyinfo->handle); 780 if (error) 781 break; 782 phyinfo->identify.phy_num = phyinfo->phy_num = i; 783 if (phyinfo->attached.dev_handle) 784 error = mptsas_sas_device_pg0(mpt, 785 &phyinfo->attached, 786 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 787 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 788 phyinfo->attached.dev_handle); 789 if (error) 790 break; 791 } 792 mpt->sas_portinfo = portinfo; 793 return (0); 794 } 795 796 static void 797 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo, 798 int enabled) 799 { 800 SataPassthroughRequest_t *pass; 801 request_t *req; 802 int error, status; 803 804 req = mpt_get_request(mpt, 0); 805 if (req == NULL) 806 return; 807 808 pass = req->req_vbuf; 809 bzero(pass, sizeof(SataPassthroughRequest_t)); 810 pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH; 811 pass->TargetID = devinfo->target_id; 812 pass->Bus = devinfo->bus; 813 pass->PassthroughFlags = 0; 814 pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED; 815 pass->DataLength = 0; 816 pass->MsgContext = htole32(req->index | sata_pass_handler_id); 817 pass->CommandFIS[0] = 0x27; 818 pass->CommandFIS[1] = 0x80; 819 pass->CommandFIS[2] = 0xef; 820 pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82; 821 pass->CommandFIS[7] = 0x40; 822 pass->CommandFIS[15] = 0x08; 823 824 mpt_check_doorbell(mpt); 825 mpt_send_cmd(mpt, req); 826 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0, 827 10 * 1000); 828 if (error) { 829 mpt_free_request(mpt, req); 830 printf("error %d sending passthrough\n", error); 831 return; 832 } 833 834 status = le16toh(req->IOCStatus); 835 if (status != MPI_IOCSTATUS_SUCCESS) { 836 mpt_free_request(mpt, req); 837 printf("IOCSTATUS %d\n", status); 838 return; 839 } 840 841 mpt_free_request(mpt, req); 842 } 843 844 /* 845 * Set SAS configuration information. Nothing to do yet. 846 */ 847 static int 848 mpt_set_initial_config_sas(struct mpt_softc *mpt) 849 { 850 struct mptsas_phyinfo *phyinfo; 851 int i; 852 853 if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) { 854 for (i = 0; i < mpt->sas_portinfo->num_phys; i++) { 855 phyinfo = &mpt->sas_portinfo->phy_info[i]; 856 if (phyinfo->attached.dev_handle == 0) 857 continue; 858 if ((phyinfo->attached.device_info & 859 MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0) 860 continue; 861 if (bootverbose) 862 device_printf(mpt->dev, 863 "%sabling SATA WC on phy %d\n", 864 (mpt_enable_sata_wc) ? "En" : "Dis", i); 865 mptsas_set_sata_wc(mpt, &phyinfo->attached, 866 mpt_enable_sata_wc); 867 } 868 } 869 870 return (0); 871 } 872 873 static int 874 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, 875 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 876 { 877 878 if (req != NULL) { 879 if (reply_frame != NULL) { 880 req->IOCStatus = le16toh(reply_frame->IOCStatus); 881 } 882 req->state &= ~REQ_STATE_QUEUED; 883 req->state |= REQ_STATE_DONE; 884 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 885 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 886 wakeup(req); 887 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { 888 /* 889 * Whew- we can free this request (late completion) 890 */ 891 mpt_free_request(mpt, req); 892 } 893 } 894 895 return (TRUE); 896 } 897 898 /* 899 * Read SCSI configuration information 900 */ 901 static int 902 mpt_read_config_info_spi(struct mpt_softc *mpt) 903 { 904 int rv, i; 905 906 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, 907 &mpt->mpt_port_page0.Header, FALSE, 5000); 908 if (rv) { 909 return (-1); 910 } 911 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", 912 mpt->mpt_port_page0.Header.PageVersion, 913 mpt->mpt_port_page0.Header.PageLength, 914 mpt->mpt_port_page0.Header.PageNumber, 915 mpt->mpt_port_page0.Header.PageType); 916 917 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, 918 &mpt->mpt_port_page1.Header, FALSE, 5000); 919 if (rv) { 920 return (-1); 921 } 922 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", 923 mpt->mpt_port_page1.Header.PageVersion, 924 mpt->mpt_port_page1.Header.PageLength, 925 mpt->mpt_port_page1.Header.PageNumber, 926 mpt->mpt_port_page1.Header.PageType); 927 928 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, 929 &mpt->mpt_port_page2.Header, FALSE, 5000); 930 if (rv) { 931 return (-1); 932 } 933 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", 934 mpt->mpt_port_page2.Header.PageVersion, 935 mpt->mpt_port_page2.Header.PageLength, 936 mpt->mpt_port_page2.Header.PageNumber, 937 mpt->mpt_port_page2.Header.PageType); 938 939 for (i = 0; i < 16; i++) { 940 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 941 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); 942 if (rv) { 943 return (-1); 944 } 945 mpt_lprt(mpt, MPT_PRT_DEBUG, 946 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, 947 mpt->mpt_dev_page0[i].Header.PageVersion, 948 mpt->mpt_dev_page0[i].Header.PageLength, 949 mpt->mpt_dev_page0[i].Header.PageNumber, 950 mpt->mpt_dev_page0[i].Header.PageType); 951 952 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 953 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); 954 if (rv) { 955 return (-1); 956 } 957 mpt_lprt(mpt, MPT_PRT_DEBUG, 958 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, 959 mpt->mpt_dev_page1[i].Header.PageVersion, 960 mpt->mpt_dev_page1[i].Header.PageLength, 961 mpt->mpt_dev_page1[i].Header.PageNumber, 962 mpt->mpt_dev_page1[i].Header.PageType); 963 } 964 965 /* 966 * At this point, we don't *have* to fail. As long as we have 967 * valid config header information, we can (barely) lurch 968 * along. 969 */ 970 971 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header, 972 sizeof(mpt->mpt_port_page0), FALSE, 5000); 973 if (rv) { 974 mpt_prt(mpt, "failed to read SPI Port Page 0\n"); 975 } else { 976 mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0); 977 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 978 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", 979 mpt->mpt_port_page0.Capabilities, 980 mpt->mpt_port_page0.PhysicalInterface); 981 } 982 983 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header, 984 sizeof(mpt->mpt_port_page1), FALSE, 5000); 985 if (rv) { 986 mpt_prt(mpt, "failed to read SPI Port Page 1\n"); 987 } else { 988 mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1); 989 mpt_lprt(mpt, MPT_PRT_DEBUG, 990 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", 991 mpt->mpt_port_page1.Configuration, 992 mpt->mpt_port_page1.OnBusTimerValue); 993 } 994 995 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header, 996 sizeof(mpt->mpt_port_page2), FALSE, 5000); 997 if (rv) { 998 mpt_prt(mpt, "failed to read SPI Port Page 2\n"); 999 } else { 1000 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1001 "Port Page 2: Flags %x Settings %x\n", 1002 mpt->mpt_port_page2.PortFlags, 1003 mpt->mpt_port_page2.PortSettings); 1004 mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2); 1005 for (i = 0; i < 16; i++) { 1006 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1007 " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", 1008 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, 1009 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, 1010 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); 1011 } 1012 } 1013 1014 for (i = 0; i < 16; i++) { 1015 rv = mpt_read_cur_cfg_page(mpt, i, 1016 &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0), 1017 FALSE, 5000); 1018 if (rv) { 1019 mpt_prt(mpt, 1020 "cannot read SPI Target %d Device Page 0\n", i); 1021 continue; 1022 } 1023 mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]); 1024 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1025 "target %d page 0: Negotiated Params %x Information %x\n", 1026 i, mpt->mpt_dev_page0[i].NegotiatedParameters, 1027 mpt->mpt_dev_page0[i].Information); 1028 1029 rv = mpt_read_cur_cfg_page(mpt, i, 1030 &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1), 1031 FALSE, 5000); 1032 if (rv) { 1033 mpt_prt(mpt, 1034 "cannot read SPI Target %d Device Page 1\n", i); 1035 continue; 1036 } 1037 mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]); 1038 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1039 "target %d page 1: Requested Params %x Configuration %x\n", 1040 i, mpt->mpt_dev_page1[i].RequestedParameters, 1041 mpt->mpt_dev_page1[i].Configuration); 1042 } 1043 return (0); 1044 } 1045 1046 /* 1047 * Validate SPI configuration information. 1048 * 1049 * In particular, validate SPI Port Page 1. 1050 */ 1051 static int 1052 mpt_set_initial_config_spi(struct mpt_softc *mpt) 1053 { 1054 int error, i, pp1val; 1055 1056 mpt->mpt_disc_enable = 0xff; 1057 mpt->mpt_tag_enable = 0; 1058 1059 pp1val = ((1 << mpt->mpt_ini_id) << 1060 MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id; 1061 if (mpt->mpt_port_page1.Configuration != pp1val) { 1062 CONFIG_PAGE_SCSI_PORT_1 tmp; 1063 1064 mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should " 1065 "be %x\n", mpt->mpt_port_page1.Configuration, pp1val); 1066 tmp = mpt->mpt_port_page1; 1067 tmp.Configuration = pp1val; 1068 host2mpt_config_page_scsi_port_1(&tmp); 1069 error = mpt_write_cur_cfg_page(mpt, 0, 1070 &tmp.Header, sizeof(tmp), FALSE, 5000); 1071 if (error) { 1072 return (-1); 1073 } 1074 error = mpt_read_cur_cfg_page(mpt, 0, 1075 &tmp.Header, sizeof(tmp), FALSE, 5000); 1076 if (error) { 1077 return (-1); 1078 } 1079 mpt2host_config_page_scsi_port_1(&tmp); 1080 if (tmp.Configuration != pp1val) { 1081 mpt_prt(mpt, 1082 "failed to reset SPI Port Page 1 Config value\n"); 1083 return (-1); 1084 } 1085 mpt->mpt_port_page1 = tmp; 1086 } 1087 1088 /* 1089 * The purpose of this exercise is to get 1090 * all targets back to async/narrow. 1091 * 1092 * We skip this step if the BIOS has already negotiated 1093 * speeds with the targets. 1094 */ 1095 i = mpt->mpt_port_page2.PortSettings & 1096 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 1097 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) { 1098 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1099 "honoring BIOS transfer negotiations\n"); 1100 } else { 1101 for (i = 0; i < 16; i++) { 1102 mpt->mpt_dev_page1[i].RequestedParameters = 0; 1103 mpt->mpt_dev_page1[i].Configuration = 0; 1104 (void) mpt_update_spi_config(mpt, i); 1105 } 1106 } 1107 return (0); 1108 } 1109 1110 static int 1111 mpt_cam_enable(struct mpt_softc *mpt) 1112 { 1113 int error; 1114 1115 MPT_LOCK(mpt); 1116 1117 error = EIO; 1118 if (mpt->is_fc) { 1119 if (mpt_read_config_info_fc(mpt)) { 1120 goto out; 1121 } 1122 if (mpt_set_initial_config_fc(mpt)) { 1123 goto out; 1124 } 1125 } else if (mpt->is_sas) { 1126 if (mpt_read_config_info_sas(mpt)) { 1127 goto out; 1128 } 1129 if (mpt_set_initial_config_sas(mpt)) { 1130 goto out; 1131 } 1132 } else if (mpt->is_spi) { 1133 if (mpt_read_config_info_spi(mpt)) { 1134 goto out; 1135 } 1136 if (mpt_set_initial_config_spi(mpt)) { 1137 goto out; 1138 } 1139 } 1140 error = 0; 1141 1142 out: 1143 MPT_UNLOCK(mpt); 1144 return (error); 1145 } 1146 1147 static void 1148 mpt_cam_ready(struct mpt_softc *mpt) 1149 { 1150 1151 /* 1152 * If we're in target mode, hang out resources now 1153 * so we don't cause the world to hang talking to us. 1154 */ 1155 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 1156 /* 1157 * Try to add some target command resources 1158 */ 1159 MPT_LOCK(mpt); 1160 if (mpt_add_target_commands(mpt) == FALSE) { 1161 mpt_prt(mpt, "failed to add target commands\n"); 1162 } 1163 MPT_UNLOCK(mpt); 1164 } 1165 mpt->ready = 1; 1166 } 1167 1168 static void 1169 mpt_cam_detach(struct mpt_softc *mpt) 1170 { 1171 mpt_handler_t handler; 1172 1173 MPT_LOCK(mpt); 1174 mpt->ready = 0; 1175 mpt_terminate_recovery_thread(mpt); 1176 1177 handler.reply_handler = mpt_scsi_reply_handler; 1178 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1179 scsi_io_handler_id); 1180 handler.reply_handler = mpt_scsi_tmf_reply_handler; 1181 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1182 scsi_tmf_handler_id); 1183 handler.reply_handler = mpt_fc_els_reply_handler; 1184 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1185 fc_els_handler_id); 1186 handler.reply_handler = mpt_scsi_tgt_reply_handler; 1187 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1188 mpt->scsi_tgt_handler_id); 1189 handler.reply_handler = mpt_sata_pass_reply_handler; 1190 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1191 sata_pass_handler_id); 1192 1193 if (mpt->tmf_req != NULL) { 1194 mpt->tmf_req->state = REQ_STATE_ALLOCATED; 1195 mpt_free_request(mpt, mpt->tmf_req); 1196 mpt->tmf_req = NULL; 1197 } 1198 if (mpt->sas_portinfo != NULL) { 1199 free(mpt->sas_portinfo, M_DEVBUF); 1200 mpt->sas_portinfo = NULL; 1201 } 1202 1203 if (mpt->sim != NULL) { 1204 xpt_free_path(mpt->path); 1205 xpt_bus_deregister(cam_sim_path(mpt->sim)); 1206 cam_sim_free(mpt->sim, TRUE); 1207 mpt->sim = NULL; 1208 } 1209 1210 if (mpt->phydisk_sim != NULL) { 1211 xpt_free_path(mpt->phydisk_path); 1212 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim)); 1213 cam_sim_free(mpt->phydisk_sim, TRUE); 1214 mpt->phydisk_sim = NULL; 1215 } 1216 MPT_UNLOCK(mpt); 1217 } 1218 1219 /* This routine is used after a system crash to dump core onto the swap device. 1220 */ 1221 static void 1222 mpt_poll(struct cam_sim *sim) 1223 { 1224 struct mpt_softc *mpt; 1225 1226 mpt = (struct mpt_softc *)cam_sim_softc(sim); 1227 mpt_intr(mpt); 1228 } 1229 1230 /* 1231 * Watchdog timeout routine for SCSI requests. 1232 */ 1233 static void 1234 mpt_timeout(void *arg) 1235 { 1236 union ccb *ccb; 1237 struct mpt_softc *mpt; 1238 request_t *req; 1239 1240 ccb = (union ccb *)arg; 1241 mpt = ccb->ccb_h.ccb_mpt_ptr; 1242 1243 MPT_LOCK_ASSERT(mpt); 1244 req = ccb->ccb_h.ccb_req_ptr; 1245 mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req, 1246 req->serno, ccb, req->ccb); 1247 /* XXX: WHAT ARE WE TRYING TO DO HERE? */ 1248 if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) { 1249 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 1250 TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links); 1251 req->state |= REQ_STATE_TIMEDOUT; 1252 mpt_wakeup_recovery_thread(mpt); 1253 } 1254 } 1255 1256 /* 1257 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly. 1258 * 1259 * Takes a list of physical segments and builds the SGL for SCSI IO command 1260 * and forwards the commard to the IOC after one last check that CAM has not 1261 * aborted the transaction. 1262 */ 1263 static void 1264 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1265 { 1266 request_t *req, *trq; 1267 char *mpt_off; 1268 union ccb *ccb; 1269 struct mpt_softc *mpt; 1270 bus_addr_t chain_list_addr; 1271 int first_lim, seg, this_seg_lim; 1272 uint32_t addr, cur_off, flags, nxt_off, tf; 1273 void *sglp = NULL; 1274 MSG_REQUEST_HEADER *hdrp; 1275 SGE_SIMPLE64 *se; 1276 SGE_CHAIN64 *ce; 1277 int istgt = 0; 1278 1279 req = (request_t *)arg; 1280 ccb = req->ccb; 1281 1282 mpt = ccb->ccb_h.ccb_mpt_ptr; 1283 req = ccb->ccb_h.ccb_req_ptr; 1284 1285 hdrp = req->req_vbuf; 1286 mpt_off = req->req_vbuf; 1287 1288 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1289 error = EFBIG; 1290 } 1291 1292 if (error == 0) { 1293 switch (hdrp->Function) { 1294 case MPI_FUNCTION_SCSI_IO_REQUEST: 1295 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 1296 istgt = 0; 1297 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 1298 break; 1299 case MPI_FUNCTION_TARGET_ASSIST: 1300 istgt = 1; 1301 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 1302 break; 1303 default: 1304 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", 1305 hdrp->Function); 1306 error = EINVAL; 1307 break; 1308 } 1309 } 1310 1311 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1312 error = EFBIG; 1313 mpt_prt(mpt, "segment count %d too large (max %u)\n", 1314 nseg, mpt->max_seg_cnt); 1315 } 1316 1317 bad: 1318 if (error != 0) { 1319 if (error != EFBIG && error != ENOMEM) { 1320 mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); 1321 } 1322 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1323 cam_status status; 1324 mpt_freeze_ccb(ccb); 1325 if (error == EFBIG) { 1326 status = CAM_REQ_TOO_BIG; 1327 } else if (error == ENOMEM) { 1328 if (mpt->outofbeer == 0) { 1329 mpt->outofbeer = 1; 1330 xpt_freeze_simq(mpt->sim, 1); 1331 mpt_lprt(mpt, MPT_PRT_DEBUG, 1332 "FREEZEQ\n"); 1333 } 1334 status = CAM_REQUEUE_REQ; 1335 } else { 1336 status = CAM_REQ_CMP_ERR; 1337 } 1338 mpt_set_ccb_status(ccb, status); 1339 } 1340 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1341 request_t *cmd_req = 1342 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1343 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1344 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1345 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1346 } 1347 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1348 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1349 xpt_done(ccb); 1350 mpt_free_request(mpt, req); 1351 return; 1352 } 1353 1354 /* 1355 * No data to transfer? 1356 * Just make a single simple SGL with zero length. 1357 */ 1358 1359 if (mpt->verbose >= MPT_PRT_DEBUG) { 1360 int tidx = ((char *)sglp) - mpt_off; 1361 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1362 } 1363 1364 if (nseg == 0) { 1365 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1366 MPI_pSGE_SET_FLAGS(se1, 1367 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1368 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1369 se1->FlagsLength = htole32(se1->FlagsLength); 1370 goto out; 1371 } 1372 1373 1374 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1375 if (istgt == 0) { 1376 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1377 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1378 } 1379 } else { 1380 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1381 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1382 } 1383 } 1384 1385 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { 1386 bus_dmasync_op_t op; 1387 if (istgt == 0) { 1388 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1389 op = BUS_DMASYNC_PREREAD; 1390 } else { 1391 op = BUS_DMASYNC_PREWRITE; 1392 } 1393 } else { 1394 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1395 op = BUS_DMASYNC_PREWRITE; 1396 } else { 1397 op = BUS_DMASYNC_PREREAD; 1398 } 1399 } 1400 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1401 } 1402 1403 /* 1404 * Okay, fill in what we can at the end of the command frame. 1405 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1406 * the command frame. 1407 * 1408 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1409 * SIMPLE64 pointers and start doing CHAIN64 entries after 1410 * that. 1411 */ 1412 1413 if (nseg < MPT_NSGL_FIRST(mpt)) { 1414 first_lim = nseg; 1415 } else { 1416 /* 1417 * Leave room for CHAIN element 1418 */ 1419 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1420 } 1421 1422 se = (SGE_SIMPLE64 *) sglp; 1423 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1424 tf = flags; 1425 memset(se, 0, sizeof (*se)); 1426 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1427 se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); 1428 if (sizeof(bus_addr_t) > 4) { 1429 addr = ((uint64_t)dm_segs->ds_addr) >> 32; 1430 /* SAS1078 36GB limitation WAR */ 1431 if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + 1432 MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { 1433 addr |= (1 << 31); 1434 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; 1435 } 1436 se->Address.High = htole32(addr); 1437 } 1438 if (seg == first_lim - 1) { 1439 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1440 } 1441 if (seg == nseg - 1) { 1442 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1443 MPI_SGE_FLAGS_END_OF_BUFFER; 1444 } 1445 MPI_pSGE_SET_FLAGS(se, tf); 1446 se->FlagsLength = htole32(se->FlagsLength); 1447 } 1448 1449 if (seg == nseg) { 1450 goto out; 1451 } 1452 1453 /* 1454 * Tell the IOC where to find the first chain element. 1455 */ 1456 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1457 nxt_off = MPT_RQSL(mpt); 1458 trq = req; 1459 1460 /* 1461 * Make up the rest of the data segments out of a chain element 1462 * (contained in the current request frame) which points to 1463 * SIMPLE64 elements in the next request frame, possibly ending 1464 * with *another* chain element (if there's more). 1465 */ 1466 while (seg < nseg) { 1467 /* 1468 * Point to the chain descriptor. Note that the chain 1469 * descriptor is at the end of the *previous* list (whether 1470 * chain or simple). 1471 */ 1472 ce = (SGE_CHAIN64 *) se; 1473 1474 /* 1475 * Before we change our current pointer, make sure we won't 1476 * overflow the request area with this frame. Note that we 1477 * test against 'greater than' here as it's okay in this case 1478 * to have next offset be just outside the request area. 1479 */ 1480 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1481 nxt_off = MPT_REQUEST_AREA; 1482 goto next_chain; 1483 } 1484 1485 /* 1486 * Set our SGE element pointer to the beginning of the chain 1487 * list and update our next chain list offset. 1488 */ 1489 se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; 1490 cur_off = nxt_off; 1491 nxt_off += MPT_RQSL(mpt); 1492 1493 /* 1494 * Now initialize the chain descriptor. 1495 */ 1496 memset(ce, 0, sizeof (*ce)); 1497 1498 /* 1499 * Get the physical address of the chain list. 1500 */ 1501 chain_list_addr = trq->req_pbuf; 1502 chain_list_addr += cur_off; 1503 if (sizeof (bus_addr_t) > 4) { 1504 ce->Address.High = 1505 htole32(((uint64_t)chain_list_addr) >> 32); 1506 } 1507 ce->Address.Low = htole32(chain_list_addr & 0xffffffff); 1508 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | 1509 MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1510 1511 /* 1512 * If we have more than a frame's worth of segments left, 1513 * set up the chain list to have the last element be another 1514 * chain descriptor. 1515 */ 1516 if ((nseg - seg) > MPT_NSGL(mpt)) { 1517 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1518 /* 1519 * The length of the chain is the length in bytes of the 1520 * number of segments plus the next chain element. 1521 * 1522 * The next chain descriptor offset is the length, 1523 * in words, of the number of segments. 1524 */ 1525 ce->Length = (this_seg_lim - seg) * 1526 sizeof (SGE_SIMPLE64); 1527 ce->NextChainOffset = ce->Length >> 2; 1528 ce->Length += sizeof (SGE_CHAIN64); 1529 } else { 1530 this_seg_lim = nseg; 1531 ce->Length = (this_seg_lim - seg) * 1532 sizeof (SGE_SIMPLE64); 1533 } 1534 ce->Length = htole16(ce->Length); 1535 1536 /* 1537 * Fill in the chain list SGE elements with our segment data. 1538 * 1539 * If we're the last element in this chain list, set the last 1540 * element flag. If we're the completely last element period, 1541 * set the end of list and end of buffer flags. 1542 */ 1543 while (seg < this_seg_lim) { 1544 tf = flags; 1545 memset(se, 0, sizeof (*se)); 1546 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1547 se->Address.Low = htole32(dm_segs->ds_addr & 1548 0xffffffff); 1549 if (sizeof (bus_addr_t) > 4) { 1550 addr = ((uint64_t)dm_segs->ds_addr) >> 32; 1551 /* SAS1078 36GB limitation WAR */ 1552 if (mpt->is_1078 && 1553 (((uint64_t)dm_segs->ds_addr + 1554 MPI_SGE_LENGTH(se->FlagsLength)) >> 1555 32) == 9) { 1556 addr |= (1 << 31); 1557 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; 1558 } 1559 se->Address.High = htole32(addr); 1560 } 1561 if (seg == this_seg_lim - 1) { 1562 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1563 } 1564 if (seg == nseg - 1) { 1565 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1566 MPI_SGE_FLAGS_END_OF_BUFFER; 1567 } 1568 MPI_pSGE_SET_FLAGS(se, tf); 1569 se->FlagsLength = htole32(se->FlagsLength); 1570 se++; 1571 seg++; 1572 dm_segs++; 1573 } 1574 1575 next_chain: 1576 /* 1577 * If we have more segments to do and we've used up all of 1578 * the space in a request area, go allocate another one 1579 * and chain to that. 1580 */ 1581 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1582 request_t *nrq; 1583 1584 nrq = mpt_get_request(mpt, FALSE); 1585 1586 if (nrq == NULL) { 1587 error = ENOMEM; 1588 goto bad; 1589 } 1590 1591 /* 1592 * Append the new request area on the tail of our list. 1593 */ 1594 if ((trq = req->chain) == NULL) { 1595 req->chain = nrq; 1596 } else { 1597 while (trq->chain != NULL) { 1598 trq = trq->chain; 1599 } 1600 trq->chain = nrq; 1601 } 1602 trq = nrq; 1603 mpt_off = trq->req_vbuf; 1604 if (mpt->verbose >= MPT_PRT_DEBUG) { 1605 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 1606 } 1607 nxt_off = 0; 1608 } 1609 } 1610 out: 1611 1612 /* 1613 * Last time we need to check if this CCB needs to be aborted. 1614 */ 1615 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1616 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1617 request_t *cmd_req = 1618 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1619 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1620 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1621 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1622 } 1623 mpt_prt(mpt, 1624 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", 1625 ccb->ccb_h.status & CAM_STATUS_MASK); 1626 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1627 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 1628 } 1629 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1630 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1631 xpt_done(ccb); 1632 mpt_free_request(mpt, req); 1633 return; 1634 } 1635 1636 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1637 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 1638 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, 1639 mpt_timeout, ccb); 1640 } 1641 if (mpt->verbose > MPT_PRT_DEBUG) { 1642 int nc = 0; 1643 mpt_print_request(req->req_vbuf); 1644 for (trq = req->chain; trq; trq = trq->chain) { 1645 printf(" Additional Chain Area %d\n", nc++); 1646 mpt_dump_sgl(trq->req_vbuf, 0); 1647 } 1648 } 1649 1650 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1651 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1652 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 1653 #ifdef WE_TRUST_AUTO_GOOD_STATUS 1654 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 1655 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 1656 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 1657 } else { 1658 tgt->state = TGT_STATE_MOVING_DATA; 1659 } 1660 #else 1661 tgt->state = TGT_STATE_MOVING_DATA; 1662 #endif 1663 } 1664 mpt_send_cmd(mpt, req); 1665 } 1666 1667 static void 1668 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1669 { 1670 request_t *req, *trq; 1671 char *mpt_off; 1672 union ccb *ccb; 1673 struct mpt_softc *mpt; 1674 int seg, first_lim; 1675 uint32_t flags, nxt_off; 1676 void *sglp = NULL; 1677 MSG_REQUEST_HEADER *hdrp; 1678 SGE_SIMPLE32 *se; 1679 SGE_CHAIN32 *ce; 1680 int istgt = 0; 1681 1682 req = (request_t *)arg; 1683 ccb = req->ccb; 1684 1685 mpt = ccb->ccb_h.ccb_mpt_ptr; 1686 req = ccb->ccb_h.ccb_req_ptr; 1687 1688 hdrp = req->req_vbuf; 1689 mpt_off = req->req_vbuf; 1690 1691 1692 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1693 error = EFBIG; 1694 } 1695 1696 if (error == 0) { 1697 switch (hdrp->Function) { 1698 case MPI_FUNCTION_SCSI_IO_REQUEST: 1699 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 1700 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 1701 break; 1702 case MPI_FUNCTION_TARGET_ASSIST: 1703 istgt = 1; 1704 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 1705 break; 1706 default: 1707 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n", 1708 hdrp->Function); 1709 error = EINVAL; 1710 break; 1711 } 1712 } 1713 1714 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1715 error = EFBIG; 1716 mpt_prt(mpt, "segment count %d too large (max %u)\n", 1717 nseg, mpt->max_seg_cnt); 1718 } 1719 1720 bad: 1721 if (error != 0) { 1722 if (error != EFBIG && error != ENOMEM) { 1723 mpt_prt(mpt, "mpt_execute_req: err %d\n", error); 1724 } 1725 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1726 cam_status status; 1727 mpt_freeze_ccb(ccb); 1728 if (error == EFBIG) { 1729 status = CAM_REQ_TOO_BIG; 1730 } else if (error == ENOMEM) { 1731 if (mpt->outofbeer == 0) { 1732 mpt->outofbeer = 1; 1733 xpt_freeze_simq(mpt->sim, 1); 1734 mpt_lprt(mpt, MPT_PRT_DEBUG, 1735 "FREEZEQ\n"); 1736 } 1737 status = CAM_REQUEUE_REQ; 1738 } else { 1739 status = CAM_REQ_CMP_ERR; 1740 } 1741 mpt_set_ccb_status(ccb, status); 1742 } 1743 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1744 request_t *cmd_req = 1745 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1746 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1747 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1748 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1749 } 1750 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1751 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1752 xpt_done(ccb); 1753 mpt_free_request(mpt, req); 1754 return; 1755 } 1756 1757 /* 1758 * No data to transfer? 1759 * Just make a single simple SGL with zero length. 1760 */ 1761 1762 if (mpt->verbose >= MPT_PRT_DEBUG) { 1763 int tidx = ((char *)sglp) - mpt_off; 1764 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1765 } 1766 1767 if (nseg == 0) { 1768 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1769 MPI_pSGE_SET_FLAGS(se1, 1770 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1771 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1772 se1->FlagsLength = htole32(se1->FlagsLength); 1773 goto out; 1774 } 1775 1776 1777 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 1778 if (istgt == 0) { 1779 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1780 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1781 } 1782 } else { 1783 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1784 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1785 } 1786 } 1787 1788 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { 1789 bus_dmasync_op_t op; 1790 if (istgt) { 1791 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1792 op = BUS_DMASYNC_PREREAD; 1793 } else { 1794 op = BUS_DMASYNC_PREWRITE; 1795 } 1796 } else { 1797 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1798 op = BUS_DMASYNC_PREWRITE; 1799 } else { 1800 op = BUS_DMASYNC_PREREAD; 1801 } 1802 } 1803 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1804 } 1805 1806 /* 1807 * Okay, fill in what we can at the end of the command frame. 1808 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1809 * the command frame. 1810 * 1811 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1812 * SIMPLE32 pointers and start doing CHAIN32 entries after 1813 * that. 1814 */ 1815 1816 if (nseg < MPT_NSGL_FIRST(mpt)) { 1817 first_lim = nseg; 1818 } else { 1819 /* 1820 * Leave room for CHAIN element 1821 */ 1822 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1823 } 1824 1825 se = (SGE_SIMPLE32 *) sglp; 1826 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1827 uint32_t tf; 1828 1829 memset(se, 0,sizeof (*se)); 1830 se->Address = htole32(dm_segs->ds_addr); 1831 1832 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1833 tf = flags; 1834 if (seg == first_lim - 1) { 1835 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1836 } 1837 if (seg == nseg - 1) { 1838 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1839 MPI_SGE_FLAGS_END_OF_BUFFER; 1840 } 1841 MPI_pSGE_SET_FLAGS(se, tf); 1842 se->FlagsLength = htole32(se->FlagsLength); 1843 } 1844 1845 if (seg == nseg) { 1846 goto out; 1847 } 1848 1849 /* 1850 * Tell the IOC where to find the first chain element. 1851 */ 1852 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1853 nxt_off = MPT_RQSL(mpt); 1854 trq = req; 1855 1856 /* 1857 * Make up the rest of the data segments out of a chain element 1858 * (contained in the current request frame) which points to 1859 * SIMPLE32 elements in the next request frame, possibly ending 1860 * with *another* chain element (if there's more). 1861 */ 1862 while (seg < nseg) { 1863 int this_seg_lim; 1864 uint32_t tf, cur_off; 1865 bus_addr_t chain_list_addr; 1866 1867 /* 1868 * Point to the chain descriptor. Note that the chain 1869 * descriptor is at the end of the *previous* list (whether 1870 * chain or simple). 1871 */ 1872 ce = (SGE_CHAIN32 *) se; 1873 1874 /* 1875 * Before we change our current pointer, make sure we won't 1876 * overflow the request area with this frame. Note that we 1877 * test against 'greater than' here as it's okay in this case 1878 * to have next offset be just outside the request area. 1879 */ 1880 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1881 nxt_off = MPT_REQUEST_AREA; 1882 goto next_chain; 1883 } 1884 1885 /* 1886 * Set our SGE element pointer to the beginning of the chain 1887 * list and update our next chain list offset. 1888 */ 1889 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; 1890 cur_off = nxt_off; 1891 nxt_off += MPT_RQSL(mpt); 1892 1893 /* 1894 * Now initialize the chain descriptor. 1895 */ 1896 memset(ce, 0, sizeof (*ce)); 1897 1898 /* 1899 * Get the physical address of the chain list. 1900 */ 1901 chain_list_addr = trq->req_pbuf; 1902 chain_list_addr += cur_off; 1903 1904 1905 1906 ce->Address = htole32(chain_list_addr); 1907 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; 1908 1909 1910 /* 1911 * If we have more than a frame's worth of segments left, 1912 * set up the chain list to have the last element be another 1913 * chain descriptor. 1914 */ 1915 if ((nseg - seg) > MPT_NSGL(mpt)) { 1916 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1917 /* 1918 * The length of the chain is the length in bytes of the 1919 * number of segments plus the next chain element. 1920 * 1921 * The next chain descriptor offset is the length, 1922 * in words, of the number of segments. 1923 */ 1924 ce->Length = (this_seg_lim - seg) * 1925 sizeof (SGE_SIMPLE32); 1926 ce->NextChainOffset = ce->Length >> 2; 1927 ce->Length += sizeof (SGE_CHAIN32); 1928 } else { 1929 this_seg_lim = nseg; 1930 ce->Length = (this_seg_lim - seg) * 1931 sizeof (SGE_SIMPLE32); 1932 } 1933 ce->Length = htole16(ce->Length); 1934 1935 /* 1936 * Fill in the chain list SGE elements with our segment data. 1937 * 1938 * If we're the last element in this chain list, set the last 1939 * element flag. If we're the completely last element period, 1940 * set the end of list and end of buffer flags. 1941 */ 1942 while (seg < this_seg_lim) { 1943 memset(se, 0, sizeof (*se)); 1944 se->Address = htole32(dm_segs->ds_addr); 1945 1946 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1947 tf = flags; 1948 if (seg == this_seg_lim - 1) { 1949 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1950 } 1951 if (seg == nseg - 1) { 1952 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1953 MPI_SGE_FLAGS_END_OF_BUFFER; 1954 } 1955 MPI_pSGE_SET_FLAGS(se, tf); 1956 se->FlagsLength = htole32(se->FlagsLength); 1957 se++; 1958 seg++; 1959 dm_segs++; 1960 } 1961 1962 next_chain: 1963 /* 1964 * If we have more segments to do and we've used up all of 1965 * the space in a request area, go allocate another one 1966 * and chain to that. 1967 */ 1968 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1969 request_t *nrq; 1970 1971 nrq = mpt_get_request(mpt, FALSE); 1972 1973 if (nrq == NULL) { 1974 error = ENOMEM; 1975 goto bad; 1976 } 1977 1978 /* 1979 * Append the new request area on the tail of our list. 1980 */ 1981 if ((trq = req->chain) == NULL) { 1982 req->chain = nrq; 1983 } else { 1984 while (trq->chain != NULL) { 1985 trq = trq->chain; 1986 } 1987 trq->chain = nrq; 1988 } 1989 trq = nrq; 1990 mpt_off = trq->req_vbuf; 1991 if (mpt->verbose >= MPT_PRT_DEBUG) { 1992 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 1993 } 1994 nxt_off = 0; 1995 } 1996 } 1997 out: 1998 1999 /* 2000 * Last time we need to check if this CCB needs to be aborted. 2001 */ 2002 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2003 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2004 request_t *cmd_req = 2005 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2006 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 2007 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 2008 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 2009 } 2010 mpt_prt(mpt, 2011 "mpt_execute_req: I/O cancelled (status 0x%x)\n", 2012 ccb->ccb_h.status & CAM_STATUS_MASK); 2013 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2014 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2015 } 2016 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2017 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 2018 xpt_done(ccb); 2019 mpt_free_request(mpt, req); 2020 return; 2021 } 2022 2023 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2024 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2025 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, 2026 mpt_timeout, ccb); 2027 } 2028 if (mpt->verbose > MPT_PRT_DEBUG) { 2029 int nc = 0; 2030 mpt_print_request(req->req_vbuf); 2031 for (trq = req->chain; trq; trq = trq->chain) { 2032 printf(" Additional Chain Area %d\n", nc++); 2033 mpt_dump_sgl(trq->req_vbuf, 0); 2034 } 2035 } 2036 2037 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2038 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2039 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 2040 #ifdef WE_TRUST_AUTO_GOOD_STATUS 2041 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 2042 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 2043 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 2044 } else { 2045 tgt->state = TGT_STATE_MOVING_DATA; 2046 } 2047 #else 2048 tgt->state = TGT_STATE_MOVING_DATA; 2049 #endif 2050 } 2051 mpt_send_cmd(mpt, req); 2052 } 2053 2054 static void 2055 mpt_start(struct cam_sim *sim, union ccb *ccb) 2056 { 2057 request_t *req; 2058 struct mpt_softc *mpt; 2059 MSG_SCSI_IO_REQUEST *mpt_req; 2060 struct ccb_scsiio *csio = &ccb->csio; 2061 struct ccb_hdr *ccbh = &ccb->ccb_h; 2062 bus_dmamap_callback_t *cb; 2063 target_id_t tgt; 2064 int raid_passthru; 2065 2066 /* Get the pointer for the physical addapter */ 2067 mpt = ccb->ccb_h.ccb_mpt_ptr; 2068 raid_passthru = (sim == mpt->phydisk_sim); 2069 2070 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 2071 if (mpt->outofbeer == 0) { 2072 mpt->outofbeer = 1; 2073 xpt_freeze_simq(mpt->sim, 1); 2074 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 2075 } 2076 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2077 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 2078 xpt_done(ccb); 2079 return; 2080 } 2081 #ifdef INVARIANTS 2082 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); 2083 #endif 2084 2085 if (sizeof (bus_addr_t) > 4) { 2086 cb = mpt_execute_req_a64; 2087 } else { 2088 cb = mpt_execute_req; 2089 } 2090 2091 /* 2092 * Link the ccb and the request structure so we can find 2093 * the other knowing either the request or the ccb 2094 */ 2095 req->ccb = ccb; 2096 ccb->ccb_h.ccb_req_ptr = req; 2097 2098 /* Now we build the command for the IOC */ 2099 mpt_req = req->req_vbuf; 2100 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); 2101 2102 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; 2103 if (raid_passthru) { 2104 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 2105 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 2106 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2107 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 2108 xpt_done(ccb); 2109 return; 2110 } 2111 mpt_req->Bus = 0; /* we never set bus here */ 2112 } else { 2113 tgt = ccb->ccb_h.target_id; 2114 mpt_req->Bus = 0; /* XXX */ 2115 2116 } 2117 mpt_req->SenseBufferLength = 2118 (csio->sense_len < MPT_SENSE_SIZE) ? 2119 csio->sense_len : MPT_SENSE_SIZE; 2120 2121 /* 2122 * We use the message context to find the request structure when we 2123 * Get the command completion interrupt from the IOC. 2124 */ 2125 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); 2126 2127 /* Which physical device to do the I/O on */ 2128 mpt_req->TargetID = tgt; 2129 2130 /* We assume a single level LUN type */ 2131 if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) { 2132 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f); 2133 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff; 2134 } else { 2135 mpt_req->LUN[1] = ccb->ccb_h.target_lun; 2136 } 2137 2138 /* Set the direction of the transfer */ 2139 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2140 mpt_req->Control = MPI_SCSIIO_CONTROL_READ; 2141 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 2142 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; 2143 } else { 2144 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; 2145 } 2146 2147 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 2148 switch(ccb->csio.tag_action) { 2149 case MSG_HEAD_OF_Q_TAG: 2150 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; 2151 break; 2152 case MSG_ACA_TASK: 2153 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; 2154 break; 2155 case MSG_ORDERED_Q_TAG: 2156 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; 2157 break; 2158 case MSG_SIMPLE_Q_TAG: 2159 default: 2160 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2161 break; 2162 } 2163 } else { 2164 if (mpt->is_fc || mpt->is_sas) { 2165 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2166 } else { 2167 /* XXX No such thing for a target doing packetized. */ 2168 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; 2169 } 2170 } 2171 2172 if (mpt->is_spi) { 2173 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 2174 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; 2175 } 2176 } 2177 mpt_req->Control = htole32(mpt_req->Control); 2178 2179 /* Copy the scsi command block into place */ 2180 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2181 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); 2182 } else { 2183 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); 2184 } 2185 2186 mpt_req->CDBLength = csio->cdb_len; 2187 mpt_req->DataLength = htole32(csio->dxfer_len); 2188 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); 2189 2190 /* 2191 * Do a *short* print here if we're set to MPT_PRT_DEBUG 2192 */ 2193 if (mpt->verbose == MPT_PRT_DEBUG) { 2194 U32 df; 2195 mpt_prt(mpt, "mpt_start: %s op 0x%x ", 2196 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? 2197 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); 2198 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; 2199 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { 2200 mpt_prtc(mpt, "(%s %u byte%s ", 2201 (df == MPI_SCSIIO_CONTROL_READ)? 2202 "read" : "write", csio->dxfer_len, 2203 (csio->dxfer_len == 1)? ")" : "s)"); 2204 } 2205 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt, 2206 ccb->ccb_h.target_lun, req, req->serno); 2207 } 2208 2209 /* 2210 * If we have any data to send with this command map it into bus space. 2211 */ 2212 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2213 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 2214 /* 2215 * We've been given a pointer to a single buffer. 2216 */ 2217 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 2218 /* 2219 * Virtual address that needs to translated into 2220 * one or more physical address ranges. 2221 */ 2222 int error; 2223 int s = splsoftvm(); 2224 error = bus_dmamap_load(mpt->buffer_dmat, 2225 req->dmap, csio->data_ptr, csio->dxfer_len, 2226 cb, req, 0); 2227 splx(s); 2228 if (error == EINPROGRESS) { 2229 /* 2230 * So as to maintain ordering, 2231 * freeze the controller queue 2232 * until our mapping is 2233 * returned. 2234 */ 2235 xpt_freeze_simq(mpt->sim, 1); 2236 ccbh->status |= CAM_RELEASE_SIMQ; 2237 } 2238 } else { 2239 /* 2240 * We have been given a pointer to single 2241 * physical buffer. 2242 */ 2243 struct bus_dma_segment seg; 2244 seg.ds_addr = 2245 (bus_addr_t)(vm_offset_t)csio->data_ptr; 2246 seg.ds_len = csio->dxfer_len; 2247 (*cb)(req, &seg, 1, 0); 2248 } 2249 } else { 2250 /* 2251 * We have been given a list of addresses. 2252 * This case could be easily supported but they are not 2253 * currently generated by the CAM subsystem so there 2254 * is no point in wasting the time right now. 2255 */ 2256 struct bus_dma_segment *segs; 2257 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { 2258 (*cb)(req, NULL, 0, EFAULT); 2259 } else { 2260 /* Just use the segments provided */ 2261 segs = (struct bus_dma_segment *)csio->data_ptr; 2262 (*cb)(req, segs, csio->sglist_cnt, 0); 2263 } 2264 } 2265 } else { 2266 (*cb)(req, NULL, 0, 0); 2267 } 2268 } 2269 2270 static int 2271 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, 2272 int sleep_ok) 2273 { 2274 int error; 2275 uint16_t status; 2276 uint8_t response; 2277 2278 error = mpt_scsi_send_tmf(mpt, 2279 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? 2280 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : 2281 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 2282 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 2283 0, /* XXX How do I get the channel ID? */ 2284 tgt != CAM_TARGET_WILDCARD ? tgt : 0, 2285 lun != CAM_LUN_WILDCARD ? lun : 0, 2286 0, sleep_ok); 2287 2288 if (error != 0) { 2289 /* 2290 * mpt_scsi_send_tmf hard resets on failure, so no 2291 * need to do so here. 2292 */ 2293 mpt_prt(mpt, 2294 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); 2295 return (EIO); 2296 } 2297 2298 /* Wait for bus reset to be processed by the IOC. */ 2299 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 2300 REQ_STATE_DONE, sleep_ok, 5000); 2301 2302 status = le16toh(mpt->tmf_req->IOCStatus); 2303 response = mpt->tmf_req->ResponseCode; 2304 mpt->tmf_req->state = REQ_STATE_FREE; 2305 2306 if (error) { 2307 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " 2308 "Resetting controller.\n"); 2309 mpt_reset(mpt, TRUE); 2310 return (ETIMEDOUT); 2311 } 2312 2313 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 2314 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " 2315 "Resetting controller.\n", status); 2316 mpt_reset(mpt, TRUE); 2317 return (EIO); 2318 } 2319 2320 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 2321 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 2322 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. " 2323 "Resetting controller.\n", response); 2324 mpt_reset(mpt, TRUE); 2325 return (EIO); 2326 } 2327 return (0); 2328 } 2329 2330 static int 2331 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) 2332 { 2333 int r = 0; 2334 request_t *req; 2335 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; 2336 2337 req = mpt_get_request(mpt, FALSE); 2338 if (req == NULL) { 2339 return (ENOMEM); 2340 } 2341 fc = req->req_vbuf; 2342 memset(fc, 0, sizeof(*fc)); 2343 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; 2344 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; 2345 fc->MsgContext = htole32(req->index | fc_els_handler_id); 2346 mpt_send_cmd(mpt, req); 2347 if (dowait) { 2348 r = mpt_wait_req(mpt, req, REQ_STATE_DONE, 2349 REQ_STATE_DONE, FALSE, 60 * 1000); 2350 if (r == 0) { 2351 mpt_free_request(mpt, req); 2352 } 2353 } 2354 return (r); 2355 } 2356 2357 static int 2358 mpt_cam_event(struct mpt_softc *mpt, request_t *req, 2359 MSG_EVENT_NOTIFY_REPLY *msg) 2360 { 2361 uint32_t data0, data1; 2362 2363 data0 = le32toh(msg->Data[0]); 2364 data1 = le32toh(msg->Data[1]); 2365 switch(msg->Event & 0xFF) { 2366 case MPI_EVENT_UNIT_ATTENTION: 2367 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", 2368 (data0 >> 8) & 0xff, data0 & 0xff); 2369 break; 2370 2371 case MPI_EVENT_IOC_BUS_RESET: 2372 /* We generated a bus reset */ 2373 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", 2374 (data0 >> 8) & 0xff); 2375 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2376 break; 2377 2378 case MPI_EVENT_EXT_BUS_RESET: 2379 /* Someone else generated a bus reset */ 2380 mpt_prt(mpt, "External Bus Reset Detected\n"); 2381 /* 2382 * These replies don't return EventData like the MPI 2383 * spec says they do 2384 */ 2385 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2386 break; 2387 2388 case MPI_EVENT_RESCAN: 2389 #if __FreeBSD_version >= 600000 2390 { 2391 union ccb *ccb; 2392 uint32_t pathid; 2393 /* 2394 * In general this means a device has been added to the loop. 2395 */ 2396 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2397 if (mpt->ready == 0) { 2398 break; 2399 } 2400 if (mpt->phydisk_sim) { 2401 pathid = cam_sim_path(mpt->phydisk_sim); 2402 } else { 2403 pathid = cam_sim_path(mpt->sim); 2404 } 2405 /* 2406 * Allocate a CCB, create a wildcard path for this bus, 2407 * and schedule a rescan. 2408 */ 2409 ccb = xpt_alloc_ccb_nowait(); 2410 if (ccb == NULL) { 2411 mpt_prt(mpt, "unable to alloc CCB for rescan\n"); 2412 break; 2413 } 2414 2415 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, 2416 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2417 mpt_prt(mpt, "unable to create path for rescan\n"); 2418 xpt_free_ccb(ccb); 2419 break; 2420 } 2421 xpt_rescan(ccb); 2422 break; 2423 } 2424 #else 2425 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2426 break; 2427 #endif 2428 case MPI_EVENT_LINK_STATUS_CHANGE: 2429 mpt_prt(mpt, "Port %d: LinkState: %s\n", 2430 (data1 >> 8) & 0xff, 2431 ((data0 & 0xff) == 0)? "Failed" : "Active"); 2432 break; 2433 2434 case MPI_EVENT_LOOP_STATE_CHANGE: 2435 switch ((data0 >> 16) & 0xff) { 2436 case 0x01: 2437 mpt_prt(mpt, 2438 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " 2439 "(Loop Initialization)\n", 2440 (data1 >> 8) & 0xff, 2441 (data0 >> 8) & 0xff, 2442 (data0 ) & 0xff); 2443 switch ((data0 >> 8) & 0xff) { 2444 case 0xF7: 2445 if ((data0 & 0xff) == 0xF7) { 2446 mpt_prt(mpt, "Device needs AL_PA\n"); 2447 } else { 2448 mpt_prt(mpt, "Device %02x doesn't like " 2449 "FC performance\n", 2450 data0 & 0xFF); 2451 } 2452 break; 2453 case 0xF8: 2454 if ((data0 & 0xff) == 0xF7) { 2455 mpt_prt(mpt, "Device had loop failure " 2456 "at its receiver prior to acquiring" 2457 " AL_PA\n"); 2458 } else { 2459 mpt_prt(mpt, "Device %02x detected loop" 2460 " failure at its receiver\n", 2461 data0 & 0xFF); 2462 } 2463 break; 2464 default: 2465 mpt_prt(mpt, "Device %02x requests that device " 2466 "%02x reset itself\n", 2467 data0 & 0xFF, 2468 (data0 >> 8) & 0xFF); 2469 break; 2470 } 2471 break; 2472 case 0x02: 2473 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2474 "LPE(%02x,%02x) (Loop Port Enable)\n", 2475 (data1 >> 8) & 0xff, /* Port */ 2476 (data0 >> 8) & 0xff, /* Character 3 */ 2477 (data0 ) & 0xff /* Character 4 */); 2478 break; 2479 case 0x03: 2480 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2481 "LPB(%02x,%02x) (Loop Port Bypass)\n", 2482 (data1 >> 8) & 0xff, /* Port */ 2483 (data0 >> 8) & 0xff, /* Character 3 */ 2484 (data0 ) & 0xff /* Character 4 */); 2485 break; 2486 default: 2487 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " 2488 "FC event (%02x %02x %02x)\n", 2489 (data1 >> 8) & 0xff, /* Port */ 2490 (data0 >> 16) & 0xff, /* Event */ 2491 (data0 >> 8) & 0xff, /* Character 3 */ 2492 (data0 ) & 0xff /* Character 4 */); 2493 } 2494 break; 2495 2496 case MPI_EVENT_LOGOUT: 2497 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", 2498 (data1 >> 8) & 0xff, data0); 2499 break; 2500 case MPI_EVENT_QUEUE_FULL: 2501 { 2502 struct cam_sim *sim; 2503 struct cam_path *tmppath; 2504 struct ccb_relsim crs; 2505 PTR_EVENT_DATA_QUEUE_FULL pqf; 2506 lun_id_t lun_id; 2507 2508 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; 2509 pqf->CurrentDepth = le16toh(pqf->CurrentDepth); 2510 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " 2511 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); 2512 if (mpt->phydisk_sim && mpt_is_raid_member(mpt, 2513 pqf->TargetID) != 0) { 2514 sim = mpt->phydisk_sim; 2515 } else { 2516 sim = mpt->sim; 2517 } 2518 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { 2519 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2520 pqf->TargetID, lun_id) != CAM_REQ_CMP) { 2521 mpt_prt(mpt, "unable to create a path to send " 2522 "XPT_REL_SIMQ"); 2523 break; 2524 } 2525 xpt_setup_ccb(&crs.ccb_h, tmppath, 5); 2526 crs.ccb_h.func_code = XPT_REL_SIMQ; 2527 crs.ccb_h.flags = CAM_DEV_QFREEZE; 2528 crs.release_flags = RELSIM_ADJUST_OPENINGS; 2529 crs.openings = pqf->CurrentDepth - 1; 2530 xpt_action((union ccb *)&crs); 2531 if (crs.ccb_h.status != CAM_REQ_CMP) { 2532 mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); 2533 } 2534 xpt_free_path(tmppath); 2535 } 2536 break; 2537 } 2538 case MPI_EVENT_IR_RESYNC_UPDATE: 2539 mpt_prt(mpt, "IR resync update %d completed\n", 2540 (data0 >> 16) & 0xff); 2541 break; 2542 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2543 { 2544 union ccb *ccb; 2545 struct cam_sim *sim; 2546 struct cam_path *tmppath; 2547 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc; 2548 2549 psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data; 2550 if (mpt->phydisk_sim && mpt_is_raid_member(mpt, 2551 psdsc->TargetID) != 0) 2552 sim = mpt->phydisk_sim; 2553 else 2554 sim = mpt->sim; 2555 switch(psdsc->ReasonCode) { 2556 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 2557 ccb = xpt_alloc_ccb_nowait(); 2558 if (ccb == NULL) { 2559 mpt_prt(mpt, 2560 "unable to alloc CCB for rescan\n"); 2561 break; 2562 } 2563 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, 2564 cam_sim_path(sim), psdsc->TargetID, 2565 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2566 mpt_prt(mpt, 2567 "unable to create path for rescan\n"); 2568 xpt_free_ccb(ccb); 2569 break; 2570 } 2571 xpt_rescan(ccb); 2572 break; 2573 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 2574 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2575 psdsc->TargetID, CAM_LUN_WILDCARD) != 2576 CAM_REQ_CMP) { 2577 mpt_prt(mpt, 2578 "unable to create path for async event"); 2579 break; 2580 } 2581 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 2582 xpt_free_path(tmppath); 2583 break; 2584 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET: 2585 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL: 2586 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 2587 break; 2588 default: 2589 mpt_lprt(mpt, MPT_PRT_WARN, 2590 "SAS device status change: Bus: 0x%02x TargetID: " 2591 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus, 2592 psdsc->TargetID, psdsc->ReasonCode); 2593 break; 2594 } 2595 break; 2596 } 2597 case MPI_EVENT_SAS_DISCOVERY_ERROR: 2598 { 2599 PTR_EVENT_DATA_DISCOVERY_ERROR pde; 2600 2601 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data; 2602 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus); 2603 mpt_lprt(mpt, MPT_PRT_WARN, 2604 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n", 2605 pde->Port, pde->DiscoveryStatus); 2606 break; 2607 } 2608 case MPI_EVENT_EVENT_CHANGE: 2609 case MPI_EVENT_INTEGRATED_RAID: 2610 case MPI_EVENT_IR2: 2611 case MPI_EVENT_LOG_ENTRY_ADDED: 2612 case MPI_EVENT_SAS_DISCOVERY: 2613 case MPI_EVENT_SAS_PHY_LINK_STATUS: 2614 case MPI_EVENT_SAS_SES: 2615 break; 2616 default: 2617 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", 2618 msg->Event & 0xFF); 2619 return (0); 2620 } 2621 return (1); 2622 } 2623 2624 /* 2625 * Reply path for all SCSI I/O requests, called from our 2626 * interrupt handler by extracting our handler index from 2627 * the MsgContext field of the reply from the IOC. 2628 * 2629 * This routine is optimized for the common case of a 2630 * completion without error. All exception handling is 2631 * offloaded to non-inlined helper routines to minimize 2632 * cache footprint. 2633 */ 2634 static int 2635 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, 2636 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2637 { 2638 MSG_SCSI_IO_REQUEST *scsi_req; 2639 union ccb *ccb; 2640 2641 if (req->state == REQ_STATE_FREE) { 2642 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); 2643 return (TRUE); 2644 } 2645 2646 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; 2647 ccb = req->ccb; 2648 if (ccb == NULL) { 2649 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", 2650 req, req->serno); 2651 return (TRUE); 2652 } 2653 2654 mpt_req_untimeout(req, mpt_timeout, ccb); 2655 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2656 2657 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2658 bus_dmasync_op_t op; 2659 2660 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 2661 op = BUS_DMASYNC_POSTREAD; 2662 else 2663 op = BUS_DMASYNC_POSTWRITE; 2664 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 2665 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2666 } 2667 2668 if (reply_frame == NULL) { 2669 /* 2670 * Context only reply, completion without error status. 2671 */ 2672 ccb->csio.resid = 0; 2673 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2674 ccb->csio.scsi_status = SCSI_STATUS_OK; 2675 } else { 2676 mpt_scsi_reply_frame_handler(mpt, req, reply_frame); 2677 } 2678 2679 if (mpt->outofbeer) { 2680 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2681 mpt->outofbeer = 0; 2682 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 2683 } 2684 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { 2685 struct scsi_inquiry_data *iq = 2686 (struct scsi_inquiry_data *)ccb->csio.data_ptr; 2687 if (scsi_req->Function == 2688 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 2689 /* 2690 * Fake out the device type so that only the 2691 * pass-thru device will attach. 2692 */ 2693 iq->device &= ~0x1F; 2694 iq->device |= T_NODEVICE; 2695 } 2696 } 2697 if (mpt->verbose == MPT_PRT_DEBUG) { 2698 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", 2699 req, req->serno); 2700 } 2701 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 2702 xpt_done(ccb); 2703 if ((req->state & REQ_STATE_TIMEDOUT) == 0) { 2704 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2705 } else { 2706 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", 2707 req, req->serno); 2708 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 2709 } 2710 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, 2711 ("CCB req needed wakeup")); 2712 #ifdef INVARIANTS 2713 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); 2714 #endif 2715 mpt_free_request(mpt, req); 2716 return (TRUE); 2717 } 2718 2719 static int 2720 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, 2721 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2722 { 2723 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; 2724 2725 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); 2726 #ifdef INVARIANTS 2727 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); 2728 #endif 2729 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; 2730 /* Record IOC Status and Response Code of TMF for any waiters. */ 2731 req->IOCStatus = le16toh(tmf_reply->IOCStatus); 2732 req->ResponseCode = tmf_reply->ResponseCode; 2733 2734 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", 2735 req, req->serno, le16toh(tmf_reply->IOCStatus)); 2736 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2737 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 2738 req->state |= REQ_STATE_DONE; 2739 wakeup(req); 2740 } else { 2741 mpt->tmf_req->state = REQ_STATE_FREE; 2742 } 2743 return (TRUE); 2744 } 2745 2746 /* 2747 * XXX: Move to definitions file 2748 */ 2749 #define ELS 0x22 2750 #define FC4LS 0x32 2751 #define ABTS 0x81 2752 #define BA_ACC 0x84 2753 2754 #define LS_RJT 0x01 2755 #define LS_ACC 0x02 2756 #define PLOGI 0x03 2757 #define LOGO 0x05 2758 #define SRR 0x14 2759 #define PRLI 0x20 2760 #define PRLO 0x21 2761 #define ADISC 0x52 2762 #define RSCN 0x61 2763 2764 static void 2765 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, 2766 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) 2767 { 2768 uint32_t fl; 2769 MSG_LINK_SERVICE_RSP_REQUEST tmp; 2770 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; 2771 2772 /* 2773 * We are going to reuse the ELS request to send this response back. 2774 */ 2775 rsp = &tmp; 2776 memset(rsp, 0, sizeof(*rsp)); 2777 2778 #ifdef USE_IMMEDIATE_LINK_DATA 2779 /* 2780 * Apparently the IMMEDIATE stuff doesn't seem to work. 2781 */ 2782 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; 2783 #endif 2784 rsp->RspLength = length; 2785 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; 2786 rsp->MsgContext = htole32(req->index | fc_els_handler_id); 2787 2788 /* 2789 * Copy over information from the original reply frame to 2790 * it's correct place in the response. 2791 */ 2792 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); 2793 2794 /* 2795 * And now copy back the temporary area to the original frame. 2796 */ 2797 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); 2798 rsp = req->req_vbuf; 2799 2800 #ifdef USE_IMMEDIATE_LINK_DATA 2801 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); 2802 #else 2803 { 2804 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; 2805 bus_addr_t paddr = req->req_pbuf; 2806 paddr += MPT_RQSL(mpt); 2807 2808 fl = 2809 MPI_SGE_FLAGS_HOST_TO_IOC | 2810 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2811 MPI_SGE_FLAGS_LAST_ELEMENT | 2812 MPI_SGE_FLAGS_END_OF_LIST | 2813 MPI_SGE_FLAGS_END_OF_BUFFER; 2814 fl <<= MPI_SGE_FLAGS_SHIFT; 2815 fl |= (length); 2816 se->FlagsLength = htole32(fl); 2817 se->Address = htole32((uint32_t) paddr); 2818 } 2819 #endif 2820 2821 /* 2822 * Send it on... 2823 */ 2824 mpt_send_cmd(mpt, req); 2825 } 2826 2827 static int 2828 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, 2829 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2830 { 2831 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = 2832 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; 2833 U8 rctl; 2834 U8 type; 2835 U8 cmd; 2836 U16 status = le16toh(reply_frame->IOCStatus); 2837 U32 *elsbuf; 2838 int ioindex; 2839 int do_refresh = TRUE; 2840 2841 #ifdef INVARIANTS 2842 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 2843 ("fc_els_reply_handler: req %p:%u for function %x on freelist!", 2844 req, req->serno, rp->Function)); 2845 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2846 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2847 } else { 2848 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2849 } 2850 #endif 2851 mpt_lprt(mpt, MPT_PRT_DEBUG, 2852 "FC_ELS Complete: req %p:%u, reply %p function %x\n", 2853 req, req->serno, reply_frame, reply_frame->Function); 2854 2855 if (status != MPI_IOCSTATUS_SUCCESS) { 2856 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", 2857 status, reply_frame->Function); 2858 if (status == MPI_IOCSTATUS_INVALID_STATE) { 2859 /* 2860 * XXX: to get around shutdown issue 2861 */ 2862 mpt->disabled = 1; 2863 return (TRUE); 2864 } 2865 return (TRUE); 2866 } 2867 2868 /* 2869 * If the function of a link service response, we recycle the 2870 * response to be a refresh for a new link service request. 2871 * 2872 * The request pointer is bogus in this case and we have to fetch 2873 * it based upon the TransactionContext. 2874 */ 2875 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { 2876 /* Freddie Uncle Charlie Katie */ 2877 /* We don't get the IOINDEX as part of the Link Svc Rsp */ 2878 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) 2879 if (mpt->els_cmd_ptrs[ioindex] == req) { 2880 break; 2881 } 2882 2883 KASSERT(ioindex < mpt->els_cmds_allocated, 2884 ("can't find my mommie!")); 2885 2886 /* remove from active list as we're going to re-post it */ 2887 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2888 req->state &= ~REQ_STATE_QUEUED; 2889 req->state |= REQ_STATE_DONE; 2890 mpt_fc_post_els(mpt, req, ioindex); 2891 return (TRUE); 2892 } 2893 2894 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2895 /* remove from active list as we're done */ 2896 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2897 req->state &= ~REQ_STATE_QUEUED; 2898 req->state |= REQ_STATE_DONE; 2899 if (req->state & REQ_STATE_TIMEDOUT) { 2900 mpt_lprt(mpt, MPT_PRT_DEBUG, 2901 "Sync Primitive Send Completed After Timeout\n"); 2902 mpt_free_request(mpt, req); 2903 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { 2904 mpt_lprt(mpt, MPT_PRT_DEBUG, 2905 "Async Primitive Send Complete\n"); 2906 mpt_free_request(mpt, req); 2907 } else { 2908 mpt_lprt(mpt, MPT_PRT_DEBUG, 2909 "Sync Primitive Send Complete- Waking Waiter\n"); 2910 wakeup(req); 2911 } 2912 return (TRUE); 2913 } 2914 2915 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { 2916 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " 2917 "Length %d Message Flags %x\n", rp->Function, rp->Flags, 2918 rp->MsgLength, rp->MsgFlags); 2919 return (TRUE); 2920 } 2921 2922 if (rp->MsgLength <= 5) { 2923 /* 2924 * This is just a ack of an original ELS buffer post 2925 */ 2926 mpt_lprt(mpt, MPT_PRT_DEBUG, 2927 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); 2928 return (TRUE); 2929 } 2930 2931 2932 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; 2933 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; 2934 2935 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; 2936 cmd = be32toh(elsbuf[0]) >> 24; 2937 2938 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { 2939 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); 2940 return (TRUE); 2941 } 2942 2943 ioindex = le32toh(rp->TransactionContext); 2944 req = mpt->els_cmd_ptrs[ioindex]; 2945 2946 if (rctl == ELS && type == 1) { 2947 switch (cmd) { 2948 case PRLI: 2949 /* 2950 * Send back a PRLI ACC 2951 */ 2952 mpt_prt(mpt, "PRLI from 0x%08x%08x\n", 2953 le32toh(rp->Wwn.PortNameHigh), 2954 le32toh(rp->Wwn.PortNameLow)); 2955 elsbuf[0] = htobe32(0x02100014); 2956 elsbuf[1] |= htobe32(0x00000100); 2957 elsbuf[4] = htobe32(0x00000002); 2958 if (mpt->role & MPT_ROLE_TARGET) 2959 elsbuf[4] |= htobe32(0x00000010); 2960 if (mpt->role & MPT_ROLE_INITIATOR) 2961 elsbuf[4] |= htobe32(0x00000020); 2962 /* remove from active list as we're done */ 2963 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2964 req->state &= ~REQ_STATE_QUEUED; 2965 req->state |= REQ_STATE_DONE; 2966 mpt_fc_els_send_response(mpt, req, rp, 20); 2967 do_refresh = FALSE; 2968 break; 2969 case PRLO: 2970 memset(elsbuf, 0, 5 * (sizeof (U32))); 2971 elsbuf[0] = htobe32(0x02100014); 2972 elsbuf[1] = htobe32(0x08000100); 2973 mpt_prt(mpt, "PRLO from 0x%08x%08x\n", 2974 le32toh(rp->Wwn.PortNameHigh), 2975 le32toh(rp->Wwn.PortNameLow)); 2976 /* remove from active list as we're done */ 2977 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2978 req->state &= ~REQ_STATE_QUEUED; 2979 req->state |= REQ_STATE_DONE; 2980 mpt_fc_els_send_response(mpt, req, rp, 20); 2981 do_refresh = FALSE; 2982 break; 2983 default: 2984 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); 2985 break; 2986 } 2987 } else if (rctl == ABTS && type == 0) { 2988 uint16_t rx_id = le16toh(rp->Rxid); 2989 uint16_t ox_id = le16toh(rp->Oxid); 2990 request_t *tgt_req = NULL; 2991 2992 mpt_prt(mpt, 2993 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", 2994 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), 2995 le32toh(rp->Wwn.PortNameLow)); 2996 if (rx_id >= mpt->mpt_max_tgtcmds) { 2997 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); 2998 } else if (mpt->tgt_cmd_ptrs == NULL) { 2999 mpt_prt(mpt, "No TGT CMD PTRS\n"); 3000 } else { 3001 tgt_req = mpt->tgt_cmd_ptrs[rx_id]; 3002 } 3003 if (tgt_req) { 3004 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req); 3005 union ccb *ccb; 3006 uint32_t ct_id; 3007 3008 /* 3009 * Check to make sure we have the correct command 3010 * The reply descriptor in the target state should 3011 * should contain an IoIndex that should match the 3012 * RX_ID. 3013 * 3014 * It'd be nice to have OX_ID to crosscheck with 3015 * as well. 3016 */ 3017 ct_id = GET_IO_INDEX(tgt->reply_desc); 3018 3019 if (ct_id != rx_id) { 3020 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " 3021 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n", 3022 rx_id, ct_id); 3023 goto skip; 3024 } 3025 3026 ccb = tgt->ccb; 3027 if (ccb) { 3028 mpt_prt(mpt, 3029 "CCB (%p): lun %u flags %x status %x\n", 3030 ccb, ccb->ccb_h.target_lun, 3031 ccb->ccb_h.flags, ccb->ccb_h.status); 3032 } 3033 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " 3034 "%x nxfers %x\n", tgt->state, 3035 tgt->resid, tgt->bytes_xfered, tgt->reply_desc, 3036 tgt->nxfers); 3037 skip: 3038 if (mpt_abort_target_cmd(mpt, tgt_req)) { 3039 mpt_prt(mpt, "unable to start TargetAbort\n"); 3040 } 3041 } else { 3042 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); 3043 } 3044 memset(elsbuf, 0, 5 * (sizeof (U32))); 3045 elsbuf[0] = htobe32(0); 3046 elsbuf[1] = htobe32((ox_id << 16) | rx_id); 3047 elsbuf[2] = htobe32(0x000ffff); 3048 /* 3049 * Dork with the reply frame so that the response to it 3050 * will be correct. 3051 */ 3052 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); 3053 /* remove from active list as we're done */ 3054 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3055 req->state &= ~REQ_STATE_QUEUED; 3056 req->state |= REQ_STATE_DONE; 3057 mpt_fc_els_send_response(mpt, req, rp, 12); 3058 do_refresh = FALSE; 3059 } else { 3060 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); 3061 } 3062 if (do_refresh == TRUE) { 3063 /* remove from active list as we're done */ 3064 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3065 req->state &= ~REQ_STATE_QUEUED; 3066 req->state |= REQ_STATE_DONE; 3067 mpt_fc_post_els(mpt, req, ioindex); 3068 } 3069 return (TRUE); 3070 } 3071 3072 /* 3073 * Clean up all SCSI Initiator personality state in response 3074 * to a controller reset. 3075 */ 3076 static void 3077 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) 3078 { 3079 3080 /* 3081 * The pending list is already run down by 3082 * the generic handler. Perform the same 3083 * operation on the timed out request list. 3084 */ 3085 mpt_complete_request_chain(mpt, &mpt->request_timeout_list, 3086 MPI_IOCSTATUS_INVALID_STATE); 3087 3088 /* 3089 * XXX: We need to repost ELS and Target Command Buffers? 3090 */ 3091 3092 /* 3093 * Inform the XPT that a bus reset has occurred. 3094 */ 3095 xpt_async(AC_BUS_RESET, mpt->path, NULL); 3096 } 3097 3098 /* 3099 * Parse additional completion information in the reply 3100 * frame for SCSI I/O requests. 3101 */ 3102 static int 3103 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, 3104 MSG_DEFAULT_REPLY *reply_frame) 3105 { 3106 union ccb *ccb; 3107 MSG_SCSI_IO_REPLY *scsi_io_reply; 3108 u_int ioc_status; 3109 u_int sstate; 3110 3111 MPT_DUMP_REPLY_FRAME(mpt, reply_frame); 3112 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST 3113 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, 3114 ("MPT SCSI I/O Handler called with incorrect reply type")); 3115 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, 3116 ("MPT SCSI I/O Handler called with continuation reply")); 3117 3118 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; 3119 ioc_status = le16toh(scsi_io_reply->IOCStatus); 3120 ioc_status &= MPI_IOCSTATUS_MASK; 3121 sstate = scsi_io_reply->SCSIState; 3122 3123 ccb = req->ccb; 3124 ccb->csio.resid = 3125 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); 3126 3127 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 3128 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { 3129 uint32_t sense_returned; 3130 3131 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 3132 3133 sense_returned = le32toh(scsi_io_reply->SenseCount); 3134 if (sense_returned < ccb->csio.sense_len) 3135 ccb->csio.sense_resid = ccb->csio.sense_len - 3136 sense_returned; 3137 else 3138 ccb->csio.sense_resid = 0; 3139 3140 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); 3141 bcopy(req->sense_vbuf, &ccb->csio.sense_data, 3142 min(ccb->csio.sense_len, sense_returned)); 3143 } 3144 3145 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { 3146 /* 3147 * Tag messages rejected, but non-tagged retry 3148 * was successful. 3149 XXXX 3150 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); 3151 */ 3152 } 3153 3154 switch(ioc_status) { 3155 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3156 /* 3157 * XXX 3158 * Linux driver indicates that a zero 3159 * transfer length with this error code 3160 * indicates a CRC error. 3161 * 3162 * No need to swap the bytes for checking 3163 * against zero. 3164 */ 3165 if (scsi_io_reply->TransferCount == 0) { 3166 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3167 break; 3168 } 3169 /* FALLTHROUGH */ 3170 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 3171 case MPI_IOCSTATUS_SUCCESS: 3172 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 3173 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { 3174 /* 3175 * Status was never returned for this transaction. 3176 */ 3177 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); 3178 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { 3179 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; 3180 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); 3181 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) 3182 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); 3183 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { 3184 3185 /* XXX Handle SPI-Packet and FCP-2 response info. */ 3186 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3187 } else 3188 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3189 break; 3190 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: 3191 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); 3192 break; 3193 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: 3194 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3195 break; 3196 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3197 /* 3198 * Since selection timeouts and "device really not 3199 * there" are grouped into this error code, report 3200 * selection timeout. Selection timeouts are 3201 * typically retried before giving up on the device 3202 * whereas "device not there" errors are considered 3203 * unretryable. 3204 */ 3205 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3206 break; 3207 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3208 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); 3209 break; 3210 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 3211 mpt_set_ccb_status(ccb, CAM_PATH_INVALID); 3212 break; 3213 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 3214 mpt_set_ccb_status(ccb, CAM_TID_INVALID); 3215 break; 3216 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3217 ccb->ccb_h.status = CAM_UA_TERMIO; 3218 break; 3219 case MPI_IOCSTATUS_INVALID_STATE: 3220 /* 3221 * The IOC has been reset. Emulate a bus reset. 3222 */ 3223 /* FALLTHROUGH */ 3224 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: 3225 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 3226 break; 3227 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: 3228 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: 3229 /* 3230 * Don't clobber any timeout status that has 3231 * already been set for this transaction. We 3232 * want the SCSI layer to be able to differentiate 3233 * between the command we aborted due to timeout 3234 * and any innocent bystanders. 3235 */ 3236 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) 3237 break; 3238 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); 3239 break; 3240 3241 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 3242 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); 3243 break; 3244 case MPI_IOCSTATUS_BUSY: 3245 mpt_set_ccb_status(ccb, CAM_BUSY); 3246 break; 3247 case MPI_IOCSTATUS_INVALID_FUNCTION: 3248 case MPI_IOCSTATUS_INVALID_SGL: 3249 case MPI_IOCSTATUS_INTERNAL_ERROR: 3250 case MPI_IOCSTATUS_INVALID_FIELD: 3251 default: 3252 /* XXX 3253 * Some of the above may need to kick 3254 * of a recovery action!!!! 3255 */ 3256 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; 3257 break; 3258 } 3259 3260 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3261 mpt_freeze_ccb(ccb); 3262 } 3263 3264 return (TRUE); 3265 } 3266 3267 static void 3268 mpt_action(struct cam_sim *sim, union ccb *ccb) 3269 { 3270 struct mpt_softc *mpt; 3271 struct ccb_trans_settings *cts; 3272 target_id_t tgt; 3273 lun_id_t lun; 3274 int raid_passthru; 3275 3276 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); 3277 3278 mpt = (struct mpt_softc *)cam_sim_softc(sim); 3279 raid_passthru = (sim == mpt->phydisk_sim); 3280 MPT_LOCK_ASSERT(mpt); 3281 3282 tgt = ccb->ccb_h.target_id; 3283 lun = ccb->ccb_h.target_lun; 3284 if (raid_passthru && 3285 ccb->ccb_h.func_code != XPT_PATH_INQ && 3286 ccb->ccb_h.func_code != XPT_RESET_BUS && 3287 ccb->ccb_h.func_code != XPT_RESET_DEV) { 3288 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 3289 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3290 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 3291 xpt_done(ccb); 3292 return; 3293 } 3294 } 3295 ccb->ccb_h.ccb_mpt_ptr = mpt; 3296 3297 switch (ccb->ccb_h.func_code) { 3298 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 3299 /* 3300 * Do a couple of preliminary checks... 3301 */ 3302 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 3303 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 3304 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3305 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3306 break; 3307 } 3308 } 3309 /* Max supported CDB length is 16 bytes */ 3310 /* XXX Unless we implement the new 32byte message type */ 3311 if (ccb->csio.cdb_len > 3312 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { 3313 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3314 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3315 break; 3316 } 3317 #ifdef MPT_TEST_MULTIPATH 3318 if (mpt->failure_id == ccb->ccb_h.target_id) { 3319 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3320 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3321 break; 3322 } 3323 #endif 3324 ccb->csio.scsi_status = SCSI_STATUS_OK; 3325 mpt_start(sim, ccb); 3326 return; 3327 3328 case XPT_RESET_BUS: 3329 if (raid_passthru) { 3330 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3331 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3332 break; 3333 } 3334 case XPT_RESET_DEV: 3335 if (ccb->ccb_h.func_code == XPT_RESET_BUS) { 3336 if (bootverbose) { 3337 xpt_print(ccb->ccb_h.path, "reset bus\n"); 3338 } 3339 } else { 3340 xpt_print(ccb->ccb_h.path, "reset device\n"); 3341 } 3342 (void) mpt_bus_reset(mpt, tgt, lun, FALSE); 3343 3344 /* 3345 * mpt_bus_reset is always successful in that it 3346 * will fall back to a hard reset should a bus 3347 * reset attempt fail. 3348 */ 3349 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3350 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3351 break; 3352 3353 case XPT_ABORT: 3354 { 3355 union ccb *accb = ccb->cab.abort_ccb; 3356 switch (accb->ccb_h.func_code) { 3357 case XPT_ACCEPT_TARGET_IO: 3358 case XPT_IMMEDIATE_NOTIFY: 3359 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); 3360 break; 3361 case XPT_CONT_TARGET_IO: 3362 mpt_prt(mpt, "cannot abort active CTIOs yet\n"); 3363 ccb->ccb_h.status = CAM_UA_ABORT; 3364 break; 3365 case XPT_SCSI_IO: 3366 ccb->ccb_h.status = CAM_UA_ABORT; 3367 break; 3368 default: 3369 ccb->ccb_h.status = CAM_REQ_INVALID; 3370 break; 3371 } 3372 break; 3373 } 3374 3375 #ifdef CAM_NEW_TRAN_CODE 3376 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) 3377 #else 3378 #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS) 3379 #endif 3380 #define DP_DISC_ENABLE 0x1 3381 #define DP_DISC_DISABL 0x2 3382 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) 3383 3384 #define DP_TQING_ENABLE 0x4 3385 #define DP_TQING_DISABL 0x8 3386 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) 3387 3388 #define DP_WIDE 0x10 3389 #define DP_NARROW 0x20 3390 #define DP_WIDTH (DP_WIDE|DP_NARROW) 3391 3392 #define DP_SYNC 0x40 3393 3394 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 3395 { 3396 #ifdef CAM_NEW_TRAN_CODE 3397 struct ccb_trans_settings_scsi *scsi; 3398 struct ccb_trans_settings_spi *spi; 3399 #endif 3400 uint8_t dval; 3401 u_int period; 3402 u_int offset; 3403 int i, j; 3404 3405 cts = &ccb->cts; 3406 3407 if (mpt->is_fc || mpt->is_sas) { 3408 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3409 break; 3410 } 3411 3412 #ifdef CAM_NEW_TRAN_CODE 3413 scsi = &cts->proto_specific.scsi; 3414 spi = &cts->xport_specific.spi; 3415 3416 /* 3417 * We can be called just to valid transport and proto versions 3418 */ 3419 if (scsi->valid == 0 && spi->valid == 0) { 3420 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3421 break; 3422 } 3423 #endif 3424 3425 /* 3426 * Skip attempting settings on RAID volume disks. 3427 * Other devices on the bus get the normal treatment. 3428 */ 3429 if (mpt->phydisk_sim && raid_passthru == 0 && 3430 mpt_is_raid_volume(mpt, tgt) != 0) { 3431 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3432 "no transfer settings for RAID vols\n"); 3433 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3434 break; 3435 } 3436 3437 i = mpt->mpt_port_page2.PortSettings & 3438 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 3439 j = mpt->mpt_port_page2.PortFlags & 3440 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 3441 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && 3442 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { 3443 mpt_lprt(mpt, MPT_PRT_ALWAYS, 3444 "honoring BIOS transfer negotiations\n"); 3445 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3446 break; 3447 } 3448 3449 dval = 0; 3450 period = 0; 3451 offset = 0; 3452 3453 #ifndef CAM_NEW_TRAN_CODE 3454 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 3455 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ? 3456 DP_DISC_ENABLE : DP_DISC_DISABL; 3457 } 3458 3459 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 3460 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ? 3461 DP_TQING_ENABLE : DP_TQING_DISABL; 3462 } 3463 3464 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 3465 dval |= cts->bus_width ? DP_WIDE : DP_NARROW; 3466 } 3467 3468 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 3469 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) { 3470 dval |= DP_SYNC; 3471 period = cts->sync_period; 3472 offset = cts->sync_offset; 3473 } 3474 #else 3475 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 3476 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? 3477 DP_DISC_ENABLE : DP_DISC_DISABL; 3478 } 3479 3480 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 3481 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 3482 DP_TQING_ENABLE : DP_TQING_DISABL; 3483 } 3484 3485 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 3486 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? 3487 DP_WIDE : DP_NARROW; 3488 } 3489 3490 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { 3491 dval |= DP_SYNC; 3492 offset = spi->sync_offset; 3493 } else { 3494 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3495 &mpt->mpt_dev_page1[tgt]; 3496 offset = ptr->RequestedParameters; 3497 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3498 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3499 } 3500 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { 3501 dval |= DP_SYNC; 3502 period = spi->sync_period; 3503 } else { 3504 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3505 &mpt->mpt_dev_page1[tgt]; 3506 period = ptr->RequestedParameters; 3507 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3508 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3509 } 3510 #endif 3511 if (dval & DP_DISC_ENABLE) { 3512 mpt->mpt_disc_enable |= (1 << tgt); 3513 } else if (dval & DP_DISC_DISABL) { 3514 mpt->mpt_disc_enable &= ~(1 << tgt); 3515 } 3516 if (dval & DP_TQING_ENABLE) { 3517 mpt->mpt_tag_enable |= (1 << tgt); 3518 } else if (dval & DP_TQING_DISABL) { 3519 mpt->mpt_tag_enable &= ~(1 << tgt); 3520 } 3521 if (dval & DP_WIDTH) { 3522 mpt_setwidth(mpt, tgt, 1); 3523 } 3524 if (dval & DP_SYNC) { 3525 mpt_setsync(mpt, tgt, period, offset); 3526 } 3527 if (dval == 0) { 3528 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3529 break; 3530 } 3531 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3532 "set [%d]: 0x%x period 0x%x offset %d\n", 3533 tgt, dval, period, offset); 3534 if (mpt_update_spi_config(mpt, tgt)) { 3535 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3536 } else { 3537 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3538 } 3539 break; 3540 } 3541 case XPT_GET_TRAN_SETTINGS: 3542 { 3543 #ifdef CAM_NEW_TRAN_CODE 3544 struct ccb_trans_settings_scsi *scsi; 3545 cts = &ccb->cts; 3546 cts->protocol = PROTO_SCSI; 3547 if (mpt->is_fc) { 3548 struct ccb_trans_settings_fc *fc = 3549 &cts->xport_specific.fc; 3550 cts->protocol_version = SCSI_REV_SPC; 3551 cts->transport = XPORT_FC; 3552 cts->transport_version = 0; 3553 fc->valid = CTS_FC_VALID_SPEED; 3554 fc->bitrate = 100000; 3555 } else if (mpt->is_sas) { 3556 struct ccb_trans_settings_sas *sas = 3557 &cts->xport_specific.sas; 3558 cts->protocol_version = SCSI_REV_SPC2; 3559 cts->transport = XPORT_SAS; 3560 cts->transport_version = 0; 3561 sas->valid = CTS_SAS_VALID_SPEED; 3562 sas->bitrate = 300000; 3563 } else { 3564 cts->protocol_version = SCSI_REV_2; 3565 cts->transport = XPORT_SPI; 3566 cts->transport_version = 2; 3567 if (mpt_get_spi_settings(mpt, cts) != 0) { 3568 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3569 break; 3570 } 3571 } 3572 scsi = &cts->proto_specific.scsi; 3573 scsi->valid = CTS_SCSI_VALID_TQ; 3574 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 3575 #else 3576 cts = &ccb->cts; 3577 if (mpt->is_fc) { 3578 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3579 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3580 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3581 } else if (mpt->is_sas) { 3582 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3583 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3584 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3585 } else if (mpt_get_spi_settings(mpt, cts) != 0) { 3586 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3587 break; 3588 } 3589 #endif 3590 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3591 break; 3592 } 3593 case XPT_CALC_GEOMETRY: 3594 { 3595 struct ccb_calc_geometry *ccg; 3596 3597 ccg = &ccb->ccg; 3598 if (ccg->block_size == 0) { 3599 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3600 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3601 break; 3602 } 3603 cam_calc_geometry(ccg, /* extended */ 1); 3604 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 3605 break; 3606 } 3607 case XPT_PATH_INQ: /* Path routing inquiry */ 3608 { 3609 struct ccb_pathinq *cpi = &ccb->cpi; 3610 3611 cpi->version_num = 1; 3612 cpi->target_sprt = 0; 3613 cpi->hba_eng_cnt = 0; 3614 cpi->max_target = mpt->port_facts[0].MaxDevices - 1; 3615 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE; 3616 /* 3617 * FC cards report MAX_DEVICES of 512, but 3618 * the MSG_SCSI_IO_REQUEST target id field 3619 * is only 8 bits. Until we fix the driver 3620 * to support 'channels' for bus overflow, 3621 * just limit it. 3622 */ 3623 if (cpi->max_target > 255) { 3624 cpi->max_target = 255; 3625 } 3626 3627 /* 3628 * VMware ESX reports > 16 devices and then dies when we probe. 3629 */ 3630 if (mpt->is_spi && cpi->max_target > 15) { 3631 cpi->max_target = 15; 3632 } 3633 if (mpt->is_spi) 3634 cpi->max_lun = 7; 3635 else 3636 cpi->max_lun = MPT_MAX_LUNS; 3637 cpi->initiator_id = mpt->mpt_ini_id; 3638 cpi->bus_id = cam_sim_bus(sim); 3639 3640 /* 3641 * The base speed is the speed of the underlying connection. 3642 */ 3643 #ifdef CAM_NEW_TRAN_CODE 3644 cpi->protocol = PROTO_SCSI; 3645 if (mpt->is_fc) { 3646 cpi->hba_misc = PIM_NOBUSRESET; 3647 cpi->base_transfer_speed = 100000; 3648 cpi->hba_inquiry = PI_TAG_ABLE; 3649 cpi->transport = XPORT_FC; 3650 cpi->transport_version = 0; 3651 cpi->protocol_version = SCSI_REV_SPC; 3652 } else if (mpt->is_sas) { 3653 cpi->hba_misc = PIM_NOBUSRESET; 3654 cpi->base_transfer_speed = 300000; 3655 cpi->hba_inquiry = PI_TAG_ABLE; 3656 cpi->transport = XPORT_SAS; 3657 cpi->transport_version = 0; 3658 cpi->protocol_version = SCSI_REV_SPC2; 3659 } else { 3660 cpi->hba_misc = PIM_SEQSCAN; 3661 cpi->base_transfer_speed = 3300; 3662 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3663 cpi->transport = XPORT_SPI; 3664 cpi->transport_version = 2; 3665 cpi->protocol_version = SCSI_REV_2; 3666 } 3667 #else 3668 if (mpt->is_fc) { 3669 cpi->hba_misc = PIM_NOBUSRESET; 3670 cpi->base_transfer_speed = 100000; 3671 cpi->hba_inquiry = PI_TAG_ABLE; 3672 } else if (mpt->is_sas) { 3673 cpi->hba_misc = PIM_NOBUSRESET; 3674 cpi->base_transfer_speed = 300000; 3675 cpi->hba_inquiry = PI_TAG_ABLE; 3676 } else { 3677 cpi->hba_misc = PIM_SEQSCAN; 3678 cpi->base_transfer_speed = 3300; 3679 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3680 } 3681 #endif 3682 3683 /* 3684 * We give our fake RAID passhtru bus a width that is MaxVolumes 3685 * wide and restrict it to one lun. 3686 */ 3687 if (raid_passthru) { 3688 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; 3689 cpi->initiator_id = cpi->max_target + 1; 3690 cpi->max_lun = 0; 3691 } 3692 3693 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { 3694 cpi->hba_misc |= PIM_NOINITIATOR; 3695 } 3696 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 3697 cpi->target_sprt = 3698 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3699 } else { 3700 cpi->target_sprt = 0; 3701 } 3702 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3703 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); 3704 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3705 cpi->unit_number = cam_sim_unit(sim); 3706 cpi->ccb_h.status = CAM_REQ_CMP; 3707 break; 3708 } 3709 case XPT_EN_LUN: /* Enable LUN as a target */ 3710 { 3711 int result; 3712 3713 if (ccb->cel.enable) 3714 result = mpt_enable_lun(mpt, 3715 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3716 else 3717 result = mpt_disable_lun(mpt, 3718 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3719 if (result == 0) { 3720 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3721 } else { 3722 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3723 } 3724 break; 3725 } 3726 case XPT_NOTIFY_ACKNOWLEDGE: /* recycle notify ack */ 3727 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ 3728 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 3729 { 3730 tgt_resource_t *trtp; 3731 lun_id_t lun = ccb->ccb_h.target_lun; 3732 ccb->ccb_h.sim_priv.entries[0].field = 0; 3733 ccb->ccb_h.sim_priv.entries[1].ptr = mpt; 3734 ccb->ccb_h.flags = 0; 3735 3736 if (lun == CAM_LUN_WILDCARD) { 3737 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 3738 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3739 break; 3740 } 3741 trtp = &mpt->trt_wildcard; 3742 } else if (lun >= MPT_MAX_LUNS) { 3743 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3744 break; 3745 } else { 3746 trtp = &mpt->trt[lun]; 3747 } 3748 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 3749 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3750 "Put FREE ATIO %p lun %d\n", ccb, lun); 3751 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, 3752 sim_links.stqe); 3753 } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { 3754 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3755 "Put FREE INOT lun %d\n", lun); 3756 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, 3757 sim_links.stqe); 3758 } else { 3759 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); 3760 } 3761 mpt_set_ccb_status(ccb, CAM_REQ_INPROG); 3762 return; 3763 } 3764 case XPT_CONT_TARGET_IO: 3765 mpt_target_start_io(mpt, ccb); 3766 return; 3767 3768 default: 3769 ccb->ccb_h.status = CAM_REQ_INVALID; 3770 break; 3771 } 3772 xpt_done(ccb); 3773 } 3774 3775 static int 3776 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) 3777 { 3778 #ifdef CAM_NEW_TRAN_CODE 3779 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 3780 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 3781 #endif 3782 target_id_t tgt; 3783 uint32_t dval, pval, oval; 3784 int rv; 3785 3786 if (IS_CURRENT_SETTINGS(cts) == 0) { 3787 tgt = cts->ccb_h.target_id; 3788 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { 3789 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { 3790 return (-1); 3791 } 3792 } else { 3793 tgt = cts->ccb_h.target_id; 3794 } 3795 3796 /* 3797 * We aren't looking at Port Page 2 BIOS settings here- 3798 * sometimes these have been known to be bogus XXX. 3799 * 3800 * For user settings, we pick the max from port page 0 3801 * 3802 * For current settings we read the current settings out from 3803 * device page 0 for that target. 3804 */ 3805 if (IS_CURRENT_SETTINGS(cts)) { 3806 CONFIG_PAGE_SCSI_DEVICE_0 tmp; 3807 dval = 0; 3808 3809 tmp = mpt->mpt_dev_page0[tgt]; 3810 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, 3811 sizeof(tmp), FALSE, 5000); 3812 if (rv) { 3813 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); 3814 return (rv); 3815 } 3816 mpt2host_config_page_scsi_device_0(&tmp); 3817 3818 mpt_lprt(mpt, MPT_PRT_DEBUG, 3819 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, 3820 tmp.NegotiatedParameters, tmp.Information); 3821 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? 3822 DP_WIDE : DP_NARROW; 3823 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? 3824 DP_DISC_ENABLE : DP_DISC_DISABL; 3825 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? 3826 DP_TQING_ENABLE : DP_TQING_DISABL; 3827 oval = tmp.NegotiatedParameters; 3828 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; 3829 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; 3830 pval = tmp.NegotiatedParameters; 3831 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; 3832 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; 3833 mpt->mpt_dev_page0[tgt] = tmp; 3834 } else { 3835 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; 3836 oval = mpt->mpt_port_page0.Capabilities; 3837 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); 3838 pval = mpt->mpt_port_page0.Capabilities; 3839 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); 3840 } 3841 3842 #ifndef CAM_NEW_TRAN_CODE 3843 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 3844 cts->valid = 0; 3845 cts->sync_period = pval; 3846 cts->sync_offset = oval; 3847 cts->valid |= CCB_TRANS_SYNC_RATE_VALID; 3848 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID; 3849 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID; 3850 if (dval & DP_WIDE) { 3851 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3852 } else { 3853 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3854 } 3855 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3856 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3857 if (dval & DP_DISC_ENABLE) { 3858 cts->flags |= CCB_TRANS_DISC_ENB; 3859 } 3860 if (dval & DP_TQING_ENABLE) { 3861 cts->flags |= CCB_TRANS_TAG_ENB; 3862 } 3863 } 3864 #else 3865 spi->valid = 0; 3866 scsi->valid = 0; 3867 spi->flags = 0; 3868 scsi->flags = 0; 3869 spi->sync_offset = oval; 3870 spi->sync_period = pval; 3871 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3872 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3873 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3874 if (dval & DP_WIDE) { 3875 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3876 } else { 3877 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3878 } 3879 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3880 scsi->valid = CTS_SCSI_VALID_TQ; 3881 if (dval & DP_TQING_ENABLE) { 3882 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3883 } 3884 spi->valid |= CTS_SPI_VALID_DISC; 3885 if (dval & DP_DISC_ENABLE) { 3886 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3887 } 3888 } 3889 #endif 3890 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3891 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, 3892 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval); 3893 return (0); 3894 } 3895 3896 static void 3897 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 3898 { 3899 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3900 3901 ptr = &mpt->mpt_dev_page1[tgt]; 3902 if (onoff) { 3903 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 3904 } else { 3905 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 3906 } 3907 } 3908 3909 static void 3910 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 3911 { 3912 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3913 3914 ptr = &mpt->mpt_dev_page1[tgt]; 3915 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3916 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3917 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 3918 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 3919 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 3920 if (period == 0) { 3921 return; 3922 } 3923 ptr->RequestedParameters |= 3924 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3925 ptr->RequestedParameters |= 3926 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3927 if (period < 0xa) { 3928 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 3929 } 3930 if (period < 0x9) { 3931 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 3932 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 3933 } 3934 } 3935 3936 static int 3937 mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 3938 { 3939 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 3940 int rv; 3941 3942 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3943 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 3944 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); 3945 tmp = mpt->mpt_dev_page1[tgt]; 3946 host2mpt_config_page_scsi_device_1(&tmp); 3947 rv = mpt_write_cur_cfg_page(mpt, tgt, 3948 &tmp.Header, sizeof(tmp), FALSE, 5000); 3949 if (rv) { 3950 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 3951 return (-1); 3952 } 3953 return (0); 3954 } 3955 3956 /****************************** Timeout Recovery ******************************/ 3957 static int 3958 mpt_spawn_recovery_thread(struct mpt_softc *mpt) 3959 { 3960 int error; 3961 3962 error = mpt_kthread_create(mpt_recovery_thread, mpt, 3963 &mpt->recovery_thread, /*flags*/0, 3964 /*altstack*/0, "mpt_recovery%d", mpt->unit); 3965 return (error); 3966 } 3967 3968 static void 3969 mpt_terminate_recovery_thread(struct mpt_softc *mpt) 3970 { 3971 3972 if (mpt->recovery_thread == NULL) { 3973 return; 3974 } 3975 mpt->shutdwn_recovery = 1; 3976 wakeup(mpt); 3977 /* 3978 * Sleep on a slightly different location 3979 * for this interlock just for added safety. 3980 */ 3981 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); 3982 } 3983 3984 static void 3985 mpt_recovery_thread(void *arg) 3986 { 3987 struct mpt_softc *mpt; 3988 3989 mpt = (struct mpt_softc *)arg; 3990 MPT_LOCK(mpt); 3991 for (;;) { 3992 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3993 if (mpt->shutdwn_recovery == 0) { 3994 mpt_sleep(mpt, mpt, PUSER, "idle", 0); 3995 } 3996 } 3997 if (mpt->shutdwn_recovery != 0) { 3998 break; 3999 } 4000 mpt_recover_commands(mpt); 4001 } 4002 mpt->recovery_thread = NULL; 4003 wakeup(&mpt->recovery_thread); 4004 MPT_UNLOCK(mpt); 4005 mpt_kthread_exit(0); 4006 } 4007 4008 static int 4009 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, 4010 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) 4011 { 4012 MSG_SCSI_TASK_MGMT *tmf_req; 4013 int error; 4014 4015 /* 4016 * Wait for any current TMF request to complete. 4017 * We're only allowed to issue one TMF at a time. 4018 */ 4019 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, 4020 sleep_ok, MPT_TMF_MAX_TIMEOUT); 4021 if (error != 0) { 4022 mpt_reset(mpt, TRUE); 4023 return (ETIMEDOUT); 4024 } 4025 4026 mpt_assign_serno(mpt, mpt->tmf_req); 4027 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; 4028 4029 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; 4030 memset(tmf_req, 0, sizeof(*tmf_req)); 4031 tmf_req->TargetID = target; 4032 tmf_req->Bus = channel; 4033 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 4034 tmf_req->TaskType = type; 4035 tmf_req->MsgFlags = flags; 4036 tmf_req->MsgContext = 4037 htole32(mpt->tmf_req->index | scsi_tmf_handler_id); 4038 if (lun > MPT_MAX_LUNS) { 4039 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4040 tmf_req->LUN[1] = lun & 0xff; 4041 } else { 4042 tmf_req->LUN[1] = lun; 4043 } 4044 tmf_req->TaskMsgContext = abort_ctx; 4045 4046 mpt_lprt(mpt, MPT_PRT_DEBUG, 4047 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, 4048 mpt->tmf_req->serno, tmf_req->MsgContext); 4049 if (mpt->verbose > MPT_PRT_DEBUG) { 4050 mpt_print_request(tmf_req); 4051 } 4052 4053 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, 4054 ("mpt_scsi_send_tmf: tmf_req already on pending list")); 4055 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); 4056 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); 4057 if (error != MPT_OK) { 4058 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); 4059 mpt->tmf_req->state = REQ_STATE_FREE; 4060 mpt_reset(mpt, TRUE); 4061 } 4062 return (error); 4063 } 4064 4065 /* 4066 * When a command times out, it is placed on the requeust_timeout_list 4067 * and we wake our recovery thread. The MPT-Fusion architecture supports 4068 * only a single TMF operation at a time, so we serially abort/bdr, etc, 4069 * the timedout transactions. The next TMF is issued either by the 4070 * completion handler of the current TMF waking our recovery thread, 4071 * or the TMF timeout handler causing a hard reset sequence. 4072 */ 4073 static void 4074 mpt_recover_commands(struct mpt_softc *mpt) 4075 { 4076 request_t *req; 4077 union ccb *ccb; 4078 int error; 4079 4080 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4081 /* 4082 * No work to do- leave. 4083 */ 4084 mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); 4085 return; 4086 } 4087 4088 /* 4089 * Flush any commands whose completion coincides with their timeout. 4090 */ 4091 mpt_intr(mpt); 4092 4093 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4094 /* 4095 * The timedout commands have already 4096 * completed. This typically means 4097 * that either the timeout value was on 4098 * the hairy edge of what the device 4099 * requires or - more likely - interrupts 4100 * are not happening. 4101 */ 4102 mpt_prt(mpt, "Timedout requests already complete. " 4103 "Interrupts may not be functioning.\n"); 4104 mpt_enable_ints(mpt); 4105 return; 4106 } 4107 4108 /* 4109 * We have no visibility into the current state of the 4110 * controller, so attempt to abort the commands in the 4111 * order they timed-out. For initiator commands, we 4112 * depend on the reply handler pulling requests off 4113 * the timeout list. 4114 */ 4115 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { 4116 uint16_t status; 4117 uint8_t response; 4118 MSG_REQUEST_HEADER *hdrp = req->req_vbuf; 4119 4120 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", 4121 req, req->serno, hdrp->Function); 4122 ccb = req->ccb; 4123 if (ccb == NULL) { 4124 mpt_prt(mpt, "null ccb in timed out request. " 4125 "Resetting Controller.\n"); 4126 mpt_reset(mpt, TRUE); 4127 continue; 4128 } 4129 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); 4130 4131 /* 4132 * Check to see if this is not an initiator command and 4133 * deal with it differently if it is. 4134 */ 4135 switch (hdrp->Function) { 4136 case MPI_FUNCTION_SCSI_IO_REQUEST: 4137 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 4138 break; 4139 default: 4140 /* 4141 * XXX: FIX ME: need to abort target assists... 4142 */ 4143 mpt_prt(mpt, "just putting it back on the pend q\n"); 4144 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 4145 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, 4146 links); 4147 continue; 4148 } 4149 4150 error = mpt_scsi_send_tmf(mpt, 4151 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 4152 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 4153 htole32(req->index | scsi_io_handler_id), TRUE); 4154 4155 if (error != 0) { 4156 /* 4157 * mpt_scsi_send_tmf hard resets on failure, so no 4158 * need to do so here. Our queue should be emptied 4159 * by the hard reset. 4160 */ 4161 continue; 4162 } 4163 4164 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 4165 REQ_STATE_DONE, TRUE, 500); 4166 4167 status = le16toh(mpt->tmf_req->IOCStatus); 4168 response = mpt->tmf_req->ResponseCode; 4169 mpt->tmf_req->state = REQ_STATE_FREE; 4170 4171 if (error != 0) { 4172 /* 4173 * If we've errored out,, reset the controller. 4174 */ 4175 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " 4176 "Resetting controller\n"); 4177 mpt_reset(mpt, TRUE); 4178 continue; 4179 } 4180 4181 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 4182 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " 4183 "Resetting controller.\n", status); 4184 mpt_reset(mpt, TRUE); 4185 continue; 4186 } 4187 4188 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 4189 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 4190 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. " 4191 "Resetting controller.\n", response); 4192 mpt_reset(mpt, TRUE); 4193 continue; 4194 } 4195 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 4196 } 4197 } 4198 4199 /************************ Target Mode Support ****************************/ 4200 static void 4201 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 4202 { 4203 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 4204 PTR_SGE_TRANSACTION32 tep; 4205 PTR_SGE_SIMPLE32 se; 4206 bus_addr_t paddr; 4207 uint32_t fl; 4208 4209 paddr = req->req_pbuf; 4210 paddr += MPT_RQSL(mpt); 4211 4212 fc = req->req_vbuf; 4213 memset(fc, 0, MPT_REQUEST_AREA); 4214 fc->BufferCount = 1; 4215 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 4216 fc->MsgContext = htole32(req->index | fc_els_handler_id); 4217 4218 /* 4219 * Okay, set up ELS buffer pointers. ELS buffer pointers 4220 * consist of a TE SGL element (with details length of zero) 4221 * followed by a SIMPLE SGL element which holds the address 4222 * of the buffer. 4223 */ 4224 4225 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 4226 4227 tep->ContextSize = 4; 4228 tep->Flags = 0; 4229 tep->TransactionContext[0] = htole32(ioindex); 4230 4231 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 4232 fl = 4233 MPI_SGE_FLAGS_HOST_TO_IOC | 4234 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4235 MPI_SGE_FLAGS_LAST_ELEMENT | 4236 MPI_SGE_FLAGS_END_OF_LIST | 4237 MPI_SGE_FLAGS_END_OF_BUFFER; 4238 fl <<= MPI_SGE_FLAGS_SHIFT; 4239 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 4240 se->FlagsLength = htole32(fl); 4241 se->Address = htole32((uint32_t) paddr); 4242 mpt_lprt(mpt, MPT_PRT_DEBUG, 4243 "add ELS index %d ioindex %d for %p:%u\n", 4244 req->index, ioindex, req, req->serno); 4245 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 4246 ("mpt_fc_post_els: request not locked")); 4247 mpt_send_cmd(mpt, req); 4248 } 4249 4250 static void 4251 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 4252 { 4253 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 4254 PTR_CMD_BUFFER_DESCRIPTOR cb; 4255 bus_addr_t paddr; 4256 4257 paddr = req->req_pbuf; 4258 paddr += MPT_RQSL(mpt); 4259 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 4260 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 4261 4262 fc = req->req_vbuf; 4263 fc->BufferCount = 1; 4264 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 4265 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4266 4267 cb = &fc->Buffer[0]; 4268 cb->IoIndex = htole16(ioindex); 4269 cb->u.PhysicalAddress32 = htole32((U32) paddr); 4270 4271 mpt_check_doorbell(mpt); 4272 mpt_send_cmd(mpt, req); 4273 } 4274 4275 static int 4276 mpt_add_els_buffers(struct mpt_softc *mpt) 4277 { 4278 int i; 4279 4280 if (mpt->is_fc == 0) { 4281 return (TRUE); 4282 } 4283 4284 if (mpt->els_cmds_allocated) { 4285 return (TRUE); 4286 } 4287 4288 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), 4289 M_DEVBUF, M_NOWAIT | M_ZERO); 4290 4291 if (mpt->els_cmd_ptrs == NULL) { 4292 return (FALSE); 4293 } 4294 4295 /* 4296 * Feed the chip some ELS buffer resources 4297 */ 4298 for (i = 0; i < MPT_MAX_ELS; i++) { 4299 request_t *req = mpt_get_request(mpt, FALSE); 4300 if (req == NULL) { 4301 break; 4302 } 4303 req->state |= REQ_STATE_LOCKED; 4304 mpt->els_cmd_ptrs[i] = req; 4305 mpt_fc_post_els(mpt, req, i); 4306 } 4307 4308 if (i == 0) { 4309 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 4310 free(mpt->els_cmd_ptrs, M_DEVBUF); 4311 mpt->els_cmd_ptrs = NULL; 4312 return (FALSE); 4313 } 4314 if (i != MPT_MAX_ELS) { 4315 mpt_lprt(mpt, MPT_PRT_INFO, 4316 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 4317 } 4318 mpt->els_cmds_allocated = i; 4319 return(TRUE); 4320 } 4321 4322 static int 4323 mpt_add_target_commands(struct mpt_softc *mpt) 4324 { 4325 int i, max; 4326 4327 if (mpt->tgt_cmd_ptrs) { 4328 return (TRUE); 4329 } 4330 4331 max = MPT_MAX_REQUESTS(mpt) >> 1; 4332 if (max > mpt->mpt_max_tgtcmds) { 4333 max = mpt->mpt_max_tgtcmds; 4334 } 4335 mpt->tgt_cmd_ptrs = 4336 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 4337 if (mpt->tgt_cmd_ptrs == NULL) { 4338 mpt_prt(mpt, 4339 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 4340 return (FALSE); 4341 } 4342 4343 for (i = 0; i < max; i++) { 4344 request_t *req; 4345 4346 req = mpt_get_request(mpt, FALSE); 4347 if (req == NULL) { 4348 break; 4349 } 4350 req->state |= REQ_STATE_LOCKED; 4351 mpt->tgt_cmd_ptrs[i] = req; 4352 mpt_post_target_command(mpt, req, i); 4353 } 4354 4355 4356 if (i == 0) { 4357 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 4358 free(mpt->tgt_cmd_ptrs, M_DEVBUF); 4359 mpt->tgt_cmd_ptrs = NULL; 4360 return (FALSE); 4361 } 4362 4363 mpt->tgt_cmds_allocated = i; 4364 4365 if (i < max) { 4366 mpt_lprt(mpt, MPT_PRT_INFO, 4367 "added %d of %d target bufs\n", i, max); 4368 } 4369 return (i); 4370 } 4371 4372 static int 4373 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4374 { 4375 4376 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4377 mpt->twildcard = 1; 4378 } else if (lun >= MPT_MAX_LUNS) { 4379 return (EINVAL); 4380 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4381 return (EINVAL); 4382 } 4383 if (mpt->tenabled == 0) { 4384 if (mpt->is_fc) { 4385 (void) mpt_fc_reset_link(mpt, 0); 4386 } 4387 mpt->tenabled = 1; 4388 } 4389 if (lun == CAM_LUN_WILDCARD) { 4390 mpt->trt_wildcard.enabled = 1; 4391 } else { 4392 mpt->trt[lun].enabled = 1; 4393 } 4394 return (0); 4395 } 4396 4397 static int 4398 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4399 { 4400 int i; 4401 4402 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4403 mpt->twildcard = 0; 4404 } else if (lun >= MPT_MAX_LUNS) { 4405 return (EINVAL); 4406 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4407 return (EINVAL); 4408 } 4409 if (lun == CAM_LUN_WILDCARD) { 4410 mpt->trt_wildcard.enabled = 0; 4411 } else { 4412 mpt->trt[lun].enabled = 0; 4413 } 4414 for (i = 0; i < MPT_MAX_LUNS; i++) { 4415 if (mpt->trt[lun].enabled) { 4416 break; 4417 } 4418 } 4419 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4420 if (mpt->is_fc) { 4421 (void) mpt_fc_reset_link(mpt, 0); 4422 } 4423 mpt->tenabled = 0; 4424 } 4425 return (0); 4426 } 4427 4428 /* 4429 * Called with MPT lock held 4430 */ 4431 static void 4432 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4433 { 4434 struct ccb_scsiio *csio = &ccb->csio; 4435 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4436 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4437 4438 switch (tgt->state) { 4439 case TGT_STATE_IN_CAM: 4440 break; 4441 case TGT_STATE_MOVING_DATA: 4442 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4443 xpt_freeze_simq(mpt->sim, 1); 4444 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4445 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4446 xpt_done(ccb); 4447 return; 4448 default: 4449 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4450 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4451 mpt_tgt_dump_req_state(mpt, cmd_req); 4452 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 4453 xpt_done(ccb); 4454 return; 4455 } 4456 4457 if (csio->dxfer_len) { 4458 bus_dmamap_callback_t *cb; 4459 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4460 request_t *req; 4461 4462 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, 4463 ("dxfer_len %u but direction is NONE", csio->dxfer_len)); 4464 4465 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4466 if (mpt->outofbeer == 0) { 4467 mpt->outofbeer = 1; 4468 xpt_freeze_simq(mpt->sim, 1); 4469 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4470 } 4471 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4472 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4473 xpt_done(ccb); 4474 return; 4475 } 4476 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4477 if (sizeof (bus_addr_t) > 4) { 4478 cb = mpt_execute_req_a64; 4479 } else { 4480 cb = mpt_execute_req; 4481 } 4482 4483 req->ccb = ccb; 4484 ccb->ccb_h.ccb_req_ptr = req; 4485 4486 /* 4487 * Record the currently active ccb and the 4488 * request for it in our target state area. 4489 */ 4490 tgt->ccb = ccb; 4491 tgt->req = req; 4492 4493 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4494 ta = req->req_vbuf; 4495 4496 if (mpt->is_sas) { 4497 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4498 cmd_req->req_vbuf; 4499 ta->QueueTag = ssp->InitiatorTag; 4500 } else if (mpt->is_spi) { 4501 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4502 cmd_req->req_vbuf; 4503 ta->QueueTag = sp->Tag; 4504 } 4505 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4506 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4507 ta->ReplyWord = htole32(tgt->reply_desc); 4508 if (csio->ccb_h.target_lun > MPT_MAX_LUNS) { 4509 ta->LUN[0] = 4510 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); 4511 ta->LUN[1] = csio->ccb_h.target_lun & 0xff; 4512 } else { 4513 ta->LUN[1] = csio->ccb_h.target_lun; 4514 } 4515 4516 ta->RelativeOffset = tgt->bytes_xfered; 4517 ta->DataLength = ccb->csio.dxfer_len; 4518 if (ta->DataLength > tgt->resid) { 4519 ta->DataLength = tgt->resid; 4520 } 4521 4522 /* 4523 * XXX Should be done after data transfer completes? 4524 */ 4525 tgt->resid -= csio->dxfer_len; 4526 tgt->bytes_xfered += csio->dxfer_len; 4527 4528 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 4529 ta->TargetAssistFlags |= 4530 TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4531 } 4532 4533 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4534 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 4535 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 4536 ta->TargetAssistFlags |= 4537 TARGET_ASSIST_FLAGS_AUTO_STATUS; 4538 } 4539 #endif 4540 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; 4541 4542 mpt_lprt(mpt, MPT_PRT_DEBUG, 4543 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " 4544 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, 4545 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); 4546 4547 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 4548 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 4549 int error; 4550 int s = splsoftvm(); 4551 error = bus_dmamap_load(mpt->buffer_dmat, 4552 req->dmap, csio->data_ptr, csio->dxfer_len, 4553 cb, req, 0); 4554 splx(s); 4555 if (error == EINPROGRESS) { 4556 xpt_freeze_simq(mpt->sim, 1); 4557 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4558 } 4559 } else { 4560 /* 4561 * We have been given a pointer to single 4562 * physical buffer. 4563 */ 4564 struct bus_dma_segment seg; 4565 seg.ds_addr = (bus_addr_t) 4566 (vm_offset_t)csio->data_ptr; 4567 seg.ds_len = csio->dxfer_len; 4568 (*cb)(req, &seg, 1, 0); 4569 } 4570 } else { 4571 /* 4572 * We have been given a list of addresses. 4573 * This case could be easily supported but they are not 4574 * currently generated by the CAM subsystem so there 4575 * is no point in wasting the time right now. 4576 */ 4577 struct bus_dma_segment *sgs; 4578 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 4579 (*cb)(req, NULL, 0, EFAULT); 4580 } else { 4581 /* Just use the segments provided */ 4582 sgs = (struct bus_dma_segment *)csio->data_ptr; 4583 (*cb)(req, sgs, csio->sglist_cnt, 0); 4584 } 4585 } 4586 } else { 4587 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 4588 4589 /* 4590 * XXX: I don't know why this seems to happen, but 4591 * XXX: completing the CCB seems to make things happy. 4592 * XXX: This seems to happen if the initiator requests 4593 * XXX: enough data that we have to do multiple CTIOs. 4594 */ 4595 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4596 mpt_lprt(mpt, MPT_PRT_DEBUG, 4597 "Meaningless STATUS CCB (%p): flags %x status %x " 4598 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 4599 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 4600 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4601 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4602 xpt_done(ccb); 4603 return; 4604 } 4605 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 4606 sp = sense; 4607 memcpy(sp, &csio->sense_data, 4608 min(csio->sense_len, MPT_SENSE_SIZE)); 4609 } 4610 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); 4611 } 4612 } 4613 4614 static void 4615 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 4616 uint32_t lun, int send, uint8_t *data, size_t length) 4617 { 4618 mpt_tgt_state_t *tgt; 4619 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4620 SGE_SIMPLE32 *se; 4621 uint32_t flags; 4622 uint8_t *dptr; 4623 bus_addr_t pptr; 4624 request_t *req; 4625 4626 /* 4627 * We enter with resid set to the data load for the command. 4628 */ 4629 tgt = MPT_TGT_STATE(mpt, cmd_req); 4630 if (length == 0 || tgt->resid == 0) { 4631 tgt->resid = 0; 4632 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); 4633 return; 4634 } 4635 4636 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4637 mpt_prt(mpt, "out of resources- dropping local response\n"); 4638 return; 4639 } 4640 tgt->is_local = 1; 4641 4642 4643 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4644 ta = req->req_vbuf; 4645 4646 if (mpt->is_sas) { 4647 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 4648 ta->QueueTag = ssp->InitiatorTag; 4649 } else if (mpt->is_spi) { 4650 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 4651 ta->QueueTag = sp->Tag; 4652 } 4653 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4654 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4655 ta->ReplyWord = htole32(tgt->reply_desc); 4656 if (lun > MPT_MAX_LUNS) { 4657 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4658 ta->LUN[1] = lun & 0xff; 4659 } else { 4660 ta->LUN[1] = lun; 4661 } 4662 ta->RelativeOffset = 0; 4663 ta->DataLength = length; 4664 4665 dptr = req->req_vbuf; 4666 dptr += MPT_RQSL(mpt); 4667 pptr = req->req_pbuf; 4668 pptr += MPT_RQSL(mpt); 4669 memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 4670 4671 se = (SGE_SIMPLE32 *) &ta->SGL[0]; 4672 memset(se, 0,sizeof (*se)); 4673 4674 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 4675 if (send) { 4676 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4677 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 4678 } 4679 se->Address = pptr; 4680 MPI_pSGE_SET_LENGTH(se, length); 4681 flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 4682 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 4683 MPI_pSGE_SET_FLAGS(se, flags); 4684 4685 tgt->ccb = NULL; 4686 tgt->req = req; 4687 tgt->resid -= length; 4688 tgt->bytes_xfered = length; 4689 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4690 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 4691 #else 4692 tgt->state = TGT_STATE_MOVING_DATA; 4693 #endif 4694 mpt_send_cmd(mpt, req); 4695 } 4696 4697 /* 4698 * Abort queued up CCBs 4699 */ 4700 static cam_status 4701 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 4702 { 4703 struct mpt_hdr_stailq *lp; 4704 struct ccb_hdr *srch; 4705 int found = 0; 4706 union ccb *accb = ccb->cab.abort_ccb; 4707 tgt_resource_t *trtp; 4708 4709 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 4710 4711 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4712 trtp = &mpt->trt_wildcard; 4713 } else { 4714 trtp = &mpt->trt[ccb->ccb_h.target_lun]; 4715 } 4716 4717 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4718 lp = &trtp->atios; 4719 } else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { 4720 lp = &trtp->inots; 4721 } else { 4722 return (CAM_REQ_INVALID); 4723 } 4724 4725 STAILQ_FOREACH(srch, lp, sim_links.stqe) { 4726 if (srch == &accb->ccb_h) { 4727 found = 1; 4728 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 4729 break; 4730 } 4731 } 4732 if (found) { 4733 accb->ccb_h.status = CAM_REQ_ABORTED; 4734 xpt_done(accb); 4735 return (CAM_REQ_CMP); 4736 } 4737 mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); 4738 return (CAM_PATH_INVALID); 4739 } 4740 4741 /* 4742 * Ask the MPT to abort the current target command 4743 */ 4744 static int 4745 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 4746 { 4747 int error; 4748 request_t *req; 4749 PTR_MSG_TARGET_MODE_ABORT abtp; 4750 4751 req = mpt_get_request(mpt, FALSE); 4752 if (req == NULL) { 4753 return (-1); 4754 } 4755 abtp = req->req_vbuf; 4756 memset(abtp, 0, sizeof (*abtp)); 4757 4758 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4759 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 4760 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 4761 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 4762 error = 0; 4763 if (mpt->is_fc || mpt->is_sas) { 4764 mpt_send_cmd(mpt, req); 4765 } else { 4766 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); 4767 } 4768 return (error); 4769 } 4770 4771 /* 4772 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 4773 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 4774 * FC929 to set bogus FC_RSP fields (nonzero residuals 4775 * but w/o RESID fields set). This causes QLogic initiators 4776 * to think maybe that a frame was lost. 4777 * 4778 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 4779 * we use allocated requests to do TARGET_ASSIST and we 4780 * need to know when to release them. 4781 */ 4782 4783 static void 4784 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 4785 uint8_t status, uint8_t const *sense_data) 4786 { 4787 uint8_t *cmd_vbuf; 4788 mpt_tgt_state_t *tgt; 4789 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 4790 request_t *req; 4791 bus_addr_t paddr; 4792 int resplen = 0; 4793 uint32_t fl; 4794 4795 cmd_vbuf = cmd_req->req_vbuf; 4796 cmd_vbuf += MPT_RQSL(mpt); 4797 tgt = MPT_TGT_STATE(mpt, cmd_req); 4798 4799 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4800 if (mpt->outofbeer == 0) { 4801 mpt->outofbeer = 1; 4802 xpt_freeze_simq(mpt->sim, 1); 4803 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4804 } 4805 if (ccb) { 4806 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4807 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4808 xpt_done(ccb); 4809 } else { 4810 mpt_prt(mpt, 4811 "could not allocate status request- dropping\n"); 4812 } 4813 return; 4814 } 4815 req->ccb = ccb; 4816 if (ccb) { 4817 ccb->ccb_h.ccb_mpt_ptr = mpt; 4818 ccb->ccb_h.ccb_req_ptr = req; 4819 } 4820 4821 /* 4822 * Record the currently active ccb, if any, and the 4823 * request for it in our target state area. 4824 */ 4825 tgt->ccb = ccb; 4826 tgt->req = req; 4827 tgt->state = TGT_STATE_SENDING_STATUS; 4828 4829 tp = req->req_vbuf; 4830 paddr = req->req_pbuf; 4831 paddr += MPT_RQSL(mpt); 4832 4833 memset(tp, 0, sizeof (*tp)); 4834 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 4835 if (mpt->is_fc) { 4836 PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 4837 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 4838 uint8_t *sts_vbuf; 4839 uint32_t *rsp; 4840 4841 sts_vbuf = req->req_vbuf; 4842 sts_vbuf += MPT_RQSL(mpt); 4843 rsp = (uint32_t *) sts_vbuf; 4844 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 4845 4846 /* 4847 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 4848 * It has to be big-endian in memory and is organized 4849 * in 32 bit words, which are much easier to deal with 4850 * as words which are swizzled as needed. 4851 * 4852 * All we're filling here is the FC_RSP payload. 4853 * We may just have the chip synthesize it if 4854 * we have no residual and an OK status. 4855 * 4856 */ 4857 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 4858 4859 rsp[2] = status; 4860 if (tgt->resid) { 4861 rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ 4862 rsp[3] = htobe32(tgt->resid); 4863 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4864 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4865 #endif 4866 } 4867 if (status == SCSI_STATUS_CHECK_COND) { 4868 int i; 4869 4870 rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! */ 4871 rsp[4] = htobe32(MPT_SENSE_SIZE); 4872 if (sense_data) { 4873 memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE); 4874 } else { 4875 mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI" 4876 "TION but no sense data?\n"); 4877 memset(&rsp, 0, MPT_SENSE_SIZE); 4878 } 4879 for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) { 4880 rsp[i] = htobe32(rsp[i]); 4881 } 4882 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4883 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4884 #endif 4885 } 4886 #ifndef WE_TRUST_AUTO_GOOD_STATUS 4887 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4888 #endif 4889 rsp[2] = htobe32(rsp[2]); 4890 } else if (mpt->is_sas) { 4891 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4892 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; 4893 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); 4894 } else { 4895 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4896 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; 4897 tp->StatusCode = status; 4898 tp->QueueTag = htole16(sp->Tag); 4899 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); 4900 } 4901 4902 tp->ReplyWord = htole32(tgt->reply_desc); 4903 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4904 4905 #ifdef WE_CAN_USE_AUTO_REPOST 4906 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; 4907 #endif 4908 if (status == SCSI_STATUS_OK && resplen == 0) { 4909 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; 4910 } else { 4911 tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); 4912 fl = 4913 MPI_SGE_FLAGS_HOST_TO_IOC | 4914 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4915 MPI_SGE_FLAGS_LAST_ELEMENT | 4916 MPI_SGE_FLAGS_END_OF_LIST | 4917 MPI_SGE_FLAGS_END_OF_BUFFER; 4918 fl <<= MPI_SGE_FLAGS_SHIFT; 4919 fl |= resplen; 4920 tp->StatusDataSGE.FlagsLength = htole32(fl); 4921 } 4922 4923 mpt_lprt(mpt, MPT_PRT_DEBUG, 4924 "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n", 4925 ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req, 4926 req->serno, tgt->resid); 4927 if (ccb) { 4928 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4929 mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb); 4930 } 4931 mpt_send_cmd(mpt, req); 4932 } 4933 4934 static void 4935 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, 4936 tgt_resource_t *trtp, int init_id) 4937 { 4938 struct ccb_immediate_notify *inot; 4939 mpt_tgt_state_t *tgt; 4940 4941 tgt = MPT_TGT_STATE(mpt, req); 4942 inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots); 4943 if (inot == NULL) { 4944 mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n"); 4945 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL); 4946 return; 4947 } 4948 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); 4949 mpt_lprt(mpt, MPT_PRT_DEBUG1, 4950 "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun); 4951 4952 inot->initiator_id = init_id; /* XXX */ 4953 /* 4954 * This is a somewhat grotesque attempt to map from task management 4955 * to old style SCSI messages. God help us all. 4956 */ 4957 switch (fc) { 4958 case MPT_ABORT_TASK_SET: 4959 inot->arg = MSG_ABORT_TAG; 4960 break; 4961 case MPT_CLEAR_TASK_SET: 4962 inot->arg = MSG_CLEAR_TASK_SET; 4963 break; 4964 case MPT_TARGET_RESET: 4965 inot->arg = MSG_TARGET_RESET; 4966 break; 4967 case MPT_CLEAR_ACA: 4968 inot->arg = MSG_CLEAR_ACA; 4969 break; 4970 case MPT_TERMINATE_TASK: 4971 inot->arg = MSG_ABORT_TAG; 4972 break; 4973 default: 4974 inot->arg = MSG_NOOP; 4975 break; 4976 } 4977 /* 4978 * XXX KDM we need the sequence/tag number for the target of the 4979 * task management operation, especially if it is an abort. 4980 */ 4981 tgt->ccb = (union ccb *) inot; 4982 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 4983 xpt_done((union ccb *)inot); 4984 } 4985 4986 static void 4987 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 4988 { 4989 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 4990 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 4991 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 4992 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 4993 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 4994 '0', '0', '0', '1' 4995 }; 4996 struct ccb_accept_tio *atiop; 4997 lun_id_t lun; 4998 int tag_action = 0; 4999 mpt_tgt_state_t *tgt; 5000 tgt_resource_t *trtp = NULL; 5001 U8 *lunptr; 5002 U8 *vbuf; 5003 U16 itag; 5004 U16 ioindex; 5005 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 5006 uint8_t *cdbp; 5007 5008 /* 5009 * Stash info for the current command where we can get at it later. 5010 */ 5011 vbuf = req->req_vbuf; 5012 vbuf += MPT_RQSL(mpt); 5013 5014 /* 5015 * Get our state pointer set up. 5016 */ 5017 tgt = MPT_TGT_STATE(mpt, req); 5018 if (tgt->state != TGT_STATE_LOADED) { 5019 mpt_tgt_dump_req_state(mpt, req); 5020 panic("bad target state in mpt_scsi_tgt_atio"); 5021 } 5022 memset(tgt, 0, sizeof (mpt_tgt_state_t)); 5023 tgt->state = TGT_STATE_IN_CAM; 5024 tgt->reply_desc = reply_desc; 5025 ioindex = GET_IO_INDEX(reply_desc); 5026 if (mpt->verbose >= MPT_PRT_DEBUG) { 5027 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 5028 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 5029 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 5030 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 5031 } 5032 if (mpt->is_fc) { 5033 PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 5034 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 5035 if (fc->FcpCntl[2]) { 5036 /* 5037 * Task Management Request 5038 */ 5039 switch (fc->FcpCntl[2]) { 5040 case 0x2: 5041 fct = MPT_ABORT_TASK_SET; 5042 break; 5043 case 0x4: 5044 fct = MPT_CLEAR_TASK_SET; 5045 break; 5046 case 0x20: 5047 fct = MPT_TARGET_RESET; 5048 break; 5049 case 0x40: 5050 fct = MPT_CLEAR_ACA; 5051 break; 5052 case 0x80: 5053 fct = MPT_TERMINATE_TASK; 5054 break; 5055 default: 5056 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 5057 fc->FcpCntl[2]); 5058 mpt_scsi_tgt_status(mpt, 0, req, 5059 SCSI_STATUS_OK, 0); 5060 return; 5061 } 5062 } else { 5063 switch (fc->FcpCntl[1]) { 5064 case 0: 5065 tag_action = MSG_SIMPLE_Q_TAG; 5066 break; 5067 case 1: 5068 tag_action = MSG_HEAD_OF_Q_TAG; 5069 break; 5070 case 2: 5071 tag_action = MSG_ORDERED_Q_TAG; 5072 break; 5073 default: 5074 /* 5075 * Bah. Ignore Untagged Queing and ACA 5076 */ 5077 tag_action = MSG_SIMPLE_Q_TAG; 5078 break; 5079 } 5080 } 5081 tgt->resid = be32toh(fc->FcpDl); 5082 cdbp = fc->FcpCdb; 5083 lunptr = fc->FcpLun; 5084 itag = be16toh(fc->OptionalOxid); 5085 } else if (mpt->is_sas) { 5086 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; 5087 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; 5088 cdbp = ssp->CDB; 5089 lunptr = ssp->LogicalUnitNumber; 5090 itag = ssp->InitiatorTag; 5091 } else { 5092 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; 5093 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; 5094 cdbp = sp->CDB; 5095 lunptr = sp->LogicalUnitNumber; 5096 itag = sp->Tag; 5097 } 5098 5099 /* 5100 * Generate a simple lun 5101 */ 5102 switch (lunptr[0] & 0xc0) { 5103 case 0x40: 5104 lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; 5105 break; 5106 case 0: 5107 lun = lunptr[1]; 5108 break; 5109 default: 5110 mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n"); 5111 lun = 0xffff; 5112 break; 5113 } 5114 5115 /* 5116 * Deal with non-enabled or bad luns here. 5117 */ 5118 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || 5119 mpt->trt[lun].enabled == 0) { 5120 if (mpt->twildcard) { 5121 trtp = &mpt->trt_wildcard; 5122 } else if (fct == MPT_NIL_TMT_VALUE) { 5123 /* 5124 * In this case, we haven't got an upstream listener 5125 * for either a specific lun or wildcard luns. We 5126 * have to make some sensible response. For regular 5127 * inquiry, just return some NOT HERE inquiry data. 5128 * For VPD inquiry, report illegal field in cdb. 5129 * For REQUEST SENSE, just return NO SENSE data. 5130 * REPORT LUNS gets illegal command. 5131 * All other commands get 'no such device'. 5132 */ 5133 uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; 5134 size_t len; 5135 5136 memset(buf, 0, MPT_SENSE_SIZE); 5137 cond = SCSI_STATUS_CHECK_COND; 5138 buf[0] = 0xf0; 5139 buf[2] = 0x5; 5140 buf[7] = 0x8; 5141 sp = buf; 5142 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5143 5144 switch (cdbp[0]) { 5145 case INQUIRY: 5146 { 5147 if (cdbp[1] != 0) { 5148 buf[12] = 0x26; 5149 buf[13] = 0x01; 5150 break; 5151 } 5152 len = min(tgt->resid, cdbp[4]); 5153 len = min(len, sizeof (null_iqd)); 5154 mpt_lprt(mpt, MPT_PRT_DEBUG, 5155 "local inquiry %ld bytes\n", (long) len); 5156 mpt_scsi_tgt_local(mpt, req, lun, 1, 5157 null_iqd, len); 5158 return; 5159 } 5160 case REQUEST_SENSE: 5161 { 5162 buf[2] = 0x0; 5163 len = min(tgt->resid, cdbp[4]); 5164 len = min(len, sizeof (buf)); 5165 mpt_lprt(mpt, MPT_PRT_DEBUG, 5166 "local reqsense %ld bytes\n", (long) len); 5167 mpt_scsi_tgt_local(mpt, req, lun, 1, 5168 buf, len); 5169 return; 5170 } 5171 case REPORT_LUNS: 5172 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); 5173 buf[12] = 0x26; 5174 return; 5175 default: 5176 mpt_lprt(mpt, MPT_PRT_DEBUG, 5177 "CMD 0x%x to unmanaged lun %u\n", 5178 cdbp[0], lun); 5179 buf[12] = 0x25; 5180 break; 5181 } 5182 mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); 5183 return; 5184 } 5185 /* otherwise, leave trtp NULL */ 5186 } else { 5187 trtp = &mpt->trt[lun]; 5188 } 5189 5190 /* 5191 * Deal with any task management 5192 */ 5193 if (fct != MPT_NIL_TMT_VALUE) { 5194 if (trtp == NULL) { 5195 mpt_prt(mpt, "task mgmt function %x but no listener\n", 5196 fct); 5197 mpt_scsi_tgt_status(mpt, 0, req, 5198 SCSI_STATUS_OK, 0); 5199 } else { 5200 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, 5201 GET_INITIATOR_INDEX(reply_desc)); 5202 } 5203 return; 5204 } 5205 5206 5207 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); 5208 if (atiop == NULL) { 5209 mpt_lprt(mpt, MPT_PRT_WARN, 5210 "no ATIOs for lun %u- sending back %s\n", lun, 5211 mpt->tenabled? "QUEUE FULL" : "BUSY"); 5212 mpt_scsi_tgt_status(mpt, NULL, req, 5213 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 5214 NULL); 5215 return; 5216 } 5217 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 5218 mpt_lprt(mpt, MPT_PRT_DEBUG1, 5219 "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); 5220 atiop->ccb_h.ccb_mpt_ptr = mpt; 5221 atiop->ccb_h.status = CAM_CDB_RECVD; 5222 atiop->ccb_h.target_lun = lun; 5223 atiop->sense_len = 0; 5224 atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 5225 atiop->cdb_len = mpt_cdblen(cdbp[0], 16); 5226 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 5227 5228 /* 5229 * The tag we construct here allows us to find the 5230 * original request that the command came in with. 5231 * 5232 * This way we don't have to depend on anything but the 5233 * tag to find things when CCBs show back up from CAM. 5234 */ 5235 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5236 tgt->tag_id = atiop->tag_id; 5237 if (tag_action) { 5238 atiop->tag_action = tag_action; 5239 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 5240 } 5241 if (mpt->verbose >= MPT_PRT_DEBUG) { 5242 int i; 5243 mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, 5244 atiop->ccb_h.target_lun); 5245 for (i = 0; i < atiop->cdb_len; i++) { 5246 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 5247 (i == (atiop->cdb_len - 1))? '>' : ' '); 5248 } 5249 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 5250 itag, atiop->tag_id, tgt->reply_desc, tgt->resid); 5251 } 5252 5253 xpt_done((union ccb *)atiop); 5254 } 5255 5256 static void 5257 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 5258 { 5259 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5260 5261 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 5262 "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, 5263 tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, 5264 tgt->tag_id, tgt->state); 5265 } 5266 5267 static void 5268 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 5269 { 5270 5271 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 5272 req->index, req->index, req->state); 5273 mpt_tgt_dump_tgt_state(mpt, req); 5274 } 5275 5276 static int 5277 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 5278 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 5279 { 5280 int dbg; 5281 union ccb *ccb; 5282 U16 status; 5283 5284 if (reply_frame == NULL) { 5285 /* 5286 * Figure out what the state of the command is. 5287 */ 5288 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5289 5290 #ifdef INVARIANTS 5291 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 5292 if (tgt->req) { 5293 mpt_req_not_spcl(mpt, tgt->req, 5294 "turbo scsi_tgt_reply associated req", __LINE__); 5295 } 5296 #endif 5297 switch(tgt->state) { 5298 case TGT_STATE_LOADED: 5299 /* 5300 * This is a new command starting. 5301 */ 5302 mpt_scsi_tgt_atio(mpt, req, reply_desc); 5303 break; 5304 case TGT_STATE_MOVING_DATA: 5305 { 5306 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 5307 5308 ccb = tgt->ccb; 5309 if (tgt->req == NULL) { 5310 panic("mpt: turbo target reply with null " 5311 "associated request moving data"); 5312 /* NOTREACHED */ 5313 } 5314 if (ccb == NULL) { 5315 if (tgt->is_local == 0) { 5316 panic("mpt: turbo target reply with " 5317 "null associated ccb moving data"); 5318 /* NOTREACHED */ 5319 } 5320 mpt_lprt(mpt, MPT_PRT_DEBUG, 5321 "TARGET_ASSIST local done\n"); 5322 TAILQ_REMOVE(&mpt->request_pending_list, 5323 tgt->req, links); 5324 mpt_free_request(mpt, tgt->req); 5325 tgt->req = NULL; 5326 mpt_scsi_tgt_status(mpt, NULL, req, 5327 0, NULL); 5328 return (TRUE); 5329 } 5330 tgt->ccb = NULL; 5331 tgt->nxfers++; 5332 mpt_req_untimeout(req, mpt_timeout, ccb); 5333 mpt_lprt(mpt, MPT_PRT_DEBUG, 5334 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 5335 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 5336 /* 5337 * Free the Target Assist Request 5338 */ 5339 KASSERT(tgt->req->ccb == ccb, 5340 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 5341 tgt->req->serno, tgt->req->ccb)); 5342 TAILQ_REMOVE(&mpt->request_pending_list, 5343 tgt->req, links); 5344 mpt_free_request(mpt, tgt->req); 5345 tgt->req = NULL; 5346 5347 /* 5348 * Do we need to send status now? That is, are 5349 * we done with all our data transfers? 5350 */ 5351 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 5352 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5353 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5354 KASSERT(ccb->ccb_h.status, 5355 ("zero ccb sts at %d", __LINE__)); 5356 tgt->state = TGT_STATE_IN_CAM; 5357 if (mpt->outofbeer) { 5358 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5359 mpt->outofbeer = 0; 5360 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5361 } 5362 xpt_done(ccb); 5363 break; 5364 } 5365 /* 5366 * Otherwise, send status (and sense) 5367 */ 5368 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5369 sp = sense; 5370 memcpy(sp, &ccb->csio.sense_data, 5371 min(ccb->csio.sense_len, MPT_SENSE_SIZE)); 5372 } 5373 mpt_scsi_tgt_status(mpt, ccb, req, 5374 ccb->csio.scsi_status, sp); 5375 break; 5376 } 5377 case TGT_STATE_SENDING_STATUS: 5378 case TGT_STATE_MOVING_DATA_AND_STATUS: 5379 { 5380 int ioindex; 5381 ccb = tgt->ccb; 5382 5383 if (tgt->req == NULL) { 5384 panic("mpt: turbo target reply with null " 5385 "associated request sending status"); 5386 /* NOTREACHED */ 5387 } 5388 5389 if (ccb) { 5390 tgt->ccb = NULL; 5391 if (tgt->state == 5392 TGT_STATE_MOVING_DATA_AND_STATUS) { 5393 tgt->nxfers++; 5394 } 5395 mpt_req_untimeout(req, mpt_timeout, ccb); 5396 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5397 ccb->ccb_h.status |= CAM_SENT_SENSE; 5398 } 5399 mpt_lprt(mpt, MPT_PRT_DEBUG, 5400 "TARGET_STATUS tag %x sts %x flgs %x req " 5401 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 5402 ccb->ccb_h.flags, tgt->req); 5403 /* 5404 * Free the Target Send Status Request 5405 */ 5406 KASSERT(tgt->req->ccb == ccb, 5407 ("tgt->req %p:%u tgt->req->ccb %p", 5408 tgt->req, tgt->req->serno, tgt->req->ccb)); 5409 /* 5410 * Notify CAM that we're done 5411 */ 5412 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5413 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5414 KASSERT(ccb->ccb_h.status, 5415 ("ZERO ccb sts at %d", __LINE__)); 5416 tgt->ccb = NULL; 5417 } else { 5418 mpt_lprt(mpt, MPT_PRT_DEBUG, 5419 "TARGET_STATUS non-CAM for req %p:%u\n", 5420 tgt->req, tgt->req->serno); 5421 } 5422 TAILQ_REMOVE(&mpt->request_pending_list, 5423 tgt->req, links); 5424 mpt_free_request(mpt, tgt->req); 5425 tgt->req = NULL; 5426 5427 /* 5428 * And re-post the Command Buffer. 5429 * This will reset the state. 5430 */ 5431 ioindex = GET_IO_INDEX(reply_desc); 5432 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5433 tgt->is_local = 0; 5434 mpt_post_target_command(mpt, req, ioindex); 5435 5436 /* 5437 * And post a done for anyone who cares 5438 */ 5439 if (ccb) { 5440 if (mpt->outofbeer) { 5441 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5442 mpt->outofbeer = 0; 5443 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5444 } 5445 xpt_done(ccb); 5446 } 5447 break; 5448 } 5449 case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 5450 tgt->state = TGT_STATE_LOADED; 5451 break; 5452 default: 5453 mpt_prt(mpt, "Unknown Target State 0x%x in Context " 5454 "Reply Function\n", tgt->state); 5455 } 5456 return (TRUE); 5457 } 5458 5459 status = le16toh(reply_frame->IOCStatus); 5460 if (status != MPI_IOCSTATUS_SUCCESS) { 5461 dbg = MPT_PRT_ERROR; 5462 } else { 5463 dbg = MPT_PRT_DEBUG1; 5464 } 5465 5466 mpt_lprt(mpt, dbg, 5467 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 5468 req, req->serno, reply_frame, reply_frame->Function, status); 5469 5470 switch (reply_frame->Function) { 5471 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 5472 { 5473 mpt_tgt_state_t *tgt; 5474 #ifdef INVARIANTS 5475 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 5476 #endif 5477 if (status != MPI_IOCSTATUS_SUCCESS) { 5478 /* 5479 * XXX What to do? 5480 */ 5481 break; 5482 } 5483 tgt = MPT_TGT_STATE(mpt, req); 5484 KASSERT(tgt->state == TGT_STATE_LOADING, 5485 ("bad state 0x%x on reply to buffer post", tgt->state)); 5486 mpt_assign_serno(mpt, req); 5487 tgt->state = TGT_STATE_LOADED; 5488 break; 5489 } 5490 case MPI_FUNCTION_TARGET_ASSIST: 5491 #ifdef INVARIANTS 5492 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 5493 #endif 5494 mpt_prt(mpt, "target assist completion\n"); 5495 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5496 mpt_free_request(mpt, req); 5497 break; 5498 case MPI_FUNCTION_TARGET_STATUS_SEND: 5499 #ifdef INVARIANTS 5500 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 5501 #endif 5502 mpt_prt(mpt, "status send completion\n"); 5503 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5504 mpt_free_request(mpt, req); 5505 break; 5506 case MPI_FUNCTION_TARGET_MODE_ABORT: 5507 { 5508 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 5509 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 5510 PTR_MSG_TARGET_MODE_ABORT abtp = 5511 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 5512 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 5513 #ifdef INVARIANTS 5514 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 5515 #endif 5516 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 5517 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 5518 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5519 mpt_free_request(mpt, req); 5520 break; 5521 } 5522 default: 5523 mpt_prt(mpt, "Unknown Target Address Reply Function code: " 5524 "0x%x\n", reply_frame->Function); 5525 break; 5526 } 5527 return (TRUE); 5528 } 5529