1 /*- 2 * FreeBSD/CAM specific routines for LSI '909 FC adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 2000, 2001 by Greg Ansley 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /*- 29 * Copyright (c) 2002, 2006 by Matthew Jacob 30 * All rights reserved. 31 * 32 * Redistribution and use in source and binary forms, with or without 33 * modification, are permitted provided that the following conditions are 34 * met: 35 * 1. Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 38 * substantially similar to the "NO WARRANTY" disclaimer below 39 * ("Disclaimer") and any redistribution must be conditioned upon including 40 * a substantially similar Disclaimer requirement for further binary 41 * redistribution. 42 * 3. Neither the names of the above listed copyright holders nor the names 43 * of any contributors may be used to endorse or promote products derived 44 * from this software without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 * 58 * Support from Chris Ellsworth in order to make SAS adapters work 59 * is gratefully acknowledged. 60 * 61 * Support from LSI-Logic has also gone a great deal toward making this a 62 * workable subsystem and is gratefully acknowledged. 63 */ 64 /*- 65 * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 66 * Copyright (c) 2005, WHEEL Sp. z o.o. 67 * Copyright (c) 2004, 2005 Justin T. Gibbs 68 * All rights reserved. 69 * 70 * Redistribution and use in source and binary forms, with or without 71 * modification, are permitted provided that the following conditions are 72 * met: 73 * 1. Redistributions of source code must retain the above copyright 74 * notice, this list of conditions and the following disclaimer. 75 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 76 * substantially similar to the "NO WARRANTY" disclaimer below 77 * ("Disclaimer") and any redistribution must be conditioned upon including 78 * a substantially similar Disclaimer requirement for further binary 79 * redistribution. 80 * 3. Neither the names of the above listed copyright holders nor the names 81 * of any contributors may be used to endorse or promote products derived 82 * from this software without specific prior written permission. 83 * 84 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 85 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 86 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 87 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 88 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 89 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 90 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 91 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 92 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 93 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 94 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 95 */ 96 #include <sys/cdefs.h> 97 __FBSDID("$FreeBSD$"); 98 99 #include <dev/mpt/mpt.h> 100 #include <dev/mpt/mpt_cam.h> 101 #include <dev/mpt/mpt_raid.h> 102 103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */ 104 #include "dev/mpt/mpilib/mpi_init.h" 105 #include "dev/mpt/mpilib/mpi_targ.h" 106 #include "dev/mpt/mpilib/mpi_fc.h" 107 #include "dev/mpt/mpilib/mpi_sas.h" 108 #if __FreeBSD_version >= 500000 109 #include <sys/sysctl.h> 110 #endif 111 #include <sys/callout.h> 112 #include <sys/kthread.h> 113 114 #if __FreeBSD_version >= 700025 115 #ifndef CAM_NEW_TRAN_CODE 116 #define CAM_NEW_TRAN_CODE 1 117 #endif 118 #endif 119 120 static void mpt_poll(struct cam_sim *); 121 static timeout_t mpt_timeout; 122 static void mpt_action(struct cam_sim *, union ccb *); 123 static int 124 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *); 125 static void mpt_setwidth(struct mpt_softc *, int, int); 126 static void mpt_setsync(struct mpt_softc *, int, int, int); 127 static int mpt_update_spi_config(struct mpt_softc *, int); 128 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended); 129 130 static mpt_reply_handler_t mpt_scsi_reply_handler; 131 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler; 132 static mpt_reply_handler_t mpt_fc_els_reply_handler; 133 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *, 134 MSG_DEFAULT_REPLY *); 135 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int); 136 static int mpt_fc_reset_link(struct mpt_softc *, int); 137 138 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt); 139 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt); 140 static void mpt_recovery_thread(void *arg); 141 static void mpt_recover_commands(struct mpt_softc *mpt); 142 143 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int, 144 u_int, u_int, u_int, int); 145 146 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int); 147 static void mpt_post_target_command(struct mpt_softc *, request_t *, int); 148 static int mpt_add_els_buffers(struct mpt_softc *mpt); 149 static int mpt_add_target_commands(struct mpt_softc *mpt); 150 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t); 151 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t); 152 static void mpt_target_start_io(struct mpt_softc *, union ccb *); 153 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *); 154 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *); 155 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *, 156 uint8_t, uint8_t const *); 157 static void 158 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t, 159 tgt_resource_t *, int); 160 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *); 161 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *); 162 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler; 163 static mpt_reply_handler_t mpt_sata_pass_reply_handler; 164 165 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE; 166 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE; 167 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE; 168 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE; 169 170 static mpt_probe_handler_t mpt_cam_probe; 171 static mpt_attach_handler_t mpt_cam_attach; 172 static mpt_enable_handler_t mpt_cam_enable; 173 static mpt_ready_handler_t mpt_cam_ready; 174 static mpt_event_handler_t mpt_cam_event; 175 static mpt_reset_handler_t mpt_cam_ioc_reset; 176 static mpt_detach_handler_t mpt_cam_detach; 177 178 static struct mpt_personality mpt_cam_personality = 179 { 180 .name = "mpt_cam", 181 .probe = mpt_cam_probe, 182 .attach = mpt_cam_attach, 183 .enable = mpt_cam_enable, 184 .ready = mpt_cam_ready, 185 .event = mpt_cam_event, 186 .reset = mpt_cam_ioc_reset, 187 .detach = mpt_cam_detach, 188 }; 189 190 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND); 191 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1); 192 193 int mpt_enable_sata_wc = -1; 194 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc); 195 196 int 197 mpt_cam_probe(struct mpt_softc *mpt) 198 { 199 int role; 200 201 /* 202 * Only attach to nodes that support the initiator or target role 203 * (or want to) or have RAID physical devices that need CAM pass-thru 204 * support. 205 */ 206 if (mpt->do_cfg_role) { 207 role = mpt->cfg_role; 208 } else { 209 role = mpt->role; 210 } 211 if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 || 212 (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) { 213 return (0); 214 } 215 return (ENODEV); 216 } 217 218 int 219 mpt_cam_attach(struct mpt_softc *mpt) 220 { 221 struct cam_devq *devq; 222 mpt_handler_t handler; 223 int maxq; 224 int error; 225 226 MPT_LOCK(mpt); 227 TAILQ_INIT(&mpt->request_timeout_list); 228 maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))? 229 mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt); 230 231 handler.reply_handler = mpt_scsi_reply_handler; 232 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 233 &scsi_io_handler_id); 234 if (error != 0) { 235 MPT_UNLOCK(mpt); 236 goto cleanup; 237 } 238 239 handler.reply_handler = mpt_scsi_tmf_reply_handler; 240 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 241 &scsi_tmf_handler_id); 242 if (error != 0) { 243 MPT_UNLOCK(mpt); 244 goto cleanup; 245 } 246 247 /* 248 * If we're fibre channel and could support target mode, we register 249 * an ELS reply handler and give it resources. 250 */ 251 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { 252 handler.reply_handler = mpt_fc_els_reply_handler; 253 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 254 &fc_els_handler_id); 255 if (error != 0) { 256 MPT_UNLOCK(mpt); 257 goto cleanup; 258 } 259 if (mpt_add_els_buffers(mpt) == FALSE) { 260 error = ENOMEM; 261 MPT_UNLOCK(mpt); 262 goto cleanup; 263 } 264 maxq -= mpt->els_cmds_allocated; 265 } 266 267 /* 268 * If we support target mode, we register a reply handler for it, 269 * but don't add command resources until we actually enable target 270 * mode. 271 */ 272 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { 273 handler.reply_handler = mpt_scsi_tgt_reply_handler; 274 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 275 &mpt->scsi_tgt_handler_id); 276 if (error != 0) { 277 MPT_UNLOCK(mpt); 278 goto cleanup; 279 } 280 } 281 282 if (mpt->is_sas) { 283 handler.reply_handler = mpt_sata_pass_reply_handler; 284 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 285 &sata_pass_handler_id); 286 if (error != 0) { 287 MPT_UNLOCK(mpt); 288 goto cleanup; 289 } 290 } 291 292 /* 293 * We keep one request reserved for timeout TMF requests. 294 */ 295 mpt->tmf_req = mpt_get_request(mpt, FALSE); 296 if (mpt->tmf_req == NULL) { 297 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n"); 298 error = ENOMEM; 299 MPT_UNLOCK(mpt); 300 goto cleanup; 301 } 302 303 /* 304 * Mark the request as free even though not on the free list. 305 * There is only one TMF request allowed to be outstanding at 306 * a time and the TMF routines perform their own allocation 307 * tracking using the standard state flags. 308 */ 309 mpt->tmf_req->state = REQ_STATE_FREE; 310 maxq--; 311 312 /* 313 * The rest of this is CAM foo, for which we need to drop our lock 314 */ 315 MPT_UNLOCK(mpt); 316 317 if (mpt_spawn_recovery_thread(mpt) != 0) { 318 mpt_prt(mpt, "Unable to spawn recovery thread!\n"); 319 error = ENOMEM; 320 goto cleanup; 321 } 322 323 /* 324 * Create the device queue for our SIM(s). 325 */ 326 devq = cam_simq_alloc(maxq); 327 if (devq == NULL) { 328 mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n"); 329 error = ENOMEM; 330 goto cleanup; 331 } 332 333 /* 334 * Construct our SIM entry. 335 */ 336 mpt->sim = 337 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); 338 if (mpt->sim == NULL) { 339 mpt_prt(mpt, "Unable to allocate CAM SIM!\n"); 340 cam_simq_free(devq); 341 error = ENOMEM; 342 goto cleanup; 343 } 344 345 /* 346 * Register exactly this bus. 347 */ 348 MPT_LOCK(mpt); 349 if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) { 350 mpt_prt(mpt, "Bus registration Failed!\n"); 351 error = ENOMEM; 352 MPT_UNLOCK(mpt); 353 goto cleanup; 354 } 355 356 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim), 357 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 358 mpt_prt(mpt, "Unable to allocate Path!\n"); 359 error = ENOMEM; 360 MPT_UNLOCK(mpt); 361 goto cleanup; 362 } 363 MPT_UNLOCK(mpt); 364 365 /* 366 * Only register a second bus for RAID physical 367 * devices if the controller supports RAID. 368 */ 369 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { 370 return (0); 371 } 372 373 /* 374 * Create a "bus" to export all hidden disks to CAM. 375 */ 376 mpt->phydisk_sim = 377 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); 378 if (mpt->phydisk_sim == NULL) { 379 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n"); 380 error = ENOMEM; 381 goto cleanup; 382 } 383 384 /* 385 * Register this bus. 386 */ 387 MPT_LOCK(mpt); 388 if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) != 389 CAM_SUCCESS) { 390 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); 391 error = ENOMEM; 392 MPT_UNLOCK(mpt); 393 goto cleanup; 394 } 395 396 if (xpt_create_path(&mpt->phydisk_path, NULL, 397 cam_sim_path(mpt->phydisk_sim), 398 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 399 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); 400 error = ENOMEM; 401 MPT_UNLOCK(mpt); 402 goto cleanup; 403 } 404 MPT_UNLOCK(mpt); 405 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); 406 return (0); 407 408 cleanup: 409 mpt_cam_detach(mpt); 410 return (error); 411 } 412 413 /* 414 * Read FC configuration information 415 */ 416 static int 417 mpt_read_config_info_fc(struct mpt_softc *mpt) 418 { 419 char *topology = NULL; 420 int rv; 421 422 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, 423 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); 424 if (rv) { 425 return (-1); 426 } 427 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", 428 mpt->mpt_fcport_page0.Header.PageVersion, 429 mpt->mpt_fcport_page0.Header.PageLength, 430 mpt->mpt_fcport_page0.Header.PageNumber, 431 mpt->mpt_fcport_page0.Header.PageType); 432 433 434 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, 435 sizeof(mpt->mpt_fcport_page0), FALSE, 5000); 436 if (rv) { 437 mpt_prt(mpt, "failed to read FC Port Page 0\n"); 438 return (-1); 439 } 440 mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0); 441 442 mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed; 443 444 switch (mpt->mpt_fcport_page0.Flags & 445 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { 446 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: 447 mpt->mpt_fcport_speed = 0; 448 topology = "<NO LOOP>"; 449 break; 450 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: 451 topology = "N-Port"; 452 break; 453 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: 454 topology = "NL-Port"; 455 break; 456 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: 457 topology = "F-Port"; 458 break; 459 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: 460 topology = "FL-Port"; 461 break; 462 default: 463 mpt->mpt_fcport_speed = 0; 464 topology = "?"; 465 break; 466 } 467 468 mpt_lprt(mpt, MPT_PRT_INFO, 469 "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x " 470 "Speed %u-Gbit\n", topology, 471 mpt->mpt_fcport_page0.WWNN.High, 472 mpt->mpt_fcport_page0.WWNN.Low, 473 mpt->mpt_fcport_page0.WWPN.High, 474 mpt->mpt_fcport_page0.WWPN.Low, 475 mpt->mpt_fcport_speed); 476 #if __FreeBSD_version >= 500000 477 MPT_UNLOCK(mpt); 478 { 479 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); 480 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); 481 482 snprintf(mpt->scinfo.fc.wwnn, 483 sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x", 484 mpt->mpt_fcport_page0.WWNN.High, 485 mpt->mpt_fcport_page0.WWNN.Low); 486 487 snprintf(mpt->scinfo.fc.wwpn, 488 sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x", 489 mpt->mpt_fcport_page0.WWPN.High, 490 mpt->mpt_fcport_page0.WWPN.Low); 491 492 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 493 "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0, 494 "World Wide Node Name"); 495 496 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 497 "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0, 498 "World Wide Port Name"); 499 500 } 501 MPT_LOCK(mpt); 502 #endif 503 return (0); 504 } 505 506 /* 507 * Set FC configuration information. 508 */ 509 static int 510 mpt_set_initial_config_fc(struct mpt_softc *mpt) 511 { 512 513 CONFIG_PAGE_FC_PORT_1 fc; 514 U32 fl; 515 int r, doit = 0; 516 int role; 517 518 r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0, 519 &fc.Header, FALSE, 5000); 520 if (r) { 521 mpt_prt(mpt, "failed to read FC page 1 header\n"); 522 return (mpt_fc_reset_link(mpt, 1)); 523 } 524 525 r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0, 526 &fc.Header, sizeof (fc), FALSE, 5000); 527 if (r) { 528 mpt_prt(mpt, "failed to read FC page 1\n"); 529 return (mpt_fc_reset_link(mpt, 1)); 530 } 531 mpt2host_config_page_fc_port_1(&fc); 532 533 /* 534 * Check our flags to make sure we support the role we want. 535 */ 536 doit = 0; 537 role = 0; 538 fl = fc.Flags; 539 540 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) { 541 role |= MPT_ROLE_INITIATOR; 542 } 543 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { 544 role |= MPT_ROLE_TARGET; 545 } 546 547 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK; 548 549 if (mpt->do_cfg_role == 0) { 550 role = mpt->cfg_role; 551 } else { 552 mpt->do_cfg_role = 0; 553 } 554 555 if (role != mpt->cfg_role) { 556 if (mpt->cfg_role & MPT_ROLE_INITIATOR) { 557 if ((role & MPT_ROLE_INITIATOR) == 0) { 558 mpt_prt(mpt, "adding initiator role\n"); 559 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT; 560 doit++; 561 } else { 562 mpt_prt(mpt, "keeping initiator role\n"); 563 } 564 } else if (role & MPT_ROLE_INITIATOR) { 565 mpt_prt(mpt, "removing initiator role\n"); 566 doit++; 567 } 568 if (mpt->cfg_role & MPT_ROLE_TARGET) { 569 if ((role & MPT_ROLE_TARGET) == 0) { 570 mpt_prt(mpt, "adding target role\n"); 571 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG; 572 doit++; 573 } else { 574 mpt_prt(mpt, "keeping target role\n"); 575 } 576 } else if (role & MPT_ROLE_TARGET) { 577 mpt_prt(mpt, "removing target role\n"); 578 doit++; 579 } 580 mpt->role = mpt->cfg_role; 581 } 582 583 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { 584 if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) { 585 mpt_prt(mpt, "adding OXID option\n"); 586 fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID; 587 doit++; 588 } 589 } 590 591 if (doit) { 592 fc.Flags = fl; 593 host2mpt_config_page_fc_port_1(&fc); 594 r = mpt_write_cfg_page(mpt, 595 MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header, 596 sizeof(fc), FALSE, 5000); 597 if (r != 0) { 598 mpt_prt(mpt, "failed to update NVRAM with changes\n"); 599 return (0); 600 } 601 mpt_prt(mpt, "NOTE: NVRAM changes will not take " 602 "effect until next reboot or IOC reset\n"); 603 } 604 return (0); 605 } 606 607 static int 608 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo) 609 { 610 ConfigExtendedPageHeader_t hdr; 611 struct mptsas_phyinfo *phyinfo; 612 SasIOUnitPage0_t *buffer; 613 int error, len, i; 614 615 error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION, 616 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 617 &hdr, 0, 10000); 618 if (error) 619 goto out; 620 if (hdr.ExtPageLength == 0) { 621 error = ENXIO; 622 goto out; 623 } 624 625 len = hdr.ExtPageLength * 4; 626 buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO); 627 if (buffer == NULL) { 628 error = ENOMEM; 629 goto out; 630 } 631 632 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 633 0, &hdr, buffer, len, 0, 10000); 634 if (error) { 635 free(buffer, M_DEVBUF); 636 goto out; 637 } 638 639 portinfo->num_phys = buffer->NumPhys; 640 portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) * 641 portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO); 642 if (portinfo->phy_info == NULL) { 643 free(buffer, M_DEVBUF); 644 error = ENOMEM; 645 goto out; 646 } 647 648 for (i = 0; i < portinfo->num_phys; i++) { 649 phyinfo = &portinfo->phy_info[i]; 650 phyinfo->phy_num = i; 651 phyinfo->port_id = buffer->PhyData[i].Port; 652 phyinfo->negotiated_link_rate = 653 buffer->PhyData[i].NegotiatedLinkRate; 654 phyinfo->handle = 655 le16toh(buffer->PhyData[i].ControllerDevHandle); 656 } 657 658 free(buffer, M_DEVBUF); 659 out: 660 return (error); 661 } 662 663 static int 664 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info, 665 uint32_t form, uint32_t form_specific) 666 { 667 ConfigExtendedPageHeader_t hdr; 668 SasPhyPage0_t *buffer; 669 int error; 670 671 error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0, 672 MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr, 673 0, 10000); 674 if (error) 675 goto out; 676 if (hdr.ExtPageLength == 0) { 677 error = ENXIO; 678 goto out; 679 } 680 681 buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); 682 if (buffer == NULL) { 683 error = ENOMEM; 684 goto out; 685 } 686 687 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 688 form + form_specific, &hdr, buffer, 689 sizeof(SasPhyPage0_t), 0, 10000); 690 if (error) { 691 free(buffer, M_DEVBUF); 692 goto out; 693 } 694 695 phy_info->hw_link_rate = buffer->HwLinkRate; 696 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate; 697 phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle); 698 phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle); 699 700 free(buffer, M_DEVBUF); 701 out: 702 return (error); 703 } 704 705 static int 706 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info, 707 uint32_t form, uint32_t form_specific) 708 { 709 ConfigExtendedPageHeader_t hdr; 710 SasDevicePage0_t *buffer; 711 uint64_t sas_address; 712 int error = 0; 713 714 bzero(device_info, sizeof(*device_info)); 715 error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0, 716 MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, 717 &hdr, 0, 10000); 718 if (error) 719 goto out; 720 if (hdr.ExtPageLength == 0) { 721 error = ENXIO; 722 goto out; 723 } 724 725 buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); 726 if (buffer == NULL) { 727 error = ENOMEM; 728 goto out; 729 } 730 731 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 732 form + form_specific, &hdr, buffer, 733 sizeof(SasDevicePage0_t), 0, 10000); 734 if (error) { 735 free(buffer, M_DEVBUF); 736 goto out; 737 } 738 739 device_info->dev_handle = le16toh(buffer->DevHandle); 740 device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle); 741 device_info->enclosure_handle = le16toh(buffer->EnclosureHandle); 742 device_info->slot = le16toh(buffer->Slot); 743 device_info->phy_num = buffer->PhyNum; 744 device_info->physical_port = buffer->PhysicalPort; 745 device_info->target_id = buffer->TargetID; 746 device_info->bus = buffer->Bus; 747 bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t)); 748 device_info->sas_address = le64toh(sas_address); 749 device_info->device_info = le32toh(buffer->DeviceInfo); 750 751 free(buffer, M_DEVBUF); 752 out: 753 return (error); 754 } 755 756 /* 757 * Read SAS configuration information. Nothing to do yet. 758 */ 759 static int 760 mpt_read_config_info_sas(struct mpt_softc *mpt) 761 { 762 struct mptsas_portinfo *portinfo; 763 struct mptsas_phyinfo *phyinfo; 764 int error, i; 765 766 portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO); 767 if (portinfo == NULL) 768 return (ENOMEM); 769 770 error = mptsas_sas_io_unit_pg0(mpt, portinfo); 771 if (error) { 772 free(portinfo, M_DEVBUF); 773 return (0); 774 } 775 776 for (i = 0; i < portinfo->num_phys; i++) { 777 phyinfo = &portinfo->phy_info[i]; 778 error = mptsas_sas_phy_pg0(mpt, phyinfo, 779 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << 780 MPI_SAS_PHY_PGAD_FORM_SHIFT), i); 781 if (error) 782 break; 783 error = mptsas_sas_device_pg0(mpt, &phyinfo->identify, 784 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 785 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 786 phyinfo->handle); 787 if (error) 788 break; 789 phyinfo->identify.phy_num = phyinfo->phy_num = i; 790 if (phyinfo->attached.dev_handle) 791 error = mptsas_sas_device_pg0(mpt, 792 &phyinfo->attached, 793 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 794 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 795 phyinfo->attached.dev_handle); 796 if (error) 797 break; 798 } 799 mpt->sas_portinfo = portinfo; 800 return (0); 801 } 802 803 static void 804 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo, 805 int enabled) 806 { 807 SataPassthroughRequest_t *pass; 808 request_t *req; 809 int error, status; 810 811 req = mpt_get_request(mpt, 0); 812 if (req == NULL) 813 return; 814 815 pass = req->req_vbuf; 816 bzero(pass, sizeof(SataPassthroughRequest_t)); 817 pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH; 818 pass->TargetID = devinfo->target_id; 819 pass->Bus = devinfo->bus; 820 pass->PassthroughFlags = 0; 821 pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED; 822 pass->DataLength = 0; 823 pass->MsgContext = htole32(req->index | sata_pass_handler_id); 824 pass->CommandFIS[0] = 0x27; 825 pass->CommandFIS[1] = 0x80; 826 pass->CommandFIS[2] = 0xef; 827 pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82; 828 pass->CommandFIS[7] = 0x40; 829 pass->CommandFIS[15] = 0x08; 830 831 mpt_check_doorbell(mpt); 832 mpt_send_cmd(mpt, req); 833 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0, 834 10 * 1000); 835 if (error) { 836 mpt_free_request(mpt, req); 837 printf("error %d sending passthrough\n", error); 838 return; 839 } 840 841 status = le16toh(req->IOCStatus); 842 if (status != MPI_IOCSTATUS_SUCCESS) { 843 mpt_free_request(mpt, req); 844 printf("IOCSTATUS %d\n", status); 845 return; 846 } 847 848 mpt_free_request(mpt, req); 849 } 850 851 /* 852 * Set SAS configuration information. Nothing to do yet. 853 */ 854 static int 855 mpt_set_initial_config_sas(struct mpt_softc *mpt) 856 { 857 struct mptsas_phyinfo *phyinfo; 858 int i; 859 860 if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) { 861 for (i = 0; i < mpt->sas_portinfo->num_phys; i++) { 862 phyinfo = &mpt->sas_portinfo->phy_info[i]; 863 if (phyinfo->attached.dev_handle == 0) 864 continue; 865 if ((phyinfo->attached.device_info & 866 MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0) 867 continue; 868 if (bootverbose) 869 device_printf(mpt->dev, 870 "%sabling SATA WC on phy %d\n", 871 (mpt_enable_sata_wc) ? "En" : "Dis", i); 872 mptsas_set_sata_wc(mpt, &phyinfo->attached, 873 mpt_enable_sata_wc); 874 } 875 } 876 877 return (0); 878 } 879 880 static int 881 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, 882 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 883 { 884 if (req != NULL) { 885 886 if (reply_frame != NULL) { 887 req->IOCStatus = le16toh(reply_frame->IOCStatus); 888 } 889 req->state &= ~REQ_STATE_QUEUED; 890 req->state |= REQ_STATE_DONE; 891 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 892 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 893 wakeup(req); 894 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { 895 /* 896 * Whew- we can free this request (late completion) 897 */ 898 mpt_free_request(mpt, req); 899 } 900 } 901 902 return (TRUE); 903 } 904 905 /* 906 * Read SCSI configuration information 907 */ 908 static int 909 mpt_read_config_info_spi(struct mpt_softc *mpt) 910 { 911 int rv, i; 912 913 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, 914 &mpt->mpt_port_page0.Header, FALSE, 5000); 915 if (rv) { 916 return (-1); 917 } 918 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", 919 mpt->mpt_port_page0.Header.PageVersion, 920 mpt->mpt_port_page0.Header.PageLength, 921 mpt->mpt_port_page0.Header.PageNumber, 922 mpt->mpt_port_page0.Header.PageType); 923 924 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, 925 &mpt->mpt_port_page1.Header, FALSE, 5000); 926 if (rv) { 927 return (-1); 928 } 929 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", 930 mpt->mpt_port_page1.Header.PageVersion, 931 mpt->mpt_port_page1.Header.PageLength, 932 mpt->mpt_port_page1.Header.PageNumber, 933 mpt->mpt_port_page1.Header.PageType); 934 935 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, 936 &mpt->mpt_port_page2.Header, FALSE, 5000); 937 if (rv) { 938 return (-1); 939 } 940 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", 941 mpt->mpt_port_page2.Header.PageVersion, 942 mpt->mpt_port_page2.Header.PageLength, 943 mpt->mpt_port_page2.Header.PageNumber, 944 mpt->mpt_port_page2.Header.PageType); 945 946 for (i = 0; i < 16; i++) { 947 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 948 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); 949 if (rv) { 950 return (-1); 951 } 952 mpt_lprt(mpt, MPT_PRT_DEBUG, 953 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, 954 mpt->mpt_dev_page0[i].Header.PageVersion, 955 mpt->mpt_dev_page0[i].Header.PageLength, 956 mpt->mpt_dev_page0[i].Header.PageNumber, 957 mpt->mpt_dev_page0[i].Header.PageType); 958 959 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 960 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); 961 if (rv) { 962 return (-1); 963 } 964 mpt_lprt(mpt, MPT_PRT_DEBUG, 965 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, 966 mpt->mpt_dev_page1[i].Header.PageVersion, 967 mpt->mpt_dev_page1[i].Header.PageLength, 968 mpt->mpt_dev_page1[i].Header.PageNumber, 969 mpt->mpt_dev_page1[i].Header.PageType); 970 } 971 972 /* 973 * At this point, we don't *have* to fail. As long as we have 974 * valid config header information, we can (barely) lurch 975 * along. 976 */ 977 978 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header, 979 sizeof(mpt->mpt_port_page0), FALSE, 5000); 980 if (rv) { 981 mpt_prt(mpt, "failed to read SPI Port Page 0\n"); 982 } else { 983 mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0); 984 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 985 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", 986 mpt->mpt_port_page0.Capabilities, 987 mpt->mpt_port_page0.PhysicalInterface); 988 } 989 990 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header, 991 sizeof(mpt->mpt_port_page1), FALSE, 5000); 992 if (rv) { 993 mpt_prt(mpt, "failed to read SPI Port Page 1\n"); 994 } else { 995 mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1); 996 mpt_lprt(mpt, MPT_PRT_DEBUG, 997 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", 998 mpt->mpt_port_page1.Configuration, 999 mpt->mpt_port_page1.OnBusTimerValue); 1000 } 1001 1002 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header, 1003 sizeof(mpt->mpt_port_page2), FALSE, 5000); 1004 if (rv) { 1005 mpt_prt(mpt, "failed to read SPI Port Page 2\n"); 1006 } else { 1007 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1008 "Port Page 2: Flags %x Settings %x\n", 1009 mpt->mpt_port_page2.PortFlags, 1010 mpt->mpt_port_page2.PortSettings); 1011 mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2); 1012 for (i = 0; i < 16; i++) { 1013 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1014 " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", 1015 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, 1016 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, 1017 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); 1018 } 1019 } 1020 1021 for (i = 0; i < 16; i++) { 1022 rv = mpt_read_cur_cfg_page(mpt, i, 1023 &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0), 1024 FALSE, 5000); 1025 if (rv) { 1026 mpt_prt(mpt, 1027 "cannot read SPI Target %d Device Page 0\n", i); 1028 continue; 1029 } 1030 mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]); 1031 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1032 "target %d page 0: Negotiated Params %x Information %x\n", 1033 i, mpt->mpt_dev_page0[i].NegotiatedParameters, 1034 mpt->mpt_dev_page0[i].Information); 1035 1036 rv = mpt_read_cur_cfg_page(mpt, i, 1037 &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1), 1038 FALSE, 5000); 1039 if (rv) { 1040 mpt_prt(mpt, 1041 "cannot read SPI Target %d Device Page 1\n", i); 1042 continue; 1043 } 1044 mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]); 1045 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1046 "target %d page 1: Requested Params %x Configuration %x\n", 1047 i, mpt->mpt_dev_page1[i].RequestedParameters, 1048 mpt->mpt_dev_page1[i].Configuration); 1049 } 1050 return (0); 1051 } 1052 1053 /* 1054 * Validate SPI configuration information. 1055 * 1056 * In particular, validate SPI Port Page 1. 1057 */ 1058 static int 1059 mpt_set_initial_config_spi(struct mpt_softc *mpt) 1060 { 1061 int error, i, pp1val; 1062 1063 mpt->mpt_disc_enable = 0xff; 1064 mpt->mpt_tag_enable = 0; 1065 1066 pp1val = ((1 << mpt->mpt_ini_id) << 1067 MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id; 1068 if (mpt->mpt_port_page1.Configuration != pp1val) { 1069 CONFIG_PAGE_SCSI_PORT_1 tmp; 1070 1071 mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should " 1072 "be %x\n", mpt->mpt_port_page1.Configuration, pp1val); 1073 tmp = mpt->mpt_port_page1; 1074 tmp.Configuration = pp1val; 1075 host2mpt_config_page_scsi_port_1(&tmp); 1076 error = mpt_write_cur_cfg_page(mpt, 0, 1077 &tmp.Header, sizeof(tmp), FALSE, 5000); 1078 if (error) { 1079 return (-1); 1080 } 1081 error = mpt_read_cur_cfg_page(mpt, 0, 1082 &tmp.Header, sizeof(tmp), FALSE, 5000); 1083 if (error) { 1084 return (-1); 1085 } 1086 mpt2host_config_page_scsi_port_1(&tmp); 1087 if (tmp.Configuration != pp1val) { 1088 mpt_prt(mpt, 1089 "failed to reset SPI Port Page 1 Config value\n"); 1090 return (-1); 1091 } 1092 mpt->mpt_port_page1 = tmp; 1093 } 1094 1095 /* 1096 * The purpose of this exercise is to get 1097 * all targets back to async/narrow. 1098 * 1099 * We skip this step if the BIOS has already negotiated 1100 * speeds with the targets. 1101 */ 1102 i = mpt->mpt_port_page2.PortSettings & 1103 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 1104 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) { 1105 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1106 "honoring BIOS transfer negotiations\n"); 1107 } else { 1108 for (i = 0; i < 16; i++) { 1109 mpt->mpt_dev_page1[i].RequestedParameters = 0; 1110 mpt->mpt_dev_page1[i].Configuration = 0; 1111 (void) mpt_update_spi_config(mpt, i); 1112 } 1113 } 1114 return (0); 1115 } 1116 1117 int 1118 mpt_cam_enable(struct mpt_softc *mpt) 1119 { 1120 int error; 1121 1122 MPT_LOCK(mpt); 1123 1124 error = EIO; 1125 if (mpt->is_fc) { 1126 if (mpt_read_config_info_fc(mpt)) { 1127 goto out; 1128 } 1129 if (mpt_set_initial_config_fc(mpt)) { 1130 goto out; 1131 } 1132 } else if (mpt->is_sas) { 1133 if (mpt_read_config_info_sas(mpt)) { 1134 goto out; 1135 } 1136 if (mpt_set_initial_config_sas(mpt)) { 1137 goto out; 1138 } 1139 } else if (mpt->is_spi) { 1140 if (mpt_read_config_info_spi(mpt)) { 1141 goto out; 1142 } 1143 if (mpt_set_initial_config_spi(mpt)) { 1144 goto out; 1145 } 1146 } 1147 error = 0; 1148 1149 out: 1150 MPT_UNLOCK(mpt); 1151 return (error); 1152 } 1153 1154 void 1155 mpt_cam_ready(struct mpt_softc *mpt) 1156 { 1157 /* 1158 * If we're in target mode, hang out resources now 1159 * so we don't cause the world to hang talking to us. 1160 */ 1161 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 1162 /* 1163 * Try to add some target command resources 1164 */ 1165 MPT_LOCK(mpt); 1166 if (mpt_add_target_commands(mpt) == FALSE) { 1167 mpt_prt(mpt, "failed to add target commands\n"); 1168 } 1169 MPT_UNLOCK(mpt); 1170 } 1171 mpt->ready = 1; 1172 } 1173 1174 void 1175 mpt_cam_detach(struct mpt_softc *mpt) 1176 { 1177 mpt_handler_t handler; 1178 1179 MPT_LOCK(mpt); 1180 mpt->ready = 0; 1181 mpt_terminate_recovery_thread(mpt); 1182 1183 handler.reply_handler = mpt_scsi_reply_handler; 1184 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1185 scsi_io_handler_id); 1186 handler.reply_handler = mpt_scsi_tmf_reply_handler; 1187 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1188 scsi_tmf_handler_id); 1189 handler.reply_handler = mpt_fc_els_reply_handler; 1190 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1191 fc_els_handler_id); 1192 handler.reply_handler = mpt_scsi_tgt_reply_handler; 1193 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1194 mpt->scsi_tgt_handler_id); 1195 handler.reply_handler = mpt_sata_pass_reply_handler; 1196 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1197 sata_pass_handler_id); 1198 1199 if (mpt->tmf_req != NULL) { 1200 mpt->tmf_req->state = REQ_STATE_ALLOCATED; 1201 mpt_free_request(mpt, mpt->tmf_req); 1202 mpt->tmf_req = NULL; 1203 } 1204 if (mpt->sas_portinfo != NULL) { 1205 free(mpt->sas_portinfo, M_DEVBUF); 1206 mpt->sas_portinfo = NULL; 1207 } 1208 1209 if (mpt->sim != NULL) { 1210 xpt_free_path(mpt->path); 1211 xpt_bus_deregister(cam_sim_path(mpt->sim)); 1212 cam_sim_free(mpt->sim, TRUE); 1213 mpt->sim = NULL; 1214 } 1215 1216 if (mpt->phydisk_sim != NULL) { 1217 xpt_free_path(mpt->phydisk_path); 1218 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim)); 1219 cam_sim_free(mpt->phydisk_sim, TRUE); 1220 mpt->phydisk_sim = NULL; 1221 } 1222 MPT_UNLOCK(mpt); 1223 } 1224 1225 /* This routine is used after a system crash to dump core onto the swap device. 1226 */ 1227 static void 1228 mpt_poll(struct cam_sim *sim) 1229 { 1230 struct mpt_softc *mpt; 1231 1232 mpt = (struct mpt_softc *)cam_sim_softc(sim); 1233 mpt_intr(mpt); 1234 } 1235 1236 /* 1237 * Watchdog timeout routine for SCSI requests. 1238 */ 1239 static void 1240 mpt_timeout(void *arg) 1241 { 1242 union ccb *ccb; 1243 struct mpt_softc *mpt; 1244 request_t *req; 1245 1246 ccb = (union ccb *)arg; 1247 mpt = ccb->ccb_h.ccb_mpt_ptr; 1248 1249 #if __FreeBSD_version < 500000 1250 MPT_LOCK(mpt); 1251 #endif 1252 MPT_LOCK_ASSERT(mpt); 1253 req = ccb->ccb_h.ccb_req_ptr; 1254 mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req, 1255 req->serno, ccb, req->ccb); 1256 /* XXX: WHAT ARE WE TRYING TO DO HERE? */ 1257 if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) { 1258 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 1259 TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links); 1260 req->state |= REQ_STATE_TIMEDOUT; 1261 mpt_wakeup_recovery_thread(mpt); 1262 } 1263 #if __FreeBSD_version < 500000 1264 MPT_UNLOCK(mpt); 1265 #endif 1266 } 1267 1268 /* 1269 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly. 1270 * 1271 * Takes a list of physical segments and builds the SGL for SCSI IO command 1272 * and forwards the commard to the IOC after one last check that CAM has not 1273 * aborted the transaction. 1274 */ 1275 static void 1276 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1277 { 1278 request_t *req, *trq; 1279 char *mpt_off; 1280 union ccb *ccb; 1281 struct mpt_softc *mpt; 1282 int seg, first_lim; 1283 uint32_t flags, nxt_off; 1284 void *sglp = NULL; 1285 MSG_REQUEST_HEADER *hdrp; 1286 SGE_SIMPLE64 *se; 1287 SGE_CHAIN64 *ce; 1288 int istgt = 0; 1289 1290 req = (request_t *)arg; 1291 ccb = req->ccb; 1292 1293 mpt = ccb->ccb_h.ccb_mpt_ptr; 1294 req = ccb->ccb_h.ccb_req_ptr; 1295 1296 hdrp = req->req_vbuf; 1297 mpt_off = req->req_vbuf; 1298 1299 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1300 error = EFBIG; 1301 } 1302 1303 if (error == 0) { 1304 switch (hdrp->Function) { 1305 case MPI_FUNCTION_SCSI_IO_REQUEST: 1306 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 1307 istgt = 0; 1308 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 1309 break; 1310 case MPI_FUNCTION_TARGET_ASSIST: 1311 istgt = 1; 1312 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 1313 break; 1314 default: 1315 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", 1316 hdrp->Function); 1317 error = EINVAL; 1318 break; 1319 } 1320 } 1321 1322 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1323 error = EFBIG; 1324 mpt_prt(mpt, "segment count %d too large (max %u)\n", 1325 nseg, mpt->max_seg_cnt); 1326 } 1327 1328 bad: 1329 if (error != 0) { 1330 if (error != EFBIG && error != ENOMEM) { 1331 mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); 1332 } 1333 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1334 cam_status status; 1335 mpt_freeze_ccb(ccb); 1336 if (error == EFBIG) { 1337 status = CAM_REQ_TOO_BIG; 1338 } else if (error == ENOMEM) { 1339 if (mpt->outofbeer == 0) { 1340 mpt->outofbeer = 1; 1341 xpt_freeze_simq(mpt->sim, 1); 1342 mpt_lprt(mpt, MPT_PRT_DEBUG, 1343 "FREEZEQ\n"); 1344 } 1345 status = CAM_REQUEUE_REQ; 1346 } else { 1347 status = CAM_REQ_CMP_ERR; 1348 } 1349 mpt_set_ccb_status(ccb, status); 1350 } 1351 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1352 request_t *cmd_req = 1353 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1354 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1355 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1356 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1357 } 1358 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1359 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 1360 xpt_done(ccb); 1361 CAMLOCK_2_MPTLOCK(mpt); 1362 mpt_free_request(mpt, req); 1363 MPTLOCK_2_CAMLOCK(mpt); 1364 return; 1365 } 1366 1367 /* 1368 * No data to transfer? 1369 * Just make a single simple SGL with zero length. 1370 */ 1371 1372 if (mpt->verbose >= MPT_PRT_DEBUG) { 1373 int tidx = ((char *)sglp) - mpt_off; 1374 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1375 } 1376 1377 if (nseg == 0) { 1378 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1379 MPI_pSGE_SET_FLAGS(se1, 1380 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1381 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1382 se1->FlagsLength = htole32(se1->FlagsLength); 1383 goto out; 1384 } 1385 1386 1387 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1388 if (istgt == 0) { 1389 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1390 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1391 } 1392 } else { 1393 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1394 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1395 } 1396 } 1397 1398 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { 1399 bus_dmasync_op_t op; 1400 if (istgt == 0) { 1401 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1402 op = BUS_DMASYNC_PREREAD; 1403 } else { 1404 op = BUS_DMASYNC_PREWRITE; 1405 } 1406 } else { 1407 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1408 op = BUS_DMASYNC_PREWRITE; 1409 } else { 1410 op = BUS_DMASYNC_PREREAD; 1411 } 1412 } 1413 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1414 } 1415 1416 /* 1417 * Okay, fill in what we can at the end of the command frame. 1418 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1419 * the command frame. 1420 * 1421 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1422 * SIMPLE64 pointers and start doing CHAIN64 entries after 1423 * that. 1424 */ 1425 1426 if (nseg < MPT_NSGL_FIRST(mpt)) { 1427 first_lim = nseg; 1428 } else { 1429 /* 1430 * Leave room for CHAIN element 1431 */ 1432 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1433 } 1434 1435 se = (SGE_SIMPLE64 *) sglp; 1436 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1437 uint32_t tf; 1438 1439 memset(se, 0, sizeof (*se)); 1440 se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); 1441 if (sizeof(bus_addr_t) > 4) { 1442 se->Address.High = 1443 htole32(((uint64_t)dm_segs->ds_addr) >> 32); 1444 } 1445 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1446 tf = flags; 1447 if (seg == first_lim - 1) { 1448 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1449 } 1450 if (seg == nseg - 1) { 1451 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1452 MPI_SGE_FLAGS_END_OF_BUFFER; 1453 } 1454 MPI_pSGE_SET_FLAGS(se, tf); 1455 se->FlagsLength = htole32(se->FlagsLength); 1456 } 1457 1458 if (seg == nseg) { 1459 goto out; 1460 } 1461 1462 /* 1463 * Tell the IOC where to find the first chain element. 1464 */ 1465 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1466 nxt_off = MPT_RQSL(mpt); 1467 trq = req; 1468 1469 /* 1470 * Make up the rest of the data segments out of a chain element 1471 * (contiained in the current request frame) which points to 1472 * SIMPLE64 elements in the next request frame, possibly ending 1473 * with *another* chain element (if there's more). 1474 */ 1475 while (seg < nseg) { 1476 int this_seg_lim; 1477 uint32_t tf, cur_off; 1478 bus_addr_t chain_list_addr; 1479 1480 /* 1481 * Point to the chain descriptor. Note that the chain 1482 * descriptor is at the end of the *previous* list (whether 1483 * chain or simple). 1484 */ 1485 ce = (SGE_CHAIN64 *) se; 1486 1487 /* 1488 * Before we change our current pointer, make sure we won't 1489 * overflow the request area with this frame. Note that we 1490 * test against 'greater than' here as it's okay in this case 1491 * to have next offset be just outside the request area. 1492 */ 1493 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1494 nxt_off = MPT_REQUEST_AREA; 1495 goto next_chain; 1496 } 1497 1498 /* 1499 * Set our SGE element pointer to the beginning of the chain 1500 * list and update our next chain list offset. 1501 */ 1502 se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; 1503 cur_off = nxt_off; 1504 nxt_off += MPT_RQSL(mpt); 1505 1506 /* 1507 * Now initialized the chain descriptor. 1508 */ 1509 memset(ce, 0, sizeof (*ce)); 1510 1511 /* 1512 * Get the physical address of the chain list. 1513 */ 1514 chain_list_addr = trq->req_pbuf; 1515 chain_list_addr += cur_off; 1516 if (sizeof (bus_addr_t) > 4) { 1517 ce->Address.High = 1518 htole32(((uint64_t)chain_list_addr) >> 32); 1519 } 1520 ce->Address.Low = htole32(chain_list_addr & 0xffffffff); 1521 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | 1522 MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1523 1524 /* 1525 * If we have more than a frame's worth of segments left, 1526 * set up the chain list to have the last element be another 1527 * chain descriptor. 1528 */ 1529 if ((nseg - seg) > MPT_NSGL(mpt)) { 1530 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1531 /* 1532 * The length of the chain is the length in bytes of the 1533 * number of segments plus the next chain element. 1534 * 1535 * The next chain descriptor offset is the length, 1536 * in words, of the number of segments. 1537 */ 1538 ce->Length = (this_seg_lim - seg) * 1539 sizeof (SGE_SIMPLE64); 1540 ce->NextChainOffset = ce->Length >> 2; 1541 ce->Length += sizeof (SGE_CHAIN64); 1542 } else { 1543 this_seg_lim = nseg; 1544 ce->Length = (this_seg_lim - seg) * 1545 sizeof (SGE_SIMPLE64); 1546 } 1547 ce->Length = htole16(ce->Length); 1548 1549 /* 1550 * Fill in the chain list SGE elements with our segment data. 1551 * 1552 * If we're the last element in this chain list, set the last 1553 * element flag. If we're the completely last element period, 1554 * set the end of list and end of buffer flags. 1555 */ 1556 while (seg < this_seg_lim) { 1557 memset(se, 0, sizeof (*se)); 1558 se->Address.Low = htole32(dm_segs->ds_addr & 1559 0xffffffff); 1560 if (sizeof (bus_addr_t) > 4) { 1561 se->Address.High = 1562 htole32(((uint64_t)dm_segs->ds_addr) >> 32); 1563 } 1564 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1565 tf = flags; 1566 if (seg == this_seg_lim - 1) { 1567 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1568 } 1569 if (seg == nseg - 1) { 1570 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1571 MPI_SGE_FLAGS_END_OF_BUFFER; 1572 } 1573 MPI_pSGE_SET_FLAGS(se, tf); 1574 se->FlagsLength = htole32(se->FlagsLength); 1575 se++; 1576 seg++; 1577 dm_segs++; 1578 } 1579 1580 next_chain: 1581 /* 1582 * If we have more segments to do and we've used up all of 1583 * the space in a request area, go allocate another one 1584 * and chain to that. 1585 */ 1586 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1587 request_t *nrq; 1588 1589 CAMLOCK_2_MPTLOCK(mpt); 1590 nrq = mpt_get_request(mpt, FALSE); 1591 MPTLOCK_2_CAMLOCK(mpt); 1592 1593 if (nrq == NULL) { 1594 error = ENOMEM; 1595 goto bad; 1596 } 1597 1598 /* 1599 * Append the new request area on the tail of our list. 1600 */ 1601 if ((trq = req->chain) == NULL) { 1602 req->chain = nrq; 1603 } else { 1604 while (trq->chain != NULL) { 1605 trq = trq->chain; 1606 } 1607 trq->chain = nrq; 1608 } 1609 trq = nrq; 1610 mpt_off = trq->req_vbuf; 1611 if (mpt->verbose >= MPT_PRT_DEBUG) { 1612 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 1613 } 1614 nxt_off = 0; 1615 } 1616 } 1617 out: 1618 1619 /* 1620 * Last time we need to check if this CCB needs to be aborted. 1621 */ 1622 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1623 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1624 request_t *cmd_req = 1625 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1626 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1627 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1628 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1629 } 1630 mpt_prt(mpt, 1631 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", 1632 ccb->ccb_h.status & CAM_STATUS_MASK); 1633 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1634 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 1635 } 1636 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1637 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 1638 xpt_done(ccb); 1639 CAMLOCK_2_MPTLOCK(mpt); 1640 mpt_free_request(mpt, req); 1641 MPTLOCK_2_CAMLOCK(mpt); 1642 return; 1643 } 1644 1645 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1646 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 1647 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, 1648 mpt_timeout, ccb); 1649 } 1650 if (mpt->verbose > MPT_PRT_DEBUG) { 1651 int nc = 0; 1652 mpt_print_request(req->req_vbuf); 1653 for (trq = req->chain; trq; trq = trq->chain) { 1654 printf(" Additional Chain Area %d\n", nc++); 1655 mpt_dump_sgl(trq->req_vbuf, 0); 1656 } 1657 } 1658 1659 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1660 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1661 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 1662 #ifdef WE_TRUST_AUTO_GOOD_STATUS 1663 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 1664 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 1665 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 1666 } else { 1667 tgt->state = TGT_STATE_MOVING_DATA; 1668 } 1669 #else 1670 tgt->state = TGT_STATE_MOVING_DATA; 1671 #endif 1672 } 1673 CAMLOCK_2_MPTLOCK(mpt); 1674 mpt_send_cmd(mpt, req); 1675 MPTLOCK_2_CAMLOCK(mpt); 1676 } 1677 1678 static void 1679 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1680 { 1681 request_t *req, *trq; 1682 char *mpt_off; 1683 union ccb *ccb; 1684 struct mpt_softc *mpt; 1685 int seg, first_lim; 1686 uint32_t flags, nxt_off; 1687 void *sglp = NULL; 1688 MSG_REQUEST_HEADER *hdrp; 1689 SGE_SIMPLE32 *se; 1690 SGE_CHAIN32 *ce; 1691 int istgt = 0; 1692 1693 req = (request_t *)arg; 1694 ccb = req->ccb; 1695 1696 mpt = ccb->ccb_h.ccb_mpt_ptr; 1697 req = ccb->ccb_h.ccb_req_ptr; 1698 1699 hdrp = req->req_vbuf; 1700 mpt_off = req->req_vbuf; 1701 1702 1703 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1704 error = EFBIG; 1705 } 1706 1707 if (error == 0) { 1708 switch (hdrp->Function) { 1709 case MPI_FUNCTION_SCSI_IO_REQUEST: 1710 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 1711 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 1712 break; 1713 case MPI_FUNCTION_TARGET_ASSIST: 1714 istgt = 1; 1715 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 1716 break; 1717 default: 1718 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n", 1719 hdrp->Function); 1720 error = EINVAL; 1721 break; 1722 } 1723 } 1724 1725 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1726 error = EFBIG; 1727 mpt_prt(mpt, "segment count %d too large (max %u)\n", 1728 nseg, mpt->max_seg_cnt); 1729 } 1730 1731 bad: 1732 if (error != 0) { 1733 if (error != EFBIG && error != ENOMEM) { 1734 mpt_prt(mpt, "mpt_execute_req: err %d\n", error); 1735 } 1736 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1737 cam_status status; 1738 mpt_freeze_ccb(ccb); 1739 if (error == EFBIG) { 1740 status = CAM_REQ_TOO_BIG; 1741 } else if (error == ENOMEM) { 1742 if (mpt->outofbeer == 0) { 1743 mpt->outofbeer = 1; 1744 xpt_freeze_simq(mpt->sim, 1); 1745 mpt_lprt(mpt, MPT_PRT_DEBUG, 1746 "FREEZEQ\n"); 1747 } 1748 status = CAM_REQUEUE_REQ; 1749 } else { 1750 status = CAM_REQ_CMP_ERR; 1751 } 1752 mpt_set_ccb_status(ccb, status); 1753 } 1754 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1755 request_t *cmd_req = 1756 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1757 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1758 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1759 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1760 } 1761 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1762 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 1763 xpt_done(ccb); 1764 CAMLOCK_2_MPTLOCK(mpt); 1765 mpt_free_request(mpt, req); 1766 MPTLOCK_2_CAMLOCK(mpt); 1767 return; 1768 } 1769 1770 /* 1771 * No data to transfer? 1772 * Just make a single simple SGL with zero length. 1773 */ 1774 1775 if (mpt->verbose >= MPT_PRT_DEBUG) { 1776 int tidx = ((char *)sglp) - mpt_off; 1777 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1778 } 1779 1780 if (nseg == 0) { 1781 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1782 MPI_pSGE_SET_FLAGS(se1, 1783 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1784 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1785 se1->FlagsLength = htole32(se1->FlagsLength); 1786 goto out; 1787 } 1788 1789 1790 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 1791 if (istgt == 0) { 1792 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1793 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1794 } 1795 } else { 1796 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1797 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1798 } 1799 } 1800 1801 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { 1802 bus_dmasync_op_t op; 1803 if (istgt) { 1804 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1805 op = BUS_DMASYNC_PREREAD; 1806 } else { 1807 op = BUS_DMASYNC_PREWRITE; 1808 } 1809 } else { 1810 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1811 op = BUS_DMASYNC_PREWRITE; 1812 } else { 1813 op = BUS_DMASYNC_PREREAD; 1814 } 1815 } 1816 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1817 } 1818 1819 /* 1820 * Okay, fill in what we can at the end of the command frame. 1821 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1822 * the command frame. 1823 * 1824 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1825 * SIMPLE32 pointers and start doing CHAIN32 entries after 1826 * that. 1827 */ 1828 1829 if (nseg < MPT_NSGL_FIRST(mpt)) { 1830 first_lim = nseg; 1831 } else { 1832 /* 1833 * Leave room for CHAIN element 1834 */ 1835 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1836 } 1837 1838 se = (SGE_SIMPLE32 *) sglp; 1839 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1840 uint32_t tf; 1841 1842 memset(se, 0,sizeof (*se)); 1843 se->Address = htole32(dm_segs->ds_addr); 1844 1845 1846 1847 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1848 tf = flags; 1849 if (seg == first_lim - 1) { 1850 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1851 } 1852 if (seg == nseg - 1) { 1853 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1854 MPI_SGE_FLAGS_END_OF_BUFFER; 1855 } 1856 MPI_pSGE_SET_FLAGS(se, tf); 1857 se->FlagsLength = htole32(se->FlagsLength); 1858 } 1859 1860 if (seg == nseg) { 1861 goto out; 1862 } 1863 1864 /* 1865 * Tell the IOC where to find the first chain element. 1866 */ 1867 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1868 nxt_off = MPT_RQSL(mpt); 1869 trq = req; 1870 1871 /* 1872 * Make up the rest of the data segments out of a chain element 1873 * (contiained in the current request frame) which points to 1874 * SIMPLE32 elements in the next request frame, possibly ending 1875 * with *another* chain element (if there's more). 1876 */ 1877 while (seg < nseg) { 1878 int this_seg_lim; 1879 uint32_t tf, cur_off; 1880 bus_addr_t chain_list_addr; 1881 1882 /* 1883 * Point to the chain descriptor. Note that the chain 1884 * descriptor is at the end of the *previous* list (whether 1885 * chain or simple). 1886 */ 1887 ce = (SGE_CHAIN32 *) se; 1888 1889 /* 1890 * Before we change our current pointer, make sure we won't 1891 * overflow the request area with this frame. Note that we 1892 * test against 'greater than' here as it's okay in this case 1893 * to have next offset be just outside the request area. 1894 */ 1895 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1896 nxt_off = MPT_REQUEST_AREA; 1897 goto next_chain; 1898 } 1899 1900 /* 1901 * Set our SGE element pointer to the beginning of the chain 1902 * list and update our next chain list offset. 1903 */ 1904 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; 1905 cur_off = nxt_off; 1906 nxt_off += MPT_RQSL(mpt); 1907 1908 /* 1909 * Now initialized the chain descriptor. 1910 */ 1911 memset(ce, 0, sizeof (*ce)); 1912 1913 /* 1914 * Get the physical address of the chain list. 1915 */ 1916 chain_list_addr = trq->req_pbuf; 1917 chain_list_addr += cur_off; 1918 1919 1920 1921 ce->Address = htole32(chain_list_addr); 1922 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; 1923 1924 1925 /* 1926 * If we have more than a frame's worth of segments left, 1927 * set up the chain list to have the last element be another 1928 * chain descriptor. 1929 */ 1930 if ((nseg - seg) > MPT_NSGL(mpt)) { 1931 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1932 /* 1933 * The length of the chain is the length in bytes of the 1934 * number of segments plus the next chain element. 1935 * 1936 * The next chain descriptor offset is the length, 1937 * in words, of the number of segments. 1938 */ 1939 ce->Length = (this_seg_lim - seg) * 1940 sizeof (SGE_SIMPLE32); 1941 ce->NextChainOffset = ce->Length >> 2; 1942 ce->Length += sizeof (SGE_CHAIN32); 1943 } else { 1944 this_seg_lim = nseg; 1945 ce->Length = (this_seg_lim - seg) * 1946 sizeof (SGE_SIMPLE32); 1947 } 1948 ce->Length = htole16(ce->Length); 1949 1950 /* 1951 * Fill in the chain list SGE elements with our segment data. 1952 * 1953 * If we're the last element in this chain list, set the last 1954 * element flag. If we're the completely last element period, 1955 * set the end of list and end of buffer flags. 1956 */ 1957 while (seg < this_seg_lim) { 1958 memset(se, 0, sizeof (*se)); 1959 se->Address = htole32(dm_segs->ds_addr); 1960 1961 1962 1963 1964 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1965 tf = flags; 1966 if (seg == this_seg_lim - 1) { 1967 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1968 } 1969 if (seg == nseg - 1) { 1970 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1971 MPI_SGE_FLAGS_END_OF_BUFFER; 1972 } 1973 MPI_pSGE_SET_FLAGS(se, tf); 1974 se->FlagsLength = htole32(se->FlagsLength); 1975 se++; 1976 seg++; 1977 dm_segs++; 1978 } 1979 1980 next_chain: 1981 /* 1982 * If we have more segments to do and we've used up all of 1983 * the space in a request area, go allocate another one 1984 * and chain to that. 1985 */ 1986 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1987 request_t *nrq; 1988 1989 CAMLOCK_2_MPTLOCK(mpt); 1990 nrq = mpt_get_request(mpt, FALSE); 1991 MPTLOCK_2_CAMLOCK(mpt); 1992 1993 if (nrq == NULL) { 1994 error = ENOMEM; 1995 goto bad; 1996 } 1997 1998 /* 1999 * Append the new request area on the tail of our list. 2000 */ 2001 if ((trq = req->chain) == NULL) { 2002 req->chain = nrq; 2003 } else { 2004 while (trq->chain != NULL) { 2005 trq = trq->chain; 2006 } 2007 trq->chain = nrq; 2008 } 2009 trq = nrq; 2010 mpt_off = trq->req_vbuf; 2011 if (mpt->verbose >= MPT_PRT_DEBUG) { 2012 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 2013 } 2014 nxt_off = 0; 2015 } 2016 } 2017 out: 2018 2019 /* 2020 * Last time we need to check if this CCB needs to be aborted. 2021 */ 2022 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2023 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2024 request_t *cmd_req = 2025 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2026 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 2027 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 2028 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 2029 } 2030 mpt_prt(mpt, 2031 "mpt_execute_req: I/O cancelled (status 0x%x)\n", 2032 ccb->ccb_h.status & CAM_STATUS_MASK); 2033 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2034 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2035 } 2036 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2037 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 2038 xpt_done(ccb); 2039 CAMLOCK_2_MPTLOCK(mpt); 2040 mpt_free_request(mpt, req); 2041 MPTLOCK_2_CAMLOCK(mpt); 2042 return; 2043 } 2044 2045 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2046 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2047 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, 2048 mpt_timeout, ccb); 2049 } 2050 if (mpt->verbose > MPT_PRT_DEBUG) { 2051 int nc = 0; 2052 mpt_print_request(req->req_vbuf); 2053 for (trq = req->chain; trq; trq = trq->chain) { 2054 printf(" Additional Chain Area %d\n", nc++); 2055 mpt_dump_sgl(trq->req_vbuf, 0); 2056 } 2057 } 2058 2059 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2060 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2061 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 2062 #ifdef WE_TRUST_AUTO_GOOD_STATUS 2063 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 2064 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 2065 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 2066 } else { 2067 tgt->state = TGT_STATE_MOVING_DATA; 2068 } 2069 #else 2070 tgt->state = TGT_STATE_MOVING_DATA; 2071 #endif 2072 } 2073 CAMLOCK_2_MPTLOCK(mpt); 2074 mpt_send_cmd(mpt, req); 2075 MPTLOCK_2_CAMLOCK(mpt); 2076 } 2077 2078 static void 2079 mpt_start(struct cam_sim *sim, union ccb *ccb) 2080 { 2081 request_t *req; 2082 struct mpt_softc *mpt; 2083 MSG_SCSI_IO_REQUEST *mpt_req; 2084 struct ccb_scsiio *csio = &ccb->csio; 2085 struct ccb_hdr *ccbh = &ccb->ccb_h; 2086 bus_dmamap_callback_t *cb; 2087 target_id_t tgt; 2088 int raid_passthru; 2089 2090 /* Get the pointer for the physical addapter */ 2091 mpt = ccb->ccb_h.ccb_mpt_ptr; 2092 raid_passthru = (sim == mpt->phydisk_sim); 2093 2094 CAMLOCK_2_MPTLOCK(mpt); 2095 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 2096 if (mpt->outofbeer == 0) { 2097 mpt->outofbeer = 1; 2098 xpt_freeze_simq(mpt->sim, 1); 2099 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 2100 } 2101 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2102 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 2103 MPTLOCK_2_CAMLOCK(mpt); 2104 xpt_done(ccb); 2105 return; 2106 } 2107 #ifdef INVARIANTS 2108 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); 2109 #endif 2110 MPTLOCK_2_CAMLOCK(mpt); 2111 2112 if (sizeof (bus_addr_t) > 4) { 2113 cb = mpt_execute_req_a64; 2114 } else { 2115 cb = mpt_execute_req; 2116 } 2117 2118 /* 2119 * Link the ccb and the request structure so we can find 2120 * the other knowing either the request or the ccb 2121 */ 2122 req->ccb = ccb; 2123 ccb->ccb_h.ccb_req_ptr = req; 2124 2125 /* Now we build the command for the IOC */ 2126 mpt_req = req->req_vbuf; 2127 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); 2128 2129 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; 2130 if (raid_passthru) { 2131 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 2132 CAMLOCK_2_MPTLOCK(mpt); 2133 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 2134 MPTLOCK_2_CAMLOCK(mpt); 2135 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2136 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 2137 xpt_done(ccb); 2138 return; 2139 } 2140 MPTLOCK_2_CAMLOCK(mpt); 2141 mpt_req->Bus = 0; /* we never set bus here */ 2142 } else { 2143 tgt = ccb->ccb_h.target_id; 2144 mpt_req->Bus = 0; /* XXX */ 2145 2146 } 2147 mpt_req->SenseBufferLength = 2148 (csio->sense_len < MPT_SENSE_SIZE) ? 2149 csio->sense_len : MPT_SENSE_SIZE; 2150 2151 /* 2152 * We use the message context to find the request structure when we 2153 * Get the command completion interrupt from the IOC. 2154 */ 2155 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); 2156 2157 /* Which physical device to do the I/O on */ 2158 mpt_req->TargetID = tgt; 2159 2160 /* We assume a single level LUN type */ 2161 if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) { 2162 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f); 2163 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff; 2164 } else { 2165 mpt_req->LUN[1] = ccb->ccb_h.target_lun; 2166 } 2167 2168 /* Set the direction of the transfer */ 2169 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2170 mpt_req->Control = MPI_SCSIIO_CONTROL_READ; 2171 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 2172 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; 2173 } else { 2174 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; 2175 } 2176 2177 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 2178 switch(ccb->csio.tag_action) { 2179 case MSG_HEAD_OF_Q_TAG: 2180 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; 2181 break; 2182 case MSG_ACA_TASK: 2183 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; 2184 break; 2185 case MSG_ORDERED_Q_TAG: 2186 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; 2187 break; 2188 case MSG_SIMPLE_Q_TAG: 2189 default: 2190 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2191 break; 2192 } 2193 } else { 2194 if (mpt->is_fc || mpt->is_sas) { 2195 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2196 } else { 2197 /* XXX No such thing for a target doing packetized. */ 2198 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; 2199 } 2200 } 2201 2202 if (mpt->is_spi) { 2203 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 2204 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; 2205 } 2206 } 2207 mpt_req->Control = htole32(mpt_req->Control); 2208 2209 /* Copy the scsi command block into place */ 2210 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2211 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); 2212 } else { 2213 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); 2214 } 2215 2216 mpt_req->CDBLength = csio->cdb_len; 2217 mpt_req->DataLength = htole32(csio->dxfer_len); 2218 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); 2219 2220 /* 2221 * Do a *short* print here if we're set to MPT_PRT_DEBUG 2222 */ 2223 if (mpt->verbose == MPT_PRT_DEBUG) { 2224 U32 df; 2225 mpt_prt(mpt, "mpt_start: %s op 0x%x ", 2226 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? 2227 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); 2228 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; 2229 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { 2230 mpt_prtc(mpt, "(%s %u byte%s ", 2231 (df == MPI_SCSIIO_CONTROL_READ)? 2232 "read" : "write", csio->dxfer_len, 2233 (csio->dxfer_len == 1)? ")" : "s)"); 2234 } 2235 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt, 2236 ccb->ccb_h.target_lun, req, req->serno); 2237 } 2238 2239 /* 2240 * If we have any data to send with this command map it into bus space. 2241 */ 2242 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2243 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 2244 /* 2245 * We've been given a pointer to a single buffer. 2246 */ 2247 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 2248 /* 2249 * Virtual address that needs to translated into 2250 * one or more physical address ranges. 2251 */ 2252 int error; 2253 int s = splsoftvm(); 2254 error = bus_dmamap_load(mpt->buffer_dmat, 2255 req->dmap, csio->data_ptr, csio->dxfer_len, 2256 cb, req, 0); 2257 splx(s); 2258 if (error == EINPROGRESS) { 2259 /* 2260 * So as to maintain ordering, 2261 * freeze the controller queue 2262 * until our mapping is 2263 * returned. 2264 */ 2265 xpt_freeze_simq(mpt->sim, 1); 2266 ccbh->status |= CAM_RELEASE_SIMQ; 2267 } 2268 } else { 2269 /* 2270 * We have been given a pointer to single 2271 * physical buffer. 2272 */ 2273 struct bus_dma_segment seg; 2274 seg.ds_addr = 2275 (bus_addr_t)(vm_offset_t)csio->data_ptr; 2276 seg.ds_len = csio->dxfer_len; 2277 (*cb)(req, &seg, 1, 0); 2278 } 2279 } else { 2280 /* 2281 * We have been given a list of addresses. 2282 * This case could be easily supported but they are not 2283 * currently generated by the CAM subsystem so there 2284 * is no point in wasting the time right now. 2285 */ 2286 struct bus_dma_segment *segs; 2287 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { 2288 (*cb)(req, NULL, 0, EFAULT); 2289 } else { 2290 /* Just use the segments provided */ 2291 segs = (struct bus_dma_segment *)csio->data_ptr; 2292 (*cb)(req, segs, csio->sglist_cnt, 0); 2293 } 2294 } 2295 } else { 2296 (*cb)(req, NULL, 0, 0); 2297 } 2298 } 2299 2300 static int 2301 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, 2302 int sleep_ok) 2303 { 2304 int error; 2305 uint16_t status; 2306 uint8_t response; 2307 2308 error = mpt_scsi_send_tmf(mpt, 2309 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? 2310 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : 2311 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 2312 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 2313 0, /* XXX How do I get the channel ID? */ 2314 tgt != CAM_TARGET_WILDCARD ? tgt : 0, 2315 lun != CAM_LUN_WILDCARD ? lun : 0, 2316 0, sleep_ok); 2317 2318 if (error != 0) { 2319 /* 2320 * mpt_scsi_send_tmf hard resets on failure, so no 2321 * need to do so here. 2322 */ 2323 mpt_prt(mpt, 2324 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); 2325 return (EIO); 2326 } 2327 2328 /* Wait for bus reset to be processed by the IOC. */ 2329 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 2330 REQ_STATE_DONE, sleep_ok, 5000); 2331 2332 status = le16toh(mpt->tmf_req->IOCStatus); 2333 response = mpt->tmf_req->ResponseCode; 2334 mpt->tmf_req->state = REQ_STATE_FREE; 2335 2336 if (error) { 2337 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " 2338 "Resetting controller.\n"); 2339 mpt_reset(mpt, TRUE); 2340 return (ETIMEDOUT); 2341 } 2342 2343 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 2344 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " 2345 "Resetting controller.\n", status); 2346 mpt_reset(mpt, TRUE); 2347 return (EIO); 2348 } 2349 2350 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 2351 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 2352 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. " 2353 "Resetting controller.\n", response); 2354 mpt_reset(mpt, TRUE); 2355 return (EIO); 2356 } 2357 return (0); 2358 } 2359 2360 static int 2361 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) 2362 { 2363 int r = 0; 2364 request_t *req; 2365 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; 2366 2367 req = mpt_get_request(mpt, FALSE); 2368 if (req == NULL) { 2369 return (ENOMEM); 2370 } 2371 fc = req->req_vbuf; 2372 memset(fc, 0, sizeof(*fc)); 2373 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; 2374 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; 2375 fc->MsgContext = htole32(req->index | fc_els_handler_id); 2376 mpt_send_cmd(mpt, req); 2377 if (dowait) { 2378 r = mpt_wait_req(mpt, req, REQ_STATE_DONE, 2379 REQ_STATE_DONE, FALSE, 60 * 1000); 2380 if (r == 0) { 2381 mpt_free_request(mpt, req); 2382 } 2383 } 2384 return (r); 2385 } 2386 2387 static int 2388 mpt_cam_event(struct mpt_softc *mpt, request_t *req, 2389 MSG_EVENT_NOTIFY_REPLY *msg) 2390 { 2391 uint32_t data0, data1; 2392 2393 data0 = le32toh(msg->Data[0]); 2394 data1 = le32toh(msg->Data[1]); 2395 switch(msg->Event & 0xFF) { 2396 case MPI_EVENT_UNIT_ATTENTION: 2397 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", 2398 (data0 >> 8) & 0xff, data0 & 0xff); 2399 break; 2400 2401 case MPI_EVENT_IOC_BUS_RESET: 2402 /* We generated a bus reset */ 2403 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", 2404 (data0 >> 8) & 0xff); 2405 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2406 break; 2407 2408 case MPI_EVENT_EXT_BUS_RESET: 2409 /* Someone else generated a bus reset */ 2410 mpt_prt(mpt, "External Bus Reset Detected\n"); 2411 /* 2412 * These replies don't return EventData like the MPI 2413 * spec says they do 2414 */ 2415 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2416 break; 2417 2418 case MPI_EVENT_RESCAN: 2419 #if __FreeBSD_version >= 600000 2420 { 2421 union ccb *ccb; 2422 uint32_t pathid; 2423 /* 2424 * In general this means a device has been added to the loop. 2425 */ 2426 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2427 if (mpt->ready == 0) { 2428 break; 2429 } 2430 if (mpt->phydisk_sim) { 2431 pathid = cam_sim_path(mpt->phydisk_sim); 2432 } else { 2433 pathid = cam_sim_path(mpt->sim); 2434 } 2435 MPTLOCK_2_CAMLOCK(mpt); 2436 /* 2437 * Allocate a CCB, create a wildcard path for this bus, 2438 * and schedule a rescan. 2439 */ 2440 ccb = xpt_alloc_ccb_nowait(); 2441 if (ccb == NULL) { 2442 mpt_prt(mpt, "unable to alloc CCB for rescan\n"); 2443 CAMLOCK_2_MPTLOCK(mpt); 2444 break; 2445 } 2446 2447 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, 2448 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2449 CAMLOCK_2_MPTLOCK(mpt); 2450 mpt_prt(mpt, "unable to create path for rescan\n"); 2451 xpt_free_ccb(ccb); 2452 break; 2453 } 2454 xpt_rescan(ccb); 2455 CAMLOCK_2_MPTLOCK(mpt); 2456 break; 2457 } 2458 #else 2459 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2460 break; 2461 #endif 2462 case MPI_EVENT_LINK_STATUS_CHANGE: 2463 mpt_prt(mpt, "Port %d: LinkState: %s\n", 2464 (data1 >> 8) & 0xff, 2465 ((data0 & 0xff) == 0)? "Failed" : "Active"); 2466 break; 2467 2468 case MPI_EVENT_LOOP_STATE_CHANGE: 2469 switch ((data0 >> 16) & 0xff) { 2470 case 0x01: 2471 mpt_prt(mpt, 2472 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " 2473 "(Loop Initialization)\n", 2474 (data1 >> 8) & 0xff, 2475 (data0 >> 8) & 0xff, 2476 (data0 ) & 0xff); 2477 switch ((data0 >> 8) & 0xff) { 2478 case 0xF7: 2479 if ((data0 & 0xff) == 0xF7) { 2480 mpt_prt(mpt, "Device needs AL_PA\n"); 2481 } else { 2482 mpt_prt(mpt, "Device %02x doesn't like " 2483 "FC performance\n", 2484 data0 & 0xFF); 2485 } 2486 break; 2487 case 0xF8: 2488 if ((data0 & 0xff) == 0xF7) { 2489 mpt_prt(mpt, "Device had loop failure " 2490 "at its receiver prior to acquiring" 2491 " AL_PA\n"); 2492 } else { 2493 mpt_prt(mpt, "Device %02x detected loop" 2494 " failure at its receiver\n", 2495 data0 & 0xFF); 2496 } 2497 break; 2498 default: 2499 mpt_prt(mpt, "Device %02x requests that device " 2500 "%02x reset itself\n", 2501 data0 & 0xFF, 2502 (data0 >> 8) & 0xFF); 2503 break; 2504 } 2505 break; 2506 case 0x02: 2507 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2508 "LPE(%02x,%02x) (Loop Port Enable)\n", 2509 (data1 >> 8) & 0xff, /* Port */ 2510 (data0 >> 8) & 0xff, /* Character 3 */ 2511 (data0 ) & 0xff /* Character 4 */); 2512 break; 2513 case 0x03: 2514 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2515 "LPB(%02x,%02x) (Loop Port Bypass)\n", 2516 (data1 >> 8) & 0xff, /* Port */ 2517 (data0 >> 8) & 0xff, /* Character 3 */ 2518 (data0 ) & 0xff /* Character 4 */); 2519 break; 2520 default: 2521 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " 2522 "FC event (%02x %02x %02x)\n", 2523 (data1 >> 8) & 0xff, /* Port */ 2524 (data0 >> 16) & 0xff, /* Event */ 2525 (data0 >> 8) & 0xff, /* Character 3 */ 2526 (data0 ) & 0xff /* Character 4 */); 2527 } 2528 break; 2529 2530 case MPI_EVENT_LOGOUT: 2531 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", 2532 (data1 >> 8) & 0xff, data0); 2533 break; 2534 case MPI_EVENT_QUEUE_FULL: 2535 { 2536 struct cam_sim *sim; 2537 struct cam_path *tmppath; 2538 struct ccb_relsim crs; 2539 PTR_EVENT_DATA_QUEUE_FULL pqf; 2540 lun_id_t lun_id; 2541 2542 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; 2543 pqf->CurrentDepth = le16toh(pqf->CurrentDepth); 2544 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " 2545 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); 2546 if (mpt->phydisk_sim) { 2547 sim = mpt->phydisk_sim; 2548 } else { 2549 sim = mpt->sim; 2550 } 2551 MPTLOCK_2_CAMLOCK(mpt); 2552 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { 2553 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2554 pqf->TargetID, lun_id) != CAM_REQ_CMP) { 2555 mpt_prt(mpt, "unable to create a path to send " 2556 "XPT_REL_SIMQ"); 2557 CAMLOCK_2_MPTLOCK(mpt); 2558 break; 2559 } 2560 xpt_setup_ccb(&crs.ccb_h, tmppath, 5); 2561 crs.ccb_h.func_code = XPT_REL_SIMQ; 2562 crs.ccb_h.flags = CAM_DEV_QFREEZE; 2563 crs.release_flags = RELSIM_ADJUST_OPENINGS; 2564 crs.openings = pqf->CurrentDepth - 1; 2565 xpt_action((union ccb *)&crs); 2566 if (crs.ccb_h.status != CAM_REQ_CMP) { 2567 mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); 2568 } 2569 xpt_free_path(tmppath); 2570 } 2571 CAMLOCK_2_MPTLOCK(mpt); 2572 break; 2573 } 2574 case MPI_EVENT_IR_RESYNC_UPDATE: 2575 mpt_prt(mpt, "IR resync update %d completed\n", 2576 (data0 >> 16) & 0xff); 2577 break; 2578 case MPI_EVENT_EVENT_CHANGE: 2579 case MPI_EVENT_INTEGRATED_RAID: 2580 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2581 case MPI_EVENT_SAS_SES: 2582 break; 2583 default: 2584 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", 2585 msg->Event & 0xFF); 2586 return (0); 2587 } 2588 return (1); 2589 } 2590 2591 /* 2592 * Reply path for all SCSI I/O requests, called from our 2593 * interrupt handler by extracting our handler index from 2594 * the MsgContext field of the reply from the IOC. 2595 * 2596 * This routine is optimized for the common case of a 2597 * completion without error. All exception handling is 2598 * offloaded to non-inlined helper routines to minimize 2599 * cache footprint. 2600 */ 2601 static int 2602 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, 2603 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2604 { 2605 MSG_SCSI_IO_REQUEST *scsi_req; 2606 union ccb *ccb; 2607 2608 if (req->state == REQ_STATE_FREE) { 2609 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); 2610 return (TRUE); 2611 } 2612 2613 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; 2614 ccb = req->ccb; 2615 if (ccb == NULL) { 2616 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", 2617 req, req->serno); 2618 return (TRUE); 2619 } 2620 2621 mpt_req_untimeout(req, mpt_timeout, ccb); 2622 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2623 2624 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2625 bus_dmasync_op_t op; 2626 2627 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 2628 op = BUS_DMASYNC_POSTREAD; 2629 else 2630 op = BUS_DMASYNC_POSTWRITE; 2631 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 2632 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2633 } 2634 2635 if (reply_frame == NULL) { 2636 /* 2637 * Context only reply, completion without error status. 2638 */ 2639 ccb->csio.resid = 0; 2640 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2641 ccb->csio.scsi_status = SCSI_STATUS_OK; 2642 } else { 2643 mpt_scsi_reply_frame_handler(mpt, req, reply_frame); 2644 } 2645 2646 if (mpt->outofbeer) { 2647 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2648 mpt->outofbeer = 0; 2649 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 2650 } 2651 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { 2652 struct scsi_inquiry_data *iq = 2653 (struct scsi_inquiry_data *)ccb->csio.data_ptr; 2654 if (scsi_req->Function == 2655 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 2656 /* 2657 * Fake out the device type so that only the 2658 * pass-thru device will attach. 2659 */ 2660 iq->device &= ~0x1F; 2661 iq->device |= T_NODEVICE; 2662 } 2663 } 2664 if (mpt->verbose == MPT_PRT_DEBUG) { 2665 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", 2666 req, req->serno); 2667 } 2668 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 2669 MPTLOCK_2_CAMLOCK(mpt); 2670 xpt_done(ccb); 2671 CAMLOCK_2_MPTLOCK(mpt); 2672 if ((req->state & REQ_STATE_TIMEDOUT) == 0) { 2673 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2674 } else { 2675 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", 2676 req, req->serno); 2677 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 2678 } 2679 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, 2680 ("CCB req needed wakeup")); 2681 #ifdef INVARIANTS 2682 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); 2683 #endif 2684 mpt_free_request(mpt, req); 2685 return (TRUE); 2686 } 2687 2688 static int 2689 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, 2690 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2691 { 2692 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; 2693 2694 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); 2695 #ifdef INVARIANTS 2696 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); 2697 #endif 2698 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; 2699 /* Record IOC Status and Response Code of TMF for any waiters. */ 2700 req->IOCStatus = le16toh(tmf_reply->IOCStatus); 2701 req->ResponseCode = tmf_reply->ResponseCode; 2702 2703 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", 2704 req, req->serno, le16toh(tmf_reply->IOCStatus)); 2705 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2706 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 2707 req->state |= REQ_STATE_DONE; 2708 wakeup(req); 2709 } else { 2710 mpt->tmf_req->state = REQ_STATE_FREE; 2711 } 2712 return (TRUE); 2713 } 2714 2715 /* 2716 * XXX: Move to definitions file 2717 */ 2718 #define ELS 0x22 2719 #define FC4LS 0x32 2720 #define ABTS 0x81 2721 #define BA_ACC 0x84 2722 2723 #define LS_RJT 0x01 2724 #define LS_ACC 0x02 2725 #define PLOGI 0x03 2726 #define LOGO 0x05 2727 #define SRR 0x14 2728 #define PRLI 0x20 2729 #define PRLO 0x21 2730 #define ADISC 0x52 2731 #define RSCN 0x61 2732 2733 static void 2734 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, 2735 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) 2736 { 2737 uint32_t fl; 2738 MSG_LINK_SERVICE_RSP_REQUEST tmp; 2739 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; 2740 2741 /* 2742 * We are going to reuse the ELS request to send this response back. 2743 */ 2744 rsp = &tmp; 2745 memset(rsp, 0, sizeof(*rsp)); 2746 2747 #ifdef USE_IMMEDIATE_LINK_DATA 2748 /* 2749 * Apparently the IMMEDIATE stuff doesn't seem to work. 2750 */ 2751 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; 2752 #endif 2753 rsp->RspLength = length; 2754 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; 2755 rsp->MsgContext = htole32(req->index | fc_els_handler_id); 2756 2757 /* 2758 * Copy over information from the original reply frame to 2759 * it's correct place in the response. 2760 */ 2761 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); 2762 2763 /* 2764 * And now copy back the temporary area to the original frame. 2765 */ 2766 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); 2767 rsp = req->req_vbuf; 2768 2769 #ifdef USE_IMMEDIATE_LINK_DATA 2770 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); 2771 #else 2772 { 2773 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; 2774 bus_addr_t paddr = req->req_pbuf; 2775 paddr += MPT_RQSL(mpt); 2776 2777 fl = 2778 MPI_SGE_FLAGS_HOST_TO_IOC | 2779 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2780 MPI_SGE_FLAGS_LAST_ELEMENT | 2781 MPI_SGE_FLAGS_END_OF_LIST | 2782 MPI_SGE_FLAGS_END_OF_BUFFER; 2783 fl <<= MPI_SGE_FLAGS_SHIFT; 2784 fl |= (length); 2785 se->FlagsLength = htole32(fl); 2786 se->Address = htole32((uint32_t) paddr); 2787 } 2788 #endif 2789 2790 /* 2791 * Send it on... 2792 */ 2793 mpt_send_cmd(mpt, req); 2794 } 2795 2796 static int 2797 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, 2798 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2799 { 2800 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = 2801 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; 2802 U8 rctl; 2803 U8 type; 2804 U8 cmd; 2805 U16 status = le16toh(reply_frame->IOCStatus); 2806 U32 *elsbuf; 2807 int ioindex; 2808 int do_refresh = TRUE; 2809 2810 #ifdef INVARIANTS 2811 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 2812 ("fc_els_reply_handler: req %p:%u for function %x on freelist!", 2813 req, req->serno, rp->Function)); 2814 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2815 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2816 } else { 2817 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2818 } 2819 #endif 2820 mpt_lprt(mpt, MPT_PRT_DEBUG, 2821 "FC_ELS Complete: req %p:%u, reply %p function %x\n", 2822 req, req->serno, reply_frame, reply_frame->Function); 2823 2824 if (status != MPI_IOCSTATUS_SUCCESS) { 2825 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", 2826 status, reply_frame->Function); 2827 if (status == MPI_IOCSTATUS_INVALID_STATE) { 2828 /* 2829 * XXX: to get around shutdown issue 2830 */ 2831 mpt->disabled = 1; 2832 return (TRUE); 2833 } 2834 return (TRUE); 2835 } 2836 2837 /* 2838 * If the function of a link service response, we recycle the 2839 * response to be a refresh for a new link service request. 2840 * 2841 * The request pointer is bogus in this case and we have to fetch 2842 * it based upon the TransactionContext. 2843 */ 2844 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { 2845 /* Freddie Uncle Charlie Katie */ 2846 /* We don't get the IOINDEX as part of the Link Svc Rsp */ 2847 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) 2848 if (mpt->els_cmd_ptrs[ioindex] == req) { 2849 break; 2850 } 2851 2852 KASSERT(ioindex < mpt->els_cmds_allocated, 2853 ("can't find my mommie!")); 2854 2855 /* remove from active list as we're going to re-post it */ 2856 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2857 req->state &= ~REQ_STATE_QUEUED; 2858 req->state |= REQ_STATE_DONE; 2859 mpt_fc_post_els(mpt, req, ioindex); 2860 return (TRUE); 2861 } 2862 2863 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2864 /* remove from active list as we're done */ 2865 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2866 req->state &= ~REQ_STATE_QUEUED; 2867 req->state |= REQ_STATE_DONE; 2868 if (req->state & REQ_STATE_TIMEDOUT) { 2869 mpt_lprt(mpt, MPT_PRT_DEBUG, 2870 "Sync Primitive Send Completed After Timeout\n"); 2871 mpt_free_request(mpt, req); 2872 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { 2873 mpt_lprt(mpt, MPT_PRT_DEBUG, 2874 "Async Primitive Send Complete\n"); 2875 mpt_free_request(mpt, req); 2876 } else { 2877 mpt_lprt(mpt, MPT_PRT_DEBUG, 2878 "Sync Primitive Send Complete- Waking Waiter\n"); 2879 wakeup(req); 2880 } 2881 return (TRUE); 2882 } 2883 2884 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { 2885 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " 2886 "Length %d Message Flags %x\n", rp->Function, rp->Flags, 2887 rp->MsgLength, rp->MsgFlags); 2888 return (TRUE); 2889 } 2890 2891 if (rp->MsgLength <= 5) { 2892 /* 2893 * This is just a ack of an original ELS buffer post 2894 */ 2895 mpt_lprt(mpt, MPT_PRT_DEBUG, 2896 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); 2897 return (TRUE); 2898 } 2899 2900 2901 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; 2902 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; 2903 2904 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; 2905 cmd = be32toh(elsbuf[0]) >> 24; 2906 2907 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { 2908 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); 2909 return (TRUE); 2910 } 2911 2912 ioindex = le32toh(rp->TransactionContext); 2913 req = mpt->els_cmd_ptrs[ioindex]; 2914 2915 if (rctl == ELS && type == 1) { 2916 switch (cmd) { 2917 case PRLI: 2918 /* 2919 * Send back a PRLI ACC 2920 */ 2921 mpt_prt(mpt, "PRLI from 0x%08x%08x\n", 2922 le32toh(rp->Wwn.PortNameHigh), 2923 le32toh(rp->Wwn.PortNameLow)); 2924 elsbuf[0] = htobe32(0x02100014); 2925 elsbuf[1] |= htobe32(0x00000100); 2926 elsbuf[4] = htobe32(0x00000002); 2927 if (mpt->role & MPT_ROLE_TARGET) 2928 elsbuf[4] |= htobe32(0x00000010); 2929 if (mpt->role & MPT_ROLE_INITIATOR) 2930 elsbuf[4] |= htobe32(0x00000020); 2931 /* remove from active list as we're done */ 2932 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2933 req->state &= ~REQ_STATE_QUEUED; 2934 req->state |= REQ_STATE_DONE; 2935 mpt_fc_els_send_response(mpt, req, rp, 20); 2936 do_refresh = FALSE; 2937 break; 2938 case PRLO: 2939 memset(elsbuf, 0, 5 * (sizeof (U32))); 2940 elsbuf[0] = htobe32(0x02100014); 2941 elsbuf[1] = htobe32(0x08000100); 2942 mpt_prt(mpt, "PRLO from 0x%08x%08x\n", 2943 le32toh(rp->Wwn.PortNameHigh), 2944 le32toh(rp->Wwn.PortNameLow)); 2945 /* remove from active list as we're done */ 2946 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2947 req->state &= ~REQ_STATE_QUEUED; 2948 req->state |= REQ_STATE_DONE; 2949 mpt_fc_els_send_response(mpt, req, rp, 20); 2950 do_refresh = FALSE; 2951 break; 2952 default: 2953 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); 2954 break; 2955 } 2956 } else if (rctl == ABTS && type == 0) { 2957 uint16_t rx_id = le16toh(rp->Rxid); 2958 uint16_t ox_id = le16toh(rp->Oxid); 2959 request_t *tgt_req = NULL; 2960 2961 mpt_prt(mpt, 2962 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", 2963 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), 2964 le32toh(rp->Wwn.PortNameLow)); 2965 if (rx_id >= mpt->mpt_max_tgtcmds) { 2966 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); 2967 } else if (mpt->tgt_cmd_ptrs == NULL) { 2968 mpt_prt(mpt, "No TGT CMD PTRS\n"); 2969 } else { 2970 tgt_req = mpt->tgt_cmd_ptrs[rx_id]; 2971 } 2972 if (tgt_req) { 2973 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req); 2974 union ccb *ccb; 2975 uint32_t ct_id; 2976 2977 /* 2978 * Check to make sure we have the correct command 2979 * The reply descriptor in the target state should 2980 * should contain an IoIndex that should match the 2981 * RX_ID. 2982 * 2983 * It'd be nice to have OX_ID to crosscheck with 2984 * as well. 2985 */ 2986 ct_id = GET_IO_INDEX(tgt->reply_desc); 2987 2988 if (ct_id != rx_id) { 2989 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " 2990 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n", 2991 rx_id, ct_id); 2992 goto skip; 2993 } 2994 2995 ccb = tgt->ccb; 2996 if (ccb) { 2997 mpt_prt(mpt, 2998 "CCB (%p): lun %u flags %x status %x\n", 2999 ccb, ccb->ccb_h.target_lun, 3000 ccb->ccb_h.flags, ccb->ccb_h.status); 3001 } 3002 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " 3003 "%x nxfers %x\n", tgt->state, 3004 tgt->resid, tgt->bytes_xfered, tgt->reply_desc, 3005 tgt->nxfers); 3006 skip: 3007 if (mpt_abort_target_cmd(mpt, tgt_req)) { 3008 mpt_prt(mpt, "unable to start TargetAbort\n"); 3009 } 3010 } else { 3011 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); 3012 } 3013 memset(elsbuf, 0, 5 * (sizeof (U32))); 3014 elsbuf[0] = htobe32(0); 3015 elsbuf[1] = htobe32((ox_id << 16) | rx_id); 3016 elsbuf[2] = htobe32(0x000ffff); 3017 /* 3018 * Dork with the reply frame so that the response to it 3019 * will be correct. 3020 */ 3021 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); 3022 /* remove from active list as we're done */ 3023 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3024 req->state &= ~REQ_STATE_QUEUED; 3025 req->state |= REQ_STATE_DONE; 3026 mpt_fc_els_send_response(mpt, req, rp, 12); 3027 do_refresh = FALSE; 3028 } else { 3029 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); 3030 } 3031 if (do_refresh == TRUE) { 3032 /* remove from active list as we're done */ 3033 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3034 req->state &= ~REQ_STATE_QUEUED; 3035 req->state |= REQ_STATE_DONE; 3036 mpt_fc_post_els(mpt, req, ioindex); 3037 } 3038 return (TRUE); 3039 } 3040 3041 /* 3042 * Clean up all SCSI Initiator personality state in response 3043 * to a controller reset. 3044 */ 3045 static void 3046 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) 3047 { 3048 /* 3049 * The pending list is already run down by 3050 * the generic handler. Perform the same 3051 * operation on the timed out request list. 3052 */ 3053 mpt_complete_request_chain(mpt, &mpt->request_timeout_list, 3054 MPI_IOCSTATUS_INVALID_STATE); 3055 3056 /* 3057 * XXX: We need to repost ELS and Target Command Buffers? 3058 */ 3059 3060 /* 3061 * Inform the XPT that a bus reset has occurred. 3062 */ 3063 xpt_async(AC_BUS_RESET, mpt->path, NULL); 3064 } 3065 3066 /* 3067 * Parse additional completion information in the reply 3068 * frame for SCSI I/O requests. 3069 */ 3070 static int 3071 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, 3072 MSG_DEFAULT_REPLY *reply_frame) 3073 { 3074 union ccb *ccb; 3075 MSG_SCSI_IO_REPLY *scsi_io_reply; 3076 u_int ioc_status; 3077 u_int sstate; 3078 3079 MPT_DUMP_REPLY_FRAME(mpt, reply_frame); 3080 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST 3081 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, 3082 ("MPT SCSI I/O Handler called with incorrect reply type")); 3083 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, 3084 ("MPT SCSI I/O Handler called with continuation reply")); 3085 3086 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; 3087 ioc_status = le16toh(scsi_io_reply->IOCStatus); 3088 ioc_status &= MPI_IOCSTATUS_MASK; 3089 sstate = scsi_io_reply->SCSIState; 3090 3091 ccb = req->ccb; 3092 ccb->csio.resid = 3093 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); 3094 3095 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 3096 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { 3097 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 3098 ccb->csio.sense_resid = 3099 ccb->csio.sense_len - le32toh(scsi_io_reply->SenseCount); 3100 bcopy(req->sense_vbuf, &ccb->csio.sense_data, 3101 min(ccb->csio.sense_len, 3102 le32toh(scsi_io_reply->SenseCount))); 3103 } 3104 3105 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { 3106 /* 3107 * Tag messages rejected, but non-tagged retry 3108 * was successful. 3109 XXXX 3110 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); 3111 */ 3112 } 3113 3114 switch(ioc_status) { 3115 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3116 /* 3117 * XXX 3118 * Linux driver indicates that a zero 3119 * transfer length with this error code 3120 * indicates a CRC error. 3121 * 3122 * No need to swap the bytes for checking 3123 * against zero. 3124 */ 3125 if (scsi_io_reply->TransferCount == 0) { 3126 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3127 break; 3128 } 3129 /* FALLTHROUGH */ 3130 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 3131 case MPI_IOCSTATUS_SUCCESS: 3132 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 3133 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { 3134 /* 3135 * Status was never returned for this transaction. 3136 */ 3137 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); 3138 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { 3139 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; 3140 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); 3141 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) 3142 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); 3143 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { 3144 3145 /* XXX Handle SPI-Packet and FCP-2 response info. */ 3146 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3147 } else 3148 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3149 break; 3150 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: 3151 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); 3152 break; 3153 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: 3154 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3155 break; 3156 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3157 /* 3158 * Since selection timeouts and "device really not 3159 * there" are grouped into this error code, report 3160 * selection timeout. Selection timeouts are 3161 * typically retried before giving up on the device 3162 * whereas "device not there" errors are considered 3163 * unretryable. 3164 */ 3165 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3166 break; 3167 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3168 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); 3169 break; 3170 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 3171 mpt_set_ccb_status(ccb, CAM_PATH_INVALID); 3172 break; 3173 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 3174 mpt_set_ccb_status(ccb, CAM_TID_INVALID); 3175 break; 3176 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3177 ccb->ccb_h.status = CAM_UA_TERMIO; 3178 break; 3179 case MPI_IOCSTATUS_INVALID_STATE: 3180 /* 3181 * The IOC has been reset. Emulate a bus reset. 3182 */ 3183 /* FALLTHROUGH */ 3184 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: 3185 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 3186 break; 3187 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: 3188 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: 3189 /* 3190 * Don't clobber any timeout status that has 3191 * already been set for this transaction. We 3192 * want the SCSI layer to be able to differentiate 3193 * between the command we aborted due to timeout 3194 * and any innocent bystanders. 3195 */ 3196 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) 3197 break; 3198 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); 3199 break; 3200 3201 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 3202 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); 3203 break; 3204 case MPI_IOCSTATUS_BUSY: 3205 mpt_set_ccb_status(ccb, CAM_BUSY); 3206 break; 3207 case MPI_IOCSTATUS_INVALID_FUNCTION: 3208 case MPI_IOCSTATUS_INVALID_SGL: 3209 case MPI_IOCSTATUS_INTERNAL_ERROR: 3210 case MPI_IOCSTATUS_INVALID_FIELD: 3211 default: 3212 /* XXX 3213 * Some of the above may need to kick 3214 * of a recovery action!!!! 3215 */ 3216 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; 3217 break; 3218 } 3219 3220 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3221 mpt_freeze_ccb(ccb); 3222 } 3223 3224 return (TRUE); 3225 } 3226 3227 static void 3228 mpt_action(struct cam_sim *sim, union ccb *ccb) 3229 { 3230 struct mpt_softc *mpt; 3231 struct ccb_trans_settings *cts; 3232 target_id_t tgt; 3233 lun_id_t lun; 3234 int raid_passthru; 3235 3236 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); 3237 3238 mpt = (struct mpt_softc *)cam_sim_softc(sim); 3239 raid_passthru = (sim == mpt->phydisk_sim); 3240 MPT_LOCK_ASSERT(mpt); 3241 3242 tgt = ccb->ccb_h.target_id; 3243 lun = ccb->ccb_h.target_lun; 3244 if (raid_passthru && 3245 ccb->ccb_h.func_code != XPT_PATH_INQ && 3246 ccb->ccb_h.func_code != XPT_RESET_BUS && 3247 ccb->ccb_h.func_code != XPT_RESET_DEV) { 3248 CAMLOCK_2_MPTLOCK(mpt); 3249 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 3250 MPTLOCK_2_CAMLOCK(mpt); 3251 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3252 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 3253 xpt_done(ccb); 3254 return; 3255 } 3256 MPTLOCK_2_CAMLOCK(mpt); 3257 } 3258 ccb->ccb_h.ccb_mpt_ptr = mpt; 3259 3260 switch (ccb->ccb_h.func_code) { 3261 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 3262 /* 3263 * Do a couple of preliminary checks... 3264 */ 3265 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 3266 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 3267 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3268 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3269 break; 3270 } 3271 } 3272 /* Max supported CDB length is 16 bytes */ 3273 /* XXX Unless we implement the new 32byte message type */ 3274 if (ccb->csio.cdb_len > 3275 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { 3276 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3277 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3278 break; 3279 } 3280 #ifdef MPT_TEST_MULTIPATH 3281 if (mpt->failure_id == ccb->ccb_h.target_id) { 3282 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3283 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3284 break; 3285 } 3286 #endif 3287 ccb->csio.scsi_status = SCSI_STATUS_OK; 3288 mpt_start(sim, ccb); 3289 return; 3290 3291 case XPT_RESET_BUS: 3292 if (raid_passthru) { 3293 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3294 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3295 break; 3296 } 3297 case XPT_RESET_DEV: 3298 if (ccb->ccb_h.func_code == XPT_RESET_BUS) { 3299 if (bootverbose) { 3300 xpt_print(ccb->ccb_h.path, "reset bus\n"); 3301 } 3302 } else { 3303 xpt_print(ccb->ccb_h.path, "reset device\n"); 3304 } 3305 CAMLOCK_2_MPTLOCK(mpt); 3306 (void) mpt_bus_reset(mpt, tgt, lun, FALSE); 3307 MPTLOCK_2_CAMLOCK(mpt); 3308 3309 /* 3310 * mpt_bus_reset is always successful in that it 3311 * will fall back to a hard reset should a bus 3312 * reset attempt fail. 3313 */ 3314 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3315 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3316 break; 3317 3318 case XPT_ABORT: 3319 { 3320 union ccb *accb = ccb->cab.abort_ccb; 3321 CAMLOCK_2_MPTLOCK(mpt); 3322 switch (accb->ccb_h.func_code) { 3323 case XPT_ACCEPT_TARGET_IO: 3324 case XPT_IMMED_NOTIFY: 3325 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); 3326 break; 3327 case XPT_CONT_TARGET_IO: 3328 mpt_prt(mpt, "cannot abort active CTIOs yet\n"); 3329 ccb->ccb_h.status = CAM_UA_ABORT; 3330 break; 3331 case XPT_SCSI_IO: 3332 ccb->ccb_h.status = CAM_UA_ABORT; 3333 break; 3334 default: 3335 ccb->ccb_h.status = CAM_REQ_INVALID; 3336 break; 3337 } 3338 MPTLOCK_2_CAMLOCK(mpt); 3339 break; 3340 } 3341 3342 #ifdef CAM_NEW_TRAN_CODE 3343 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) 3344 #else 3345 #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS) 3346 #endif 3347 #define DP_DISC_ENABLE 0x1 3348 #define DP_DISC_DISABL 0x2 3349 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) 3350 3351 #define DP_TQING_ENABLE 0x4 3352 #define DP_TQING_DISABL 0x8 3353 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) 3354 3355 #define DP_WIDE 0x10 3356 #define DP_NARROW 0x20 3357 #define DP_WIDTH (DP_WIDE|DP_NARROW) 3358 3359 #define DP_SYNC 0x40 3360 3361 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 3362 { 3363 #ifdef CAM_NEW_TRAN_CODE 3364 struct ccb_trans_settings_scsi *scsi; 3365 struct ccb_trans_settings_spi *spi; 3366 #endif 3367 uint8_t dval; 3368 u_int period; 3369 u_int offset; 3370 int i, j; 3371 3372 cts = &ccb->cts; 3373 3374 if (mpt->is_fc || mpt->is_sas) { 3375 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3376 break; 3377 } 3378 3379 #ifdef CAM_NEW_TRAN_CODE 3380 scsi = &cts->proto_specific.scsi; 3381 spi = &cts->xport_specific.spi; 3382 3383 /* 3384 * We can be called just to valid transport and proto versions 3385 */ 3386 if (scsi->valid == 0 && spi->valid == 0) { 3387 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3388 break; 3389 } 3390 #endif 3391 3392 /* 3393 * Skip attempting settings on RAID volume disks. 3394 * Other devices on the bus get the normal treatment. 3395 */ 3396 if (mpt->phydisk_sim && raid_passthru == 0 && 3397 mpt_is_raid_volume(mpt, tgt) != 0) { 3398 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3399 "no transfer settings for RAID vols\n"); 3400 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3401 break; 3402 } 3403 3404 i = mpt->mpt_port_page2.PortSettings & 3405 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 3406 j = mpt->mpt_port_page2.PortFlags & 3407 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 3408 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && 3409 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { 3410 mpt_lprt(mpt, MPT_PRT_ALWAYS, 3411 "honoring BIOS transfer negotiations\n"); 3412 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3413 break; 3414 } 3415 3416 dval = 0; 3417 period = 0; 3418 offset = 0; 3419 3420 #ifndef CAM_NEW_TRAN_CODE 3421 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 3422 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ? 3423 DP_DISC_ENABLE : DP_DISC_DISABL; 3424 } 3425 3426 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 3427 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ? 3428 DP_TQING_ENABLE : DP_TQING_DISABL; 3429 } 3430 3431 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 3432 dval |= cts->bus_width ? DP_WIDE : DP_NARROW; 3433 } 3434 3435 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 3436 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) { 3437 dval |= DP_SYNC; 3438 period = cts->sync_period; 3439 offset = cts->sync_offset; 3440 } 3441 #else 3442 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 3443 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? 3444 DP_DISC_ENABLE : DP_DISC_DISABL; 3445 } 3446 3447 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 3448 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 3449 DP_TQING_ENABLE : DP_TQING_DISABL; 3450 } 3451 3452 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 3453 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? 3454 DP_WIDE : DP_NARROW; 3455 } 3456 3457 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { 3458 dval |= DP_SYNC; 3459 offset = spi->sync_offset; 3460 } else { 3461 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3462 &mpt->mpt_dev_page1[tgt]; 3463 offset = ptr->RequestedParameters; 3464 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3465 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3466 } 3467 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { 3468 dval |= DP_SYNC; 3469 period = spi->sync_period; 3470 } else { 3471 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3472 &mpt->mpt_dev_page1[tgt]; 3473 period = ptr->RequestedParameters; 3474 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3475 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3476 } 3477 #endif 3478 CAMLOCK_2_MPTLOCK(mpt); 3479 if (dval & DP_DISC_ENABLE) { 3480 mpt->mpt_disc_enable |= (1 << tgt); 3481 } else if (dval & DP_DISC_DISABL) { 3482 mpt->mpt_disc_enable &= ~(1 << tgt); 3483 } 3484 if (dval & DP_TQING_ENABLE) { 3485 mpt->mpt_tag_enable |= (1 << tgt); 3486 } else if (dval & DP_TQING_DISABL) { 3487 mpt->mpt_tag_enable &= ~(1 << tgt); 3488 } 3489 if (dval & DP_WIDTH) { 3490 mpt_setwidth(mpt, tgt, 1); 3491 } 3492 if (dval & DP_SYNC) { 3493 mpt_setsync(mpt, tgt, period, offset); 3494 } 3495 if (dval == 0) { 3496 MPTLOCK_2_CAMLOCK(mpt); 3497 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3498 break; 3499 } 3500 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3501 "set [%d]: 0x%x period 0x%x offset %d\n", 3502 tgt, dval, period, offset); 3503 if (mpt_update_spi_config(mpt, tgt)) { 3504 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3505 } else { 3506 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3507 } 3508 MPTLOCK_2_CAMLOCK(mpt); 3509 break; 3510 } 3511 case XPT_GET_TRAN_SETTINGS: 3512 { 3513 #ifdef CAM_NEW_TRAN_CODE 3514 struct ccb_trans_settings_scsi *scsi; 3515 cts = &ccb->cts; 3516 cts->protocol = PROTO_SCSI; 3517 if (mpt->is_fc) { 3518 struct ccb_trans_settings_fc *fc = 3519 &cts->xport_specific.fc; 3520 cts->protocol_version = SCSI_REV_SPC; 3521 cts->transport = XPORT_FC; 3522 cts->transport_version = 0; 3523 fc->valid = CTS_FC_VALID_SPEED; 3524 fc->bitrate = 100000; 3525 } else if (mpt->is_sas) { 3526 struct ccb_trans_settings_sas *sas = 3527 &cts->xport_specific.sas; 3528 cts->protocol_version = SCSI_REV_SPC2; 3529 cts->transport = XPORT_SAS; 3530 cts->transport_version = 0; 3531 sas->valid = CTS_SAS_VALID_SPEED; 3532 sas->bitrate = 300000; 3533 } else { 3534 cts->protocol_version = SCSI_REV_2; 3535 cts->transport = XPORT_SPI; 3536 cts->transport_version = 2; 3537 if (mpt_get_spi_settings(mpt, cts) != 0) { 3538 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3539 break; 3540 } 3541 } 3542 scsi = &cts->proto_specific.scsi; 3543 scsi->valid = CTS_SCSI_VALID_TQ; 3544 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 3545 #else 3546 cts = &ccb->cts; 3547 if (mpt->is_fc) { 3548 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3549 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3550 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3551 } else if (mpt->is_sas) { 3552 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3553 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3554 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3555 } else if (mpt_get_spi_settings(mpt, cts) != 0) { 3556 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3557 break; 3558 } 3559 #endif 3560 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3561 break; 3562 } 3563 case XPT_CALC_GEOMETRY: 3564 { 3565 struct ccb_calc_geometry *ccg; 3566 3567 ccg = &ccb->ccg; 3568 if (ccg->block_size == 0) { 3569 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3570 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3571 break; 3572 } 3573 mpt_calc_geometry(ccg, /*extended*/1); 3574 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 3575 break; 3576 } 3577 case XPT_PATH_INQ: /* Path routing inquiry */ 3578 { 3579 struct ccb_pathinq *cpi = &ccb->cpi; 3580 3581 cpi->version_num = 1; 3582 cpi->target_sprt = 0; 3583 cpi->hba_eng_cnt = 0; 3584 cpi->max_target = mpt->port_facts[0].MaxDevices - 1; 3585 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE; 3586 /* 3587 * FC cards report MAX_DEVICES of 512, but 3588 * the MSG_SCSI_IO_REQUEST target id field 3589 * is only 8 bits. Until we fix the driver 3590 * to support 'channels' for bus overflow, 3591 * just limit it. 3592 */ 3593 if (cpi->max_target > 255) { 3594 cpi->max_target = 255; 3595 } 3596 3597 /* 3598 * VMware ESX reports > 16 devices and then dies when we probe. 3599 */ 3600 if (mpt->is_spi && cpi->max_target > 15) { 3601 cpi->max_target = 15; 3602 } 3603 if (mpt->is_spi) 3604 cpi->max_lun = 7; 3605 else 3606 cpi->max_lun = MPT_MAX_LUNS; 3607 cpi->initiator_id = mpt->mpt_ini_id; 3608 cpi->bus_id = cam_sim_bus(sim); 3609 3610 /* 3611 * The base speed is the speed of the underlying connection. 3612 */ 3613 #ifdef CAM_NEW_TRAN_CODE 3614 cpi->protocol = PROTO_SCSI; 3615 if (mpt->is_fc) { 3616 cpi->hba_misc = PIM_NOBUSRESET; 3617 cpi->base_transfer_speed = 100000; 3618 cpi->hba_inquiry = PI_TAG_ABLE; 3619 cpi->transport = XPORT_FC; 3620 cpi->transport_version = 0; 3621 cpi->protocol_version = SCSI_REV_SPC; 3622 } else if (mpt->is_sas) { 3623 cpi->hba_misc = PIM_NOBUSRESET; 3624 cpi->base_transfer_speed = 300000; 3625 cpi->hba_inquiry = PI_TAG_ABLE; 3626 cpi->transport = XPORT_SAS; 3627 cpi->transport_version = 0; 3628 cpi->protocol_version = SCSI_REV_SPC2; 3629 } else { 3630 cpi->hba_misc = PIM_SEQSCAN; 3631 cpi->base_transfer_speed = 3300; 3632 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3633 cpi->transport = XPORT_SPI; 3634 cpi->transport_version = 2; 3635 cpi->protocol_version = SCSI_REV_2; 3636 } 3637 #else 3638 if (mpt->is_fc) { 3639 cpi->hba_misc = PIM_NOBUSRESET; 3640 cpi->base_transfer_speed = 100000; 3641 cpi->hba_inquiry = PI_TAG_ABLE; 3642 } else if (mpt->is_sas) { 3643 cpi->hba_misc = PIM_NOBUSRESET; 3644 cpi->base_transfer_speed = 300000; 3645 cpi->hba_inquiry = PI_TAG_ABLE; 3646 } else { 3647 cpi->hba_misc = PIM_SEQSCAN; 3648 cpi->base_transfer_speed = 3300; 3649 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3650 } 3651 #endif 3652 3653 /* 3654 * We give our fake RAID passhtru bus a width that is MaxVolumes 3655 * wide and restrict it to one lun. 3656 */ 3657 if (raid_passthru) { 3658 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; 3659 cpi->initiator_id = cpi->max_target + 1; 3660 cpi->max_lun = 0; 3661 } 3662 3663 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { 3664 cpi->hba_misc |= PIM_NOINITIATOR; 3665 } 3666 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 3667 cpi->target_sprt = 3668 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3669 } else { 3670 cpi->target_sprt = 0; 3671 } 3672 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3673 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); 3674 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3675 cpi->unit_number = cam_sim_unit(sim); 3676 cpi->ccb_h.status = CAM_REQ_CMP; 3677 break; 3678 } 3679 case XPT_EN_LUN: /* Enable LUN as a target */ 3680 { 3681 int result; 3682 3683 CAMLOCK_2_MPTLOCK(mpt); 3684 if (ccb->cel.enable) 3685 result = mpt_enable_lun(mpt, 3686 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3687 else 3688 result = mpt_disable_lun(mpt, 3689 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3690 MPTLOCK_2_CAMLOCK(mpt); 3691 if (result == 0) { 3692 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3693 } else { 3694 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3695 } 3696 break; 3697 } 3698 case XPT_NOTIFY_ACK: /* recycle notify ack */ 3699 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 3700 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 3701 { 3702 tgt_resource_t *trtp; 3703 lun_id_t lun = ccb->ccb_h.target_lun; 3704 ccb->ccb_h.sim_priv.entries[0].field = 0; 3705 ccb->ccb_h.sim_priv.entries[1].ptr = mpt; 3706 ccb->ccb_h.flags = 0; 3707 3708 if (lun == CAM_LUN_WILDCARD) { 3709 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 3710 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3711 break; 3712 } 3713 trtp = &mpt->trt_wildcard; 3714 } else if (lun >= MPT_MAX_LUNS) { 3715 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3716 break; 3717 } else { 3718 trtp = &mpt->trt[lun]; 3719 } 3720 CAMLOCK_2_MPTLOCK(mpt); 3721 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 3722 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3723 "Put FREE ATIO %p lun %d\n", ccb, lun); 3724 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, 3725 sim_links.stqe); 3726 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 3727 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3728 "Put FREE INOT lun %d\n", lun); 3729 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, 3730 sim_links.stqe); 3731 } else { 3732 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); 3733 } 3734 mpt_set_ccb_status(ccb, CAM_REQ_INPROG); 3735 MPTLOCK_2_CAMLOCK(mpt); 3736 return; 3737 } 3738 case XPT_CONT_TARGET_IO: 3739 CAMLOCK_2_MPTLOCK(mpt); 3740 mpt_target_start_io(mpt, ccb); 3741 MPTLOCK_2_CAMLOCK(mpt); 3742 return; 3743 3744 default: 3745 ccb->ccb_h.status = CAM_REQ_INVALID; 3746 break; 3747 } 3748 xpt_done(ccb); 3749 } 3750 3751 static int 3752 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) 3753 { 3754 #ifdef CAM_NEW_TRAN_CODE 3755 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 3756 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 3757 #endif 3758 target_id_t tgt; 3759 uint32_t dval, pval, oval; 3760 int rv; 3761 3762 if (IS_CURRENT_SETTINGS(cts) == 0) { 3763 tgt = cts->ccb_h.target_id; 3764 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { 3765 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { 3766 return (-1); 3767 } 3768 } else { 3769 tgt = cts->ccb_h.target_id; 3770 } 3771 3772 /* 3773 * We aren't looking at Port Page 2 BIOS settings here- 3774 * sometimes these have been known to be bogus XXX. 3775 * 3776 * For user settings, we pick the max from port page 0 3777 * 3778 * For current settings we read the current settings out from 3779 * device page 0 for that target. 3780 */ 3781 if (IS_CURRENT_SETTINGS(cts)) { 3782 CONFIG_PAGE_SCSI_DEVICE_0 tmp; 3783 dval = 0; 3784 3785 CAMLOCK_2_MPTLOCK(mpt); 3786 tmp = mpt->mpt_dev_page0[tgt]; 3787 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, 3788 sizeof(tmp), FALSE, 5000); 3789 if (rv) { 3790 MPTLOCK_2_CAMLOCK(mpt); 3791 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); 3792 return (rv); 3793 } 3794 mpt2host_config_page_scsi_device_0(&tmp); 3795 3796 MPTLOCK_2_CAMLOCK(mpt); 3797 mpt_lprt(mpt, MPT_PRT_DEBUG, 3798 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, 3799 tmp.NegotiatedParameters, tmp.Information); 3800 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? 3801 DP_WIDE : DP_NARROW; 3802 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? 3803 DP_DISC_ENABLE : DP_DISC_DISABL; 3804 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? 3805 DP_TQING_ENABLE : DP_TQING_DISABL; 3806 oval = tmp.NegotiatedParameters; 3807 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; 3808 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; 3809 pval = tmp.NegotiatedParameters; 3810 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; 3811 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; 3812 mpt->mpt_dev_page0[tgt] = tmp; 3813 } else { 3814 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; 3815 oval = mpt->mpt_port_page0.Capabilities; 3816 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); 3817 pval = mpt->mpt_port_page0.Capabilities; 3818 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); 3819 } 3820 3821 #ifndef CAM_NEW_TRAN_CODE 3822 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 3823 cts->valid = 0; 3824 cts->sync_period = pval; 3825 cts->sync_offset = oval; 3826 cts->valid |= CCB_TRANS_SYNC_RATE_VALID; 3827 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID; 3828 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID; 3829 if (dval & DP_WIDE) { 3830 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3831 } else { 3832 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3833 } 3834 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3835 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3836 if (dval & DP_DISC_ENABLE) { 3837 cts->flags |= CCB_TRANS_DISC_ENB; 3838 } 3839 if (dval & DP_TQING_ENABLE) { 3840 cts->flags |= CCB_TRANS_TAG_ENB; 3841 } 3842 } 3843 #else 3844 spi->valid = 0; 3845 scsi->valid = 0; 3846 spi->flags = 0; 3847 scsi->flags = 0; 3848 spi->sync_offset = oval; 3849 spi->sync_period = pval; 3850 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3851 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3852 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3853 if (dval & DP_WIDE) { 3854 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3855 } else { 3856 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3857 } 3858 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3859 scsi->valid = CTS_SCSI_VALID_TQ; 3860 if (dval & DP_TQING_ENABLE) { 3861 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3862 } 3863 spi->valid |= CTS_SPI_VALID_DISC; 3864 if (dval & DP_DISC_ENABLE) { 3865 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3866 } 3867 } 3868 #endif 3869 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3870 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, 3871 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval); 3872 return (0); 3873 } 3874 3875 static void 3876 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 3877 { 3878 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3879 3880 ptr = &mpt->mpt_dev_page1[tgt]; 3881 if (onoff) { 3882 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 3883 } else { 3884 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 3885 } 3886 } 3887 3888 static void 3889 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 3890 { 3891 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3892 3893 ptr = &mpt->mpt_dev_page1[tgt]; 3894 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3895 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3896 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 3897 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 3898 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 3899 if (period == 0) { 3900 return; 3901 } 3902 ptr->RequestedParameters |= 3903 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3904 ptr->RequestedParameters |= 3905 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3906 if (period < 0xa) { 3907 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 3908 } 3909 if (period < 0x9) { 3910 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 3911 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 3912 } 3913 } 3914 3915 static int 3916 mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 3917 { 3918 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 3919 int rv; 3920 3921 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3922 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 3923 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); 3924 tmp = mpt->mpt_dev_page1[tgt]; 3925 host2mpt_config_page_scsi_device_1(&tmp); 3926 rv = mpt_write_cur_cfg_page(mpt, tgt, 3927 &tmp.Header, sizeof(tmp), FALSE, 5000); 3928 if (rv) { 3929 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 3930 return (-1); 3931 } 3932 return (0); 3933 } 3934 3935 static void 3936 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended) 3937 { 3938 #if __FreeBSD_version >= 500000 3939 cam_calc_geometry(ccg, extended); 3940 #else 3941 uint32_t size_mb; 3942 uint32_t secs_per_cylinder; 3943 3944 if (ccg->block_size == 0) { 3945 ccg->ccb_h.status = CAM_REQ_INVALID; 3946 return; 3947 } 3948 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); 3949 if (size_mb > 1024 && extended) { 3950 ccg->heads = 255; 3951 ccg->secs_per_track = 63; 3952 } else { 3953 ccg->heads = 64; 3954 ccg->secs_per_track = 32; 3955 } 3956 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 3957 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 3958 ccg->ccb_h.status = CAM_REQ_CMP; 3959 #endif 3960 } 3961 3962 /****************************** Timeout Recovery ******************************/ 3963 static int 3964 mpt_spawn_recovery_thread(struct mpt_softc *mpt) 3965 { 3966 int error; 3967 3968 error = mpt_kthread_create(mpt_recovery_thread, mpt, 3969 &mpt->recovery_thread, /*flags*/0, 3970 /*altstack*/0, "mpt_recovery%d", mpt->unit); 3971 return (error); 3972 } 3973 3974 static void 3975 mpt_terminate_recovery_thread(struct mpt_softc *mpt) 3976 { 3977 if (mpt->recovery_thread == NULL) { 3978 return; 3979 } 3980 mpt->shutdwn_recovery = 1; 3981 wakeup(mpt); 3982 /* 3983 * Sleep on a slightly different location 3984 * for this interlock just for added safety. 3985 */ 3986 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); 3987 } 3988 3989 static void 3990 mpt_recovery_thread(void *arg) 3991 { 3992 struct mpt_softc *mpt; 3993 3994 mpt = (struct mpt_softc *)arg; 3995 MPT_LOCK(mpt); 3996 for (;;) { 3997 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3998 if (mpt->shutdwn_recovery == 0) { 3999 mpt_sleep(mpt, mpt, PUSER, "idle", 0); 4000 } 4001 } 4002 if (mpt->shutdwn_recovery != 0) { 4003 break; 4004 } 4005 mpt_recover_commands(mpt); 4006 } 4007 mpt->recovery_thread = NULL; 4008 wakeup(&mpt->recovery_thread); 4009 MPT_UNLOCK(mpt); 4010 mpt_kthread_exit(0); 4011 } 4012 4013 static int 4014 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, 4015 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) 4016 { 4017 MSG_SCSI_TASK_MGMT *tmf_req; 4018 int error; 4019 4020 /* 4021 * Wait for any current TMF request to complete. 4022 * We're only allowed to issue one TMF at a time. 4023 */ 4024 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, 4025 sleep_ok, MPT_TMF_MAX_TIMEOUT); 4026 if (error != 0) { 4027 mpt_reset(mpt, TRUE); 4028 return (ETIMEDOUT); 4029 } 4030 4031 mpt_assign_serno(mpt, mpt->tmf_req); 4032 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; 4033 4034 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; 4035 memset(tmf_req, 0, sizeof(*tmf_req)); 4036 tmf_req->TargetID = target; 4037 tmf_req->Bus = channel; 4038 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 4039 tmf_req->TaskType = type; 4040 tmf_req->MsgFlags = flags; 4041 tmf_req->MsgContext = 4042 htole32(mpt->tmf_req->index | scsi_tmf_handler_id); 4043 if (lun > MPT_MAX_LUNS) { 4044 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4045 tmf_req->LUN[1] = lun & 0xff; 4046 } else { 4047 tmf_req->LUN[1] = lun; 4048 } 4049 tmf_req->TaskMsgContext = abort_ctx; 4050 4051 mpt_lprt(mpt, MPT_PRT_DEBUG, 4052 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, 4053 mpt->tmf_req->serno, tmf_req->MsgContext); 4054 if (mpt->verbose > MPT_PRT_DEBUG) { 4055 mpt_print_request(tmf_req); 4056 } 4057 4058 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, 4059 ("mpt_scsi_send_tmf: tmf_req already on pending list")); 4060 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); 4061 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); 4062 if (error != MPT_OK) { 4063 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); 4064 mpt->tmf_req->state = REQ_STATE_FREE; 4065 mpt_reset(mpt, TRUE); 4066 } 4067 return (error); 4068 } 4069 4070 /* 4071 * When a command times out, it is placed on the requeust_timeout_list 4072 * and we wake our recovery thread. The MPT-Fusion architecture supports 4073 * only a single TMF operation at a time, so we serially abort/bdr, etc, 4074 * the timedout transactions. The next TMF is issued either by the 4075 * completion handler of the current TMF waking our recovery thread, 4076 * or the TMF timeout handler causing a hard reset sequence. 4077 */ 4078 static void 4079 mpt_recover_commands(struct mpt_softc *mpt) 4080 { 4081 request_t *req; 4082 union ccb *ccb; 4083 int error; 4084 4085 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4086 /* 4087 * No work to do- leave. 4088 */ 4089 mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); 4090 return; 4091 } 4092 4093 /* 4094 * Flush any commands whose completion coincides with their timeout. 4095 */ 4096 mpt_intr(mpt); 4097 4098 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4099 /* 4100 * The timedout commands have already 4101 * completed. This typically means 4102 * that either the timeout value was on 4103 * the hairy edge of what the device 4104 * requires or - more likely - interrupts 4105 * are not happening. 4106 */ 4107 mpt_prt(mpt, "Timedout requests already complete. " 4108 "Interrupts may not be functioning.\n"); 4109 mpt_enable_ints(mpt); 4110 return; 4111 } 4112 4113 /* 4114 * We have no visibility into the current state of the 4115 * controller, so attempt to abort the commands in the 4116 * order they timed-out. For initiator commands, we 4117 * depend on the reply handler pulling requests off 4118 * the timeout list. 4119 */ 4120 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { 4121 uint16_t status; 4122 uint8_t response; 4123 MSG_REQUEST_HEADER *hdrp = req->req_vbuf; 4124 4125 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", 4126 req, req->serno, hdrp->Function); 4127 ccb = req->ccb; 4128 if (ccb == NULL) { 4129 mpt_prt(mpt, "null ccb in timed out request. " 4130 "Resetting Controller.\n"); 4131 mpt_reset(mpt, TRUE); 4132 continue; 4133 } 4134 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); 4135 4136 /* 4137 * Check to see if this is not an initiator command and 4138 * deal with it differently if it is. 4139 */ 4140 switch (hdrp->Function) { 4141 case MPI_FUNCTION_SCSI_IO_REQUEST: 4142 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 4143 break; 4144 default: 4145 /* 4146 * XXX: FIX ME: need to abort target assists... 4147 */ 4148 mpt_prt(mpt, "just putting it back on the pend q\n"); 4149 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 4150 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, 4151 links); 4152 continue; 4153 } 4154 4155 error = mpt_scsi_send_tmf(mpt, 4156 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 4157 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 4158 htole32(req->index | scsi_io_handler_id), TRUE); 4159 4160 if (error != 0) { 4161 /* 4162 * mpt_scsi_send_tmf hard resets on failure, so no 4163 * need to do so here. Our queue should be emptied 4164 * by the hard reset. 4165 */ 4166 continue; 4167 } 4168 4169 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 4170 REQ_STATE_DONE, TRUE, 500); 4171 4172 status = le16toh(mpt->tmf_req->IOCStatus); 4173 response = mpt->tmf_req->ResponseCode; 4174 mpt->tmf_req->state = REQ_STATE_FREE; 4175 4176 if (error != 0) { 4177 /* 4178 * If we've errored out,, reset the controller. 4179 */ 4180 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " 4181 "Resetting controller\n"); 4182 mpt_reset(mpt, TRUE); 4183 continue; 4184 } 4185 4186 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 4187 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " 4188 "Resetting controller.\n", status); 4189 mpt_reset(mpt, TRUE); 4190 continue; 4191 } 4192 4193 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 4194 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 4195 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. " 4196 "Resetting controller.\n", response); 4197 mpt_reset(mpt, TRUE); 4198 continue; 4199 } 4200 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 4201 } 4202 } 4203 4204 /************************ Target Mode Support ****************************/ 4205 static void 4206 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 4207 { 4208 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 4209 PTR_SGE_TRANSACTION32 tep; 4210 PTR_SGE_SIMPLE32 se; 4211 bus_addr_t paddr; 4212 uint32_t fl; 4213 4214 paddr = req->req_pbuf; 4215 paddr += MPT_RQSL(mpt); 4216 4217 fc = req->req_vbuf; 4218 memset(fc, 0, MPT_REQUEST_AREA); 4219 fc->BufferCount = 1; 4220 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 4221 fc->MsgContext = htole32(req->index | fc_els_handler_id); 4222 4223 /* 4224 * Okay, set up ELS buffer pointers. ELS buffer pointers 4225 * consist of a TE SGL element (with details length of zero) 4226 * followed by a SIMPLE SGL element which holds the address 4227 * of the buffer. 4228 */ 4229 4230 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 4231 4232 tep->ContextSize = 4; 4233 tep->Flags = 0; 4234 tep->TransactionContext[0] = htole32(ioindex); 4235 4236 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 4237 fl = 4238 MPI_SGE_FLAGS_HOST_TO_IOC | 4239 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4240 MPI_SGE_FLAGS_LAST_ELEMENT | 4241 MPI_SGE_FLAGS_END_OF_LIST | 4242 MPI_SGE_FLAGS_END_OF_BUFFER; 4243 fl <<= MPI_SGE_FLAGS_SHIFT; 4244 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 4245 se->FlagsLength = htole32(fl); 4246 se->Address = htole32((uint32_t) paddr); 4247 mpt_lprt(mpt, MPT_PRT_DEBUG, 4248 "add ELS index %d ioindex %d for %p:%u\n", 4249 req->index, ioindex, req, req->serno); 4250 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 4251 ("mpt_fc_post_els: request not locked")); 4252 mpt_send_cmd(mpt, req); 4253 } 4254 4255 static void 4256 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 4257 { 4258 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 4259 PTR_CMD_BUFFER_DESCRIPTOR cb; 4260 bus_addr_t paddr; 4261 4262 paddr = req->req_pbuf; 4263 paddr += MPT_RQSL(mpt); 4264 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 4265 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 4266 4267 fc = req->req_vbuf; 4268 fc->BufferCount = 1; 4269 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 4270 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4271 4272 cb = &fc->Buffer[0]; 4273 cb->IoIndex = htole16(ioindex); 4274 cb->u.PhysicalAddress32 = htole32((U32) paddr); 4275 4276 mpt_check_doorbell(mpt); 4277 mpt_send_cmd(mpt, req); 4278 } 4279 4280 static int 4281 mpt_add_els_buffers(struct mpt_softc *mpt) 4282 { 4283 int i; 4284 4285 if (mpt->is_fc == 0) { 4286 return (TRUE); 4287 } 4288 4289 if (mpt->els_cmds_allocated) { 4290 return (TRUE); 4291 } 4292 4293 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), 4294 M_DEVBUF, M_NOWAIT | M_ZERO); 4295 4296 if (mpt->els_cmd_ptrs == NULL) { 4297 return (FALSE); 4298 } 4299 4300 /* 4301 * Feed the chip some ELS buffer resources 4302 */ 4303 for (i = 0; i < MPT_MAX_ELS; i++) { 4304 request_t *req = mpt_get_request(mpt, FALSE); 4305 if (req == NULL) { 4306 break; 4307 } 4308 req->state |= REQ_STATE_LOCKED; 4309 mpt->els_cmd_ptrs[i] = req; 4310 mpt_fc_post_els(mpt, req, i); 4311 } 4312 4313 if (i == 0) { 4314 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 4315 free(mpt->els_cmd_ptrs, M_DEVBUF); 4316 mpt->els_cmd_ptrs = NULL; 4317 return (FALSE); 4318 } 4319 if (i != MPT_MAX_ELS) { 4320 mpt_lprt(mpt, MPT_PRT_INFO, 4321 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 4322 } 4323 mpt->els_cmds_allocated = i; 4324 return(TRUE); 4325 } 4326 4327 static int 4328 mpt_add_target_commands(struct mpt_softc *mpt) 4329 { 4330 int i, max; 4331 4332 if (mpt->tgt_cmd_ptrs) { 4333 return (TRUE); 4334 } 4335 4336 max = MPT_MAX_REQUESTS(mpt) >> 1; 4337 if (max > mpt->mpt_max_tgtcmds) { 4338 max = mpt->mpt_max_tgtcmds; 4339 } 4340 mpt->tgt_cmd_ptrs = 4341 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 4342 if (mpt->tgt_cmd_ptrs == NULL) { 4343 mpt_prt(mpt, 4344 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 4345 return (FALSE); 4346 } 4347 4348 for (i = 0; i < max; i++) { 4349 request_t *req; 4350 4351 req = mpt_get_request(mpt, FALSE); 4352 if (req == NULL) { 4353 break; 4354 } 4355 req->state |= REQ_STATE_LOCKED; 4356 mpt->tgt_cmd_ptrs[i] = req; 4357 mpt_post_target_command(mpt, req, i); 4358 } 4359 4360 4361 if (i == 0) { 4362 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 4363 free(mpt->tgt_cmd_ptrs, M_DEVBUF); 4364 mpt->tgt_cmd_ptrs = NULL; 4365 return (FALSE); 4366 } 4367 4368 mpt->tgt_cmds_allocated = i; 4369 4370 if (i < max) { 4371 mpt_lprt(mpt, MPT_PRT_INFO, 4372 "added %d of %d target bufs\n", i, max); 4373 } 4374 return (i); 4375 } 4376 4377 static int 4378 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4379 { 4380 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4381 mpt->twildcard = 1; 4382 } else if (lun >= MPT_MAX_LUNS) { 4383 return (EINVAL); 4384 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4385 return (EINVAL); 4386 } 4387 if (mpt->tenabled == 0) { 4388 if (mpt->is_fc) { 4389 (void) mpt_fc_reset_link(mpt, 0); 4390 } 4391 mpt->tenabled = 1; 4392 } 4393 if (lun == CAM_LUN_WILDCARD) { 4394 mpt->trt_wildcard.enabled = 1; 4395 } else { 4396 mpt->trt[lun].enabled = 1; 4397 } 4398 return (0); 4399 } 4400 4401 static int 4402 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4403 { 4404 int i; 4405 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4406 mpt->twildcard = 0; 4407 } else if (lun >= MPT_MAX_LUNS) { 4408 return (EINVAL); 4409 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4410 return (EINVAL); 4411 } 4412 if (lun == CAM_LUN_WILDCARD) { 4413 mpt->trt_wildcard.enabled = 0; 4414 } else { 4415 mpt->trt[lun].enabled = 0; 4416 } 4417 for (i = 0; i < MPT_MAX_LUNS; i++) { 4418 if (mpt->trt[lun].enabled) { 4419 break; 4420 } 4421 } 4422 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4423 if (mpt->is_fc) { 4424 (void) mpt_fc_reset_link(mpt, 0); 4425 } 4426 mpt->tenabled = 0; 4427 } 4428 return (0); 4429 } 4430 4431 /* 4432 * Called with MPT lock held 4433 */ 4434 static void 4435 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4436 { 4437 struct ccb_scsiio *csio = &ccb->csio; 4438 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4439 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4440 4441 switch (tgt->state) { 4442 case TGT_STATE_IN_CAM: 4443 break; 4444 case TGT_STATE_MOVING_DATA: 4445 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4446 xpt_freeze_simq(mpt->sim, 1); 4447 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4448 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4449 MPTLOCK_2_CAMLOCK(mpt); 4450 xpt_done(ccb); 4451 CAMLOCK_2_MPTLOCK(mpt); 4452 return; 4453 default: 4454 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4455 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4456 mpt_tgt_dump_req_state(mpt, cmd_req); 4457 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 4458 MPTLOCK_2_CAMLOCK(mpt); 4459 xpt_done(ccb); 4460 CAMLOCK_2_MPTLOCK(mpt); 4461 return; 4462 } 4463 4464 if (csio->dxfer_len) { 4465 bus_dmamap_callback_t *cb; 4466 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4467 request_t *req; 4468 4469 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, 4470 ("dxfer_len %u but direction is NONE\n", csio->dxfer_len)); 4471 4472 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4473 if (mpt->outofbeer == 0) { 4474 mpt->outofbeer = 1; 4475 xpt_freeze_simq(mpt->sim, 1); 4476 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4477 } 4478 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4479 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4480 MPTLOCK_2_CAMLOCK(mpt); 4481 xpt_done(ccb); 4482 CAMLOCK_2_MPTLOCK(mpt); 4483 return; 4484 } 4485 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4486 if (sizeof (bus_addr_t) > 4) { 4487 cb = mpt_execute_req_a64; 4488 } else { 4489 cb = mpt_execute_req; 4490 } 4491 4492 req->ccb = ccb; 4493 ccb->ccb_h.ccb_req_ptr = req; 4494 4495 /* 4496 * Record the currently active ccb and the 4497 * request for it in our target state area. 4498 */ 4499 tgt->ccb = ccb; 4500 tgt->req = req; 4501 4502 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4503 ta = req->req_vbuf; 4504 4505 if (mpt->is_sas) { 4506 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4507 cmd_req->req_vbuf; 4508 ta->QueueTag = ssp->InitiatorTag; 4509 } else if (mpt->is_spi) { 4510 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4511 cmd_req->req_vbuf; 4512 ta->QueueTag = sp->Tag; 4513 } 4514 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4515 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4516 ta->ReplyWord = htole32(tgt->reply_desc); 4517 if (csio->ccb_h.target_lun > MPT_MAX_LUNS) { 4518 ta->LUN[0] = 4519 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); 4520 ta->LUN[1] = csio->ccb_h.target_lun & 0xff; 4521 } else { 4522 ta->LUN[1] = csio->ccb_h.target_lun; 4523 } 4524 4525 ta->RelativeOffset = tgt->bytes_xfered; 4526 ta->DataLength = ccb->csio.dxfer_len; 4527 if (ta->DataLength > tgt->resid) { 4528 ta->DataLength = tgt->resid; 4529 } 4530 4531 /* 4532 * XXX Should be done after data transfer completes? 4533 */ 4534 tgt->resid -= csio->dxfer_len; 4535 tgt->bytes_xfered += csio->dxfer_len; 4536 4537 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 4538 ta->TargetAssistFlags |= 4539 TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4540 } 4541 4542 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4543 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 4544 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 4545 ta->TargetAssistFlags |= 4546 TARGET_ASSIST_FLAGS_AUTO_STATUS; 4547 } 4548 #endif 4549 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; 4550 4551 mpt_lprt(mpt, MPT_PRT_DEBUG, 4552 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " 4553 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, 4554 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); 4555 4556 MPTLOCK_2_CAMLOCK(mpt); 4557 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 4558 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 4559 int error; 4560 int s = splsoftvm(); 4561 error = bus_dmamap_load(mpt->buffer_dmat, 4562 req->dmap, csio->data_ptr, csio->dxfer_len, 4563 cb, req, 0); 4564 splx(s); 4565 if (error == EINPROGRESS) { 4566 xpt_freeze_simq(mpt->sim, 1); 4567 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4568 } 4569 } else { 4570 /* 4571 * We have been given a pointer to single 4572 * physical buffer. 4573 */ 4574 struct bus_dma_segment seg; 4575 seg.ds_addr = (bus_addr_t) 4576 (vm_offset_t)csio->data_ptr; 4577 seg.ds_len = csio->dxfer_len; 4578 (*cb)(req, &seg, 1, 0); 4579 } 4580 } else { 4581 /* 4582 * We have been given a list of addresses. 4583 * This case could be easily supported but they are not 4584 * currently generated by the CAM subsystem so there 4585 * is no point in wasting the time right now. 4586 */ 4587 struct bus_dma_segment *sgs; 4588 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 4589 (*cb)(req, NULL, 0, EFAULT); 4590 } else { 4591 /* Just use the segments provided */ 4592 sgs = (struct bus_dma_segment *)csio->data_ptr; 4593 (*cb)(req, sgs, csio->sglist_cnt, 0); 4594 } 4595 } 4596 CAMLOCK_2_MPTLOCK(mpt); 4597 } else { 4598 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 4599 4600 /* 4601 * XXX: I don't know why this seems to happen, but 4602 * XXX: completing the CCB seems to make things happy. 4603 * XXX: This seems to happen if the initiator requests 4604 * XXX: enough data that we have to do multiple CTIOs. 4605 */ 4606 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4607 mpt_lprt(mpt, MPT_PRT_DEBUG, 4608 "Meaningless STATUS CCB (%p): flags %x status %x " 4609 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 4610 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 4611 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4612 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4613 MPTLOCK_2_CAMLOCK(mpt); 4614 xpt_done(ccb); 4615 CAMLOCK_2_MPTLOCK(mpt); 4616 return; 4617 } 4618 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 4619 sp = sense; 4620 memcpy(sp, &csio->sense_data, 4621 min(csio->sense_len, MPT_SENSE_SIZE)); 4622 } 4623 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); 4624 } 4625 } 4626 4627 static void 4628 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 4629 uint32_t lun, int send, uint8_t *data, size_t length) 4630 { 4631 mpt_tgt_state_t *tgt; 4632 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4633 SGE_SIMPLE32 *se; 4634 uint32_t flags; 4635 uint8_t *dptr; 4636 bus_addr_t pptr; 4637 request_t *req; 4638 4639 /* 4640 * We enter with resid set to the data load for the command. 4641 */ 4642 tgt = MPT_TGT_STATE(mpt, cmd_req); 4643 if (length == 0 || tgt->resid == 0) { 4644 tgt->resid = 0; 4645 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); 4646 return; 4647 } 4648 4649 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4650 mpt_prt(mpt, "out of resources- dropping local response\n"); 4651 return; 4652 } 4653 tgt->is_local = 1; 4654 4655 4656 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4657 ta = req->req_vbuf; 4658 4659 if (mpt->is_sas) { 4660 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 4661 ta->QueueTag = ssp->InitiatorTag; 4662 } else if (mpt->is_spi) { 4663 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 4664 ta->QueueTag = sp->Tag; 4665 } 4666 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4667 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4668 ta->ReplyWord = htole32(tgt->reply_desc); 4669 if (lun > MPT_MAX_LUNS) { 4670 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4671 ta->LUN[1] = lun & 0xff; 4672 } else { 4673 ta->LUN[1] = lun; 4674 } 4675 ta->RelativeOffset = 0; 4676 ta->DataLength = length; 4677 4678 dptr = req->req_vbuf; 4679 dptr += MPT_RQSL(mpt); 4680 pptr = req->req_pbuf; 4681 pptr += MPT_RQSL(mpt); 4682 memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 4683 4684 se = (SGE_SIMPLE32 *) &ta->SGL[0]; 4685 memset(se, 0,sizeof (*se)); 4686 4687 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 4688 if (send) { 4689 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4690 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 4691 } 4692 se->Address = pptr; 4693 MPI_pSGE_SET_LENGTH(se, length); 4694 flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 4695 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 4696 MPI_pSGE_SET_FLAGS(se, flags); 4697 4698 tgt->ccb = NULL; 4699 tgt->req = req; 4700 tgt->resid -= length; 4701 tgt->bytes_xfered = length; 4702 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4703 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 4704 #else 4705 tgt->state = TGT_STATE_MOVING_DATA; 4706 #endif 4707 mpt_send_cmd(mpt, req); 4708 } 4709 4710 /* 4711 * Abort queued up CCBs 4712 */ 4713 static cam_status 4714 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 4715 { 4716 struct mpt_hdr_stailq *lp; 4717 struct ccb_hdr *srch; 4718 int found = 0; 4719 union ccb *accb = ccb->cab.abort_ccb; 4720 tgt_resource_t *trtp; 4721 4722 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 4723 4724 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4725 trtp = &mpt->trt_wildcard; 4726 } else { 4727 trtp = &mpt->trt[ccb->ccb_h.target_lun]; 4728 } 4729 4730 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4731 lp = &trtp->atios; 4732 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 4733 lp = &trtp->inots; 4734 } else { 4735 return (CAM_REQ_INVALID); 4736 } 4737 4738 STAILQ_FOREACH(srch, lp, sim_links.stqe) { 4739 if (srch == &accb->ccb_h) { 4740 found = 1; 4741 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 4742 break; 4743 } 4744 } 4745 if (found) { 4746 accb->ccb_h.status = CAM_REQ_ABORTED; 4747 xpt_done(accb); 4748 return (CAM_REQ_CMP); 4749 } 4750 mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); 4751 return (CAM_PATH_INVALID); 4752 } 4753 4754 /* 4755 * Ask the MPT to abort the current target command 4756 */ 4757 static int 4758 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 4759 { 4760 int error; 4761 request_t *req; 4762 PTR_MSG_TARGET_MODE_ABORT abtp; 4763 4764 req = mpt_get_request(mpt, FALSE); 4765 if (req == NULL) { 4766 return (-1); 4767 } 4768 abtp = req->req_vbuf; 4769 memset(abtp, 0, sizeof (*abtp)); 4770 4771 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4772 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 4773 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 4774 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 4775 error = 0; 4776 if (mpt->is_fc || mpt->is_sas) { 4777 mpt_send_cmd(mpt, req); 4778 } else { 4779 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); 4780 } 4781 return (error); 4782 } 4783 4784 /* 4785 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 4786 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 4787 * FC929 to set bogus FC_RSP fields (nonzero residuals 4788 * but w/o RESID fields set). This causes QLogic initiators 4789 * to think maybe that a frame was lost. 4790 * 4791 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 4792 * we use allocated requests to do TARGET_ASSIST and we 4793 * need to know when to release them. 4794 */ 4795 4796 static void 4797 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 4798 uint8_t status, uint8_t const *sense_data) 4799 { 4800 uint8_t *cmd_vbuf; 4801 mpt_tgt_state_t *tgt; 4802 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 4803 request_t *req; 4804 bus_addr_t paddr; 4805 int resplen = 0; 4806 uint32_t fl; 4807 4808 cmd_vbuf = cmd_req->req_vbuf; 4809 cmd_vbuf += MPT_RQSL(mpt); 4810 tgt = MPT_TGT_STATE(mpt, cmd_req); 4811 4812 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4813 if (mpt->outofbeer == 0) { 4814 mpt->outofbeer = 1; 4815 xpt_freeze_simq(mpt->sim, 1); 4816 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4817 } 4818 if (ccb) { 4819 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4820 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4821 MPTLOCK_2_CAMLOCK(mpt); 4822 xpt_done(ccb); 4823 CAMLOCK_2_MPTLOCK(mpt); 4824 } else { 4825 mpt_prt(mpt, 4826 "could not allocate status request- dropping\n"); 4827 } 4828 return; 4829 } 4830 req->ccb = ccb; 4831 if (ccb) { 4832 ccb->ccb_h.ccb_mpt_ptr = mpt; 4833 ccb->ccb_h.ccb_req_ptr = req; 4834 } 4835 4836 /* 4837 * Record the currently active ccb, if any, and the 4838 * request for it in our target state area. 4839 */ 4840 tgt->ccb = ccb; 4841 tgt->req = req; 4842 tgt->state = TGT_STATE_SENDING_STATUS; 4843 4844 tp = req->req_vbuf; 4845 paddr = req->req_pbuf; 4846 paddr += MPT_RQSL(mpt); 4847 4848 memset(tp, 0, sizeof (*tp)); 4849 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 4850 if (mpt->is_fc) { 4851 PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 4852 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 4853 uint8_t *sts_vbuf; 4854 uint32_t *rsp; 4855 4856 sts_vbuf = req->req_vbuf; 4857 sts_vbuf += MPT_RQSL(mpt); 4858 rsp = (uint32_t *) sts_vbuf; 4859 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 4860 4861 /* 4862 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 4863 * It has to be big-endian in memory and is organized 4864 * in 32 bit words, which are much easier to deal with 4865 * as words which are swizzled as needed. 4866 * 4867 * All we're filling here is the FC_RSP payload. 4868 * We may just have the chip synthesize it if 4869 * we have no residual and an OK status. 4870 * 4871 */ 4872 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 4873 4874 rsp[2] = status; 4875 if (tgt->resid) { 4876 rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ 4877 rsp[3] = htobe32(tgt->resid); 4878 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4879 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4880 #endif 4881 } 4882 if (status == SCSI_STATUS_CHECK_COND) { 4883 int i; 4884 4885 rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! */ 4886 rsp[4] = htobe32(MPT_SENSE_SIZE); 4887 if (sense_data) { 4888 memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE); 4889 } else { 4890 mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI" 4891 "TION but no sense data?\n"); 4892 memset(&rsp, 0, MPT_SENSE_SIZE); 4893 } 4894 for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) { 4895 rsp[i] = htobe32(rsp[i]); 4896 } 4897 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4898 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4899 #endif 4900 } 4901 #ifndef WE_TRUST_AUTO_GOOD_STATUS 4902 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4903 #endif 4904 rsp[2] = htobe32(rsp[2]); 4905 } else if (mpt->is_sas) { 4906 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4907 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; 4908 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); 4909 } else { 4910 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4911 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; 4912 tp->StatusCode = status; 4913 tp->QueueTag = htole16(sp->Tag); 4914 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); 4915 } 4916 4917 tp->ReplyWord = htole32(tgt->reply_desc); 4918 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4919 4920 #ifdef WE_CAN_USE_AUTO_REPOST 4921 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; 4922 #endif 4923 if (status == SCSI_STATUS_OK && resplen == 0) { 4924 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; 4925 } else { 4926 tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); 4927 fl = 4928 MPI_SGE_FLAGS_HOST_TO_IOC | 4929 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4930 MPI_SGE_FLAGS_LAST_ELEMENT | 4931 MPI_SGE_FLAGS_END_OF_LIST | 4932 MPI_SGE_FLAGS_END_OF_BUFFER; 4933 fl <<= MPI_SGE_FLAGS_SHIFT; 4934 fl |= resplen; 4935 tp->StatusDataSGE.FlagsLength = htole32(fl); 4936 } 4937 4938 mpt_lprt(mpt, MPT_PRT_DEBUG, 4939 "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n", 4940 ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req, 4941 req->serno, tgt->resid); 4942 if (ccb) { 4943 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4944 mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb); 4945 } 4946 mpt_send_cmd(mpt, req); 4947 } 4948 4949 static void 4950 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, 4951 tgt_resource_t *trtp, int init_id) 4952 { 4953 struct ccb_immed_notify *inot; 4954 mpt_tgt_state_t *tgt; 4955 4956 tgt = MPT_TGT_STATE(mpt, req); 4957 inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots); 4958 if (inot == NULL) { 4959 mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n"); 4960 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL); 4961 return; 4962 } 4963 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); 4964 mpt_lprt(mpt, MPT_PRT_DEBUG1, 4965 "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun); 4966 4967 memset(&inot->sense_data, 0, sizeof (inot->sense_data)); 4968 inot->sense_len = 0; 4969 memset(inot->message_args, 0, sizeof (inot->message_args)); 4970 inot->initiator_id = init_id; /* XXX */ 4971 4972 /* 4973 * This is a somewhat grotesque attempt to map from task management 4974 * to old style SCSI messages. God help us all. 4975 */ 4976 switch (fc) { 4977 case MPT_ABORT_TASK_SET: 4978 inot->message_args[0] = MSG_ABORT_TAG; 4979 break; 4980 case MPT_CLEAR_TASK_SET: 4981 inot->message_args[0] = MSG_CLEAR_TASK_SET; 4982 break; 4983 case MPT_TARGET_RESET: 4984 inot->message_args[0] = MSG_TARGET_RESET; 4985 break; 4986 case MPT_CLEAR_ACA: 4987 inot->message_args[0] = MSG_CLEAR_ACA; 4988 break; 4989 case MPT_TERMINATE_TASK: 4990 inot->message_args[0] = MSG_ABORT_TAG; 4991 break; 4992 default: 4993 inot->message_args[0] = MSG_NOOP; 4994 break; 4995 } 4996 tgt->ccb = (union ccb *) inot; 4997 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 4998 MPTLOCK_2_CAMLOCK(mpt); 4999 xpt_done((union ccb *)inot); 5000 CAMLOCK_2_MPTLOCK(mpt); 5001 } 5002 5003 static void 5004 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 5005 { 5006 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 5007 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 5008 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 5009 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 5010 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 5011 '0', '0', '0', '1' 5012 }; 5013 struct ccb_accept_tio *atiop; 5014 lun_id_t lun; 5015 int tag_action = 0; 5016 mpt_tgt_state_t *tgt; 5017 tgt_resource_t *trtp = NULL; 5018 U8 *lunptr; 5019 U8 *vbuf; 5020 U16 itag; 5021 U16 ioindex; 5022 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 5023 uint8_t *cdbp; 5024 5025 /* 5026 * Stash info for the current command where we can get at it later. 5027 */ 5028 vbuf = req->req_vbuf; 5029 vbuf += MPT_RQSL(mpt); 5030 5031 /* 5032 * Get our state pointer set up. 5033 */ 5034 tgt = MPT_TGT_STATE(mpt, req); 5035 if (tgt->state != TGT_STATE_LOADED) { 5036 mpt_tgt_dump_req_state(mpt, req); 5037 panic("bad target state in mpt_scsi_tgt_atio"); 5038 } 5039 memset(tgt, 0, sizeof (mpt_tgt_state_t)); 5040 tgt->state = TGT_STATE_IN_CAM; 5041 tgt->reply_desc = reply_desc; 5042 ioindex = GET_IO_INDEX(reply_desc); 5043 if (mpt->verbose >= MPT_PRT_DEBUG) { 5044 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 5045 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 5046 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 5047 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 5048 } 5049 if (mpt->is_fc) { 5050 PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 5051 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 5052 if (fc->FcpCntl[2]) { 5053 /* 5054 * Task Management Request 5055 */ 5056 switch (fc->FcpCntl[2]) { 5057 case 0x2: 5058 fct = MPT_ABORT_TASK_SET; 5059 break; 5060 case 0x4: 5061 fct = MPT_CLEAR_TASK_SET; 5062 break; 5063 case 0x20: 5064 fct = MPT_TARGET_RESET; 5065 break; 5066 case 0x40: 5067 fct = MPT_CLEAR_ACA; 5068 break; 5069 case 0x80: 5070 fct = MPT_TERMINATE_TASK; 5071 break; 5072 default: 5073 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 5074 fc->FcpCntl[2]); 5075 mpt_scsi_tgt_status(mpt, 0, req, 5076 SCSI_STATUS_OK, 0); 5077 return; 5078 } 5079 } else { 5080 switch (fc->FcpCntl[1]) { 5081 case 0: 5082 tag_action = MSG_SIMPLE_Q_TAG; 5083 break; 5084 case 1: 5085 tag_action = MSG_HEAD_OF_Q_TAG; 5086 break; 5087 case 2: 5088 tag_action = MSG_ORDERED_Q_TAG; 5089 break; 5090 default: 5091 /* 5092 * Bah. Ignore Untagged Queing and ACA 5093 */ 5094 tag_action = MSG_SIMPLE_Q_TAG; 5095 break; 5096 } 5097 } 5098 tgt->resid = be32toh(fc->FcpDl); 5099 cdbp = fc->FcpCdb; 5100 lunptr = fc->FcpLun; 5101 itag = be16toh(fc->OptionalOxid); 5102 } else if (mpt->is_sas) { 5103 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; 5104 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; 5105 cdbp = ssp->CDB; 5106 lunptr = ssp->LogicalUnitNumber; 5107 itag = ssp->InitiatorTag; 5108 } else { 5109 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; 5110 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; 5111 cdbp = sp->CDB; 5112 lunptr = sp->LogicalUnitNumber; 5113 itag = sp->Tag; 5114 } 5115 5116 /* 5117 * Generate a simple lun 5118 */ 5119 switch (lunptr[0] & 0xc0) { 5120 case 0x40: 5121 lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; 5122 break; 5123 case 0: 5124 lun = lunptr[1]; 5125 break; 5126 default: 5127 mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n"); 5128 lun = 0xffff; 5129 break; 5130 } 5131 5132 /* 5133 * Deal with non-enabled or bad luns here. 5134 */ 5135 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || 5136 mpt->trt[lun].enabled == 0) { 5137 if (mpt->twildcard) { 5138 trtp = &mpt->trt_wildcard; 5139 } else if (fct == MPT_NIL_TMT_VALUE) { 5140 /* 5141 * In this case, we haven't got an upstream listener 5142 * for either a specific lun or wildcard luns. We 5143 * have to make some sensible response. For regular 5144 * inquiry, just return some NOT HERE inquiry data. 5145 * For VPD inquiry, report illegal field in cdb. 5146 * For REQUEST SENSE, just return NO SENSE data. 5147 * REPORT LUNS gets illegal command. 5148 * All other commands get 'no such device'. 5149 */ 5150 uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; 5151 size_t len; 5152 5153 memset(buf, 0, MPT_SENSE_SIZE); 5154 cond = SCSI_STATUS_CHECK_COND; 5155 buf[0] = 0xf0; 5156 buf[2] = 0x5; 5157 buf[7] = 0x8; 5158 sp = buf; 5159 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5160 5161 switch (cdbp[0]) { 5162 case INQUIRY: 5163 { 5164 if (cdbp[1] != 0) { 5165 buf[12] = 0x26; 5166 buf[13] = 0x01; 5167 break; 5168 } 5169 len = min(tgt->resid, cdbp[4]); 5170 len = min(len, sizeof (null_iqd)); 5171 mpt_lprt(mpt, MPT_PRT_DEBUG, 5172 "local inquiry %ld bytes\n", (long) len); 5173 mpt_scsi_tgt_local(mpt, req, lun, 1, 5174 null_iqd, len); 5175 return; 5176 } 5177 case REQUEST_SENSE: 5178 { 5179 buf[2] = 0x0; 5180 len = min(tgt->resid, cdbp[4]); 5181 len = min(len, sizeof (buf)); 5182 mpt_lprt(mpt, MPT_PRT_DEBUG, 5183 "local reqsense %ld bytes\n", (long) len); 5184 mpt_scsi_tgt_local(mpt, req, lun, 1, 5185 buf, len); 5186 return; 5187 } 5188 case REPORT_LUNS: 5189 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); 5190 buf[12] = 0x26; 5191 return; 5192 default: 5193 mpt_lprt(mpt, MPT_PRT_DEBUG, 5194 "CMD 0x%x to unmanaged lun %u\n", 5195 cdbp[0], lun); 5196 buf[12] = 0x25; 5197 break; 5198 } 5199 mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); 5200 return; 5201 } 5202 /* otherwise, leave trtp NULL */ 5203 } else { 5204 trtp = &mpt->trt[lun]; 5205 } 5206 5207 /* 5208 * Deal with any task management 5209 */ 5210 if (fct != MPT_NIL_TMT_VALUE) { 5211 if (trtp == NULL) { 5212 mpt_prt(mpt, "task mgmt function %x but no listener\n", 5213 fct); 5214 mpt_scsi_tgt_status(mpt, 0, req, 5215 SCSI_STATUS_OK, 0); 5216 } else { 5217 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, 5218 GET_INITIATOR_INDEX(reply_desc)); 5219 } 5220 return; 5221 } 5222 5223 5224 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); 5225 if (atiop == NULL) { 5226 mpt_lprt(mpt, MPT_PRT_WARN, 5227 "no ATIOs for lun %u- sending back %s\n", lun, 5228 mpt->tenabled? "QUEUE FULL" : "BUSY"); 5229 mpt_scsi_tgt_status(mpt, NULL, req, 5230 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 5231 NULL); 5232 return; 5233 } 5234 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 5235 mpt_lprt(mpt, MPT_PRT_DEBUG1, 5236 "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); 5237 atiop->ccb_h.ccb_mpt_ptr = mpt; 5238 atiop->ccb_h.status = CAM_CDB_RECVD; 5239 atiop->ccb_h.target_lun = lun; 5240 atiop->sense_len = 0; 5241 atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 5242 atiop->cdb_len = mpt_cdblen(cdbp[0], 16); 5243 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 5244 5245 /* 5246 * The tag we construct here allows us to find the 5247 * original request that the command came in with. 5248 * 5249 * This way we don't have to depend on anything but the 5250 * tag to find things when CCBs show back up from CAM. 5251 */ 5252 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5253 tgt->tag_id = atiop->tag_id; 5254 if (tag_action) { 5255 atiop->tag_action = tag_action; 5256 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 5257 } 5258 if (mpt->verbose >= MPT_PRT_DEBUG) { 5259 int i; 5260 mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, 5261 atiop->ccb_h.target_lun); 5262 for (i = 0; i < atiop->cdb_len; i++) { 5263 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 5264 (i == (atiop->cdb_len - 1))? '>' : ' '); 5265 } 5266 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 5267 itag, atiop->tag_id, tgt->reply_desc, tgt->resid); 5268 } 5269 5270 MPTLOCK_2_CAMLOCK(mpt); 5271 xpt_done((union ccb *)atiop); 5272 CAMLOCK_2_MPTLOCK(mpt); 5273 } 5274 5275 static void 5276 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 5277 { 5278 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5279 5280 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 5281 "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, 5282 tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, 5283 tgt->tag_id, tgt->state); 5284 } 5285 5286 static void 5287 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 5288 { 5289 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 5290 req->index, req->index, req->state); 5291 mpt_tgt_dump_tgt_state(mpt, req); 5292 } 5293 5294 static int 5295 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 5296 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 5297 { 5298 int dbg; 5299 union ccb *ccb; 5300 U16 status; 5301 5302 if (reply_frame == NULL) { 5303 /* 5304 * Figure out what the state of the command is. 5305 */ 5306 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5307 5308 #ifdef INVARIANTS 5309 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 5310 if (tgt->req) { 5311 mpt_req_not_spcl(mpt, tgt->req, 5312 "turbo scsi_tgt_reply associated req", __LINE__); 5313 } 5314 #endif 5315 switch(tgt->state) { 5316 case TGT_STATE_LOADED: 5317 /* 5318 * This is a new command starting. 5319 */ 5320 mpt_scsi_tgt_atio(mpt, req, reply_desc); 5321 break; 5322 case TGT_STATE_MOVING_DATA: 5323 { 5324 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 5325 5326 ccb = tgt->ccb; 5327 if (tgt->req == NULL) { 5328 panic("mpt: turbo target reply with null " 5329 "associated request moving data"); 5330 /* NOTREACHED */ 5331 } 5332 if (ccb == NULL) { 5333 if (tgt->is_local == 0) { 5334 panic("mpt: turbo target reply with " 5335 "null associated ccb moving data"); 5336 /* NOTREACHED */ 5337 } 5338 mpt_lprt(mpt, MPT_PRT_DEBUG, 5339 "TARGET_ASSIST local done\n"); 5340 TAILQ_REMOVE(&mpt->request_pending_list, 5341 tgt->req, links); 5342 mpt_free_request(mpt, tgt->req); 5343 tgt->req = NULL; 5344 mpt_scsi_tgt_status(mpt, NULL, req, 5345 0, NULL); 5346 return (TRUE); 5347 } 5348 tgt->ccb = NULL; 5349 tgt->nxfers++; 5350 mpt_req_untimeout(req, mpt_timeout, ccb); 5351 mpt_lprt(mpt, MPT_PRT_DEBUG, 5352 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 5353 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 5354 /* 5355 * Free the Target Assist Request 5356 */ 5357 KASSERT(tgt->req->ccb == ccb, 5358 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 5359 tgt->req->serno, tgt->req->ccb)); 5360 TAILQ_REMOVE(&mpt->request_pending_list, 5361 tgt->req, links); 5362 mpt_free_request(mpt, tgt->req); 5363 tgt->req = NULL; 5364 5365 /* 5366 * Do we need to send status now? That is, are 5367 * we done with all our data transfers? 5368 */ 5369 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 5370 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5371 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5372 KASSERT(ccb->ccb_h.status, 5373 ("zero ccb sts at %d\n", __LINE__)); 5374 tgt->state = TGT_STATE_IN_CAM; 5375 if (mpt->outofbeer) { 5376 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5377 mpt->outofbeer = 0; 5378 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5379 } 5380 MPTLOCK_2_CAMLOCK(mpt); 5381 xpt_done(ccb); 5382 CAMLOCK_2_MPTLOCK(mpt); 5383 break; 5384 } 5385 /* 5386 * Otherwise, send status (and sense) 5387 */ 5388 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5389 sp = sense; 5390 memcpy(sp, &ccb->csio.sense_data, 5391 min(ccb->csio.sense_len, MPT_SENSE_SIZE)); 5392 } 5393 mpt_scsi_tgt_status(mpt, ccb, req, 5394 ccb->csio.scsi_status, sp); 5395 break; 5396 } 5397 case TGT_STATE_SENDING_STATUS: 5398 case TGT_STATE_MOVING_DATA_AND_STATUS: 5399 { 5400 int ioindex; 5401 ccb = tgt->ccb; 5402 5403 if (tgt->req == NULL) { 5404 panic("mpt: turbo target reply with null " 5405 "associated request sending status"); 5406 /* NOTREACHED */ 5407 } 5408 5409 if (ccb) { 5410 tgt->ccb = NULL; 5411 if (tgt->state == 5412 TGT_STATE_MOVING_DATA_AND_STATUS) { 5413 tgt->nxfers++; 5414 } 5415 mpt_req_untimeout(req, mpt_timeout, ccb); 5416 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5417 ccb->ccb_h.status |= CAM_SENT_SENSE; 5418 } 5419 mpt_lprt(mpt, MPT_PRT_DEBUG, 5420 "TARGET_STATUS tag %x sts %x flgs %x req " 5421 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 5422 ccb->ccb_h.flags, tgt->req); 5423 /* 5424 * Free the Target Send Status Request 5425 */ 5426 KASSERT(tgt->req->ccb == ccb, 5427 ("tgt->req %p:%u tgt->req->ccb %p", 5428 tgt->req, tgt->req->serno, tgt->req->ccb)); 5429 /* 5430 * Notify CAM that we're done 5431 */ 5432 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5433 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5434 KASSERT(ccb->ccb_h.status, 5435 ("ZERO ccb sts at %d\n", __LINE__)); 5436 tgt->ccb = NULL; 5437 } else { 5438 mpt_lprt(mpt, MPT_PRT_DEBUG, 5439 "TARGET_STATUS non-CAM for req %p:%u\n", 5440 tgt->req, tgt->req->serno); 5441 } 5442 TAILQ_REMOVE(&mpt->request_pending_list, 5443 tgt->req, links); 5444 mpt_free_request(mpt, tgt->req); 5445 tgt->req = NULL; 5446 5447 /* 5448 * And re-post the Command Buffer. 5449 * This will reset the state. 5450 */ 5451 ioindex = GET_IO_INDEX(reply_desc); 5452 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5453 tgt->is_local = 0; 5454 mpt_post_target_command(mpt, req, ioindex); 5455 5456 /* 5457 * And post a done for anyone who cares 5458 */ 5459 if (ccb) { 5460 if (mpt->outofbeer) { 5461 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5462 mpt->outofbeer = 0; 5463 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5464 } 5465 MPTLOCK_2_CAMLOCK(mpt); 5466 xpt_done(ccb); 5467 CAMLOCK_2_MPTLOCK(mpt); 5468 } 5469 break; 5470 } 5471 case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 5472 tgt->state = TGT_STATE_LOADED; 5473 break; 5474 default: 5475 mpt_prt(mpt, "Unknown Target State 0x%x in Context " 5476 "Reply Function\n", tgt->state); 5477 } 5478 return (TRUE); 5479 } 5480 5481 status = le16toh(reply_frame->IOCStatus); 5482 if (status != MPI_IOCSTATUS_SUCCESS) { 5483 dbg = MPT_PRT_ERROR; 5484 } else { 5485 dbg = MPT_PRT_DEBUG1; 5486 } 5487 5488 mpt_lprt(mpt, dbg, 5489 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 5490 req, req->serno, reply_frame, reply_frame->Function, status); 5491 5492 switch (reply_frame->Function) { 5493 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 5494 { 5495 mpt_tgt_state_t *tgt; 5496 #ifdef INVARIANTS 5497 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 5498 #endif 5499 if (status != MPI_IOCSTATUS_SUCCESS) { 5500 /* 5501 * XXX What to do? 5502 */ 5503 break; 5504 } 5505 tgt = MPT_TGT_STATE(mpt, req); 5506 KASSERT(tgt->state == TGT_STATE_LOADING, 5507 ("bad state 0x%x on reply to buffer post\n", tgt->state)); 5508 mpt_assign_serno(mpt, req); 5509 tgt->state = TGT_STATE_LOADED; 5510 break; 5511 } 5512 case MPI_FUNCTION_TARGET_ASSIST: 5513 #ifdef INVARIANTS 5514 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 5515 #endif 5516 mpt_prt(mpt, "target assist completion\n"); 5517 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5518 mpt_free_request(mpt, req); 5519 break; 5520 case MPI_FUNCTION_TARGET_STATUS_SEND: 5521 #ifdef INVARIANTS 5522 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 5523 #endif 5524 mpt_prt(mpt, "status send completion\n"); 5525 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5526 mpt_free_request(mpt, req); 5527 break; 5528 case MPI_FUNCTION_TARGET_MODE_ABORT: 5529 { 5530 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 5531 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 5532 PTR_MSG_TARGET_MODE_ABORT abtp = 5533 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 5534 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 5535 #ifdef INVARIANTS 5536 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 5537 #endif 5538 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 5539 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 5540 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5541 mpt_free_request(mpt, req); 5542 break; 5543 } 5544 default: 5545 mpt_prt(mpt, "Unknown Target Address Reply Function code: " 5546 "0x%x\n", reply_frame->Function); 5547 break; 5548 } 5549 return (TRUE); 5550 } 5551