/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#include "dev/mpt/mpilib/mpi_sas.h"
#if __FreeBSD_version >= 500000
#include <sys/sysctl.h>
#endif
#include <sys/callout.h>
#include <sys/kthread.h>

#if __FreeBSD_version >= 700025
#ifndef	CAM_NEW_TRAN_CODE
#define	CAM_NEW_TRAN_CODE	1
#endif
#endif

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name = "mpt_cam",
	.probe = mpt_cam_probe,
	.attach = mpt_cam_attach,
	.enable = mpt_cam_enable,
	.ready = mpt_cam_ready,
	.event = mpt_cam_event,
	.reset = mpt_cam_ioc_reset,
	.detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
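
/*
 * Usage note (illustrative): the tunable above is read from the kernel
 * environment, so it can be set in loader.conf before boot, e.g.
 *
 *	hw.mpt.enable_sata_wc=1		# force SATA write caching on
 *	hw.mpt.enable_sata_wc=0		# force SATA write caching off
 *
 * Leaving it at -1 (the default) keeps each device's current setting.
 */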

int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t handler;
	int maxq;
	int error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	MPT_LOCK(mpt);
	if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_fcport_page0.Header.PageVersion,
	    mpt->mpt_fcport_page0.Header.PageLength,
	    mpt->mpt_fcport_page0.Header.PageNumber,
	    mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);
#if __FreeBSD_version >= 500000
	MPT_UNLOCK(mpt);
	{
		struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
		struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

		snprintf(mpt->scinfo.fc.wwnn,
		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWNN.High,
		    mpt->mpt_fcport_page0.WWNN.Low);

		snprintf(mpt->scinfo.fc.wwpn,
		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWPN.High,
		    mpt->mpt_fcport_page0.WWPN.Low);

		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
		    "World Wide Node Name");

		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
		    "World Wide Port Name");

	}
	MPT_LOCK(mpt);
#endif
	return (0);
}
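
/*
 * Note: the WWNN/WWPN strings registered above appear as read-only
 * sysctls under the adapter's device tree, e.g. (unit number
 * hypothetical):
 *
 *	sysctl dev.mpt.0.wwnn dev.mpt.0.wwpn
 */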

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
	    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	len = hdr.ExtPageLength * 4;
	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    0, &hdr, buffer, len, 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		free(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	free(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
	    0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	free(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error = 0;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	free(buffer, M_DEVBUF);
out:
	return (error);
}

/*
 * Read SAS configuration information. Nothing to do yet.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		free(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
	int enabled)
{
	SataPassthroughRequest_t *pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
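	/*
	 * The CommandFIS bytes below hand-roll a SATA Host-to-Device
	 * register FIS carrying an ATA SET FEATURES command (our reading
	 * of the values, not taken from vendor documentation): byte 0 is
	 * the FIS type (0x27, register H2D), byte 1 sets the 'C' bit so
	 * the device acts on the command register, byte 2 is the ATA
	 * command (0xEF, SET FEATURES), and byte 3 is the features
	 * subcode: 0x02 enables the drive write cache, 0x82 disables it.
	 * Bytes 7 and 15 fill the device and control registers with
	 * conventional values.
	 */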
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
	    10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		printf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		printf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

/*
 * Set SAS configuration information. Nothing to do yet.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			if (bootverbose)
				device_printf(mpt->dev,
				    "%sabling SATA WC on phy %d\n",
				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
			    mpt_enable_sata_wc);
		}
	}

	return (0);
}

static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	if (req != NULL) {

		if (reply_frame != NULL) {
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
			/*
			 * Whew- we can free this request (late completion)
			 */
			mpt_free_request(mpt, req);
		}
	}

	return (TRUE);
}

/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int error, i, pp1val;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

	pp1val = ((1 << mpt->mpt_ini_id) <<
	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
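	/*
	 * Worked example (illustrative): with mpt_ini_id == 7 and the
	 * customary shift of 16 for
	 * MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID, this is
	 *
	 *	pp1val = ((1 << 7) << 16) | 7 = 0x00800007
	 *
	 * i.e. a response-ID bitmask selecting only our own ID in the
	 * upper half, with the initiator ID itself in the low bits.
	 */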
	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	MPT_LOCK(mpt);

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	MPT_UNLOCK(mpt);
	return (error);
}

void
mpt_cam_ready(struct mpt_softc *mpt)
{
	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		MPT_LOCK(mpt);
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
		MPT_UNLOCK(mpt);
	}
	mpt->ready = 1;
}

void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	MPT_LOCK(mpt);
	mpt->ready = 0;
	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		free(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}
	MPT_UNLOCK(mpt);

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		MPT_LOCK(mpt);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		MPT_UNLOCK(mpt);
		cam_sim_free(mpt->sim, TRUE);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		MPT_LOCK(mpt);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		MPT_UNLOCK(mpt);
		cam_sim_free(mpt->phydisk_sim, TRUE);
		mpt->phydisk_sim = NULL;
	}
}

/*
 * This routine is used after a system crash to dump core onto the swap device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb *ccb;
	struct mpt_softc *mpt;
	request_t *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

#if __FreeBSD_version < 500000
	MPT_LOCK(mpt);
#endif
	MPT_LOCK_ASSERT(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
	/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
#if __FreeBSD_version < 500000
	MPT_UNLOCK(mpt);
#endif
}

/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called
 * directly.
 *
 * Takes a list of physical segments and builds the SGL for a SCSI IO
 * command and forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
 */
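/*
 * For reference, a minimal sketch of how callbacks like the one below
 * are normally driven (the csio names are illustrative, not lifted
 * from this file):
 *
 *	error = bus_dmamap_load(mpt->buffer_dmat, req->dmap,
 *	    csio->data_ptr, csio->dxfer_len, cb, req, 0);
 *
 * where 'cb' is mpt_execute_req_a64 or mpt_execute_req; bus_dma
 * invokes it synchronously, or later if the load returns EINPROGRESS.
 */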
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
		if (sizeof(bus_addr_t) > 4) {
			se->Address.High =
			    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
		}
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    htole32(((uint64_t)chain_list_addr) >> 32);
		}
		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of
			 * the number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
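			/*
			 * Worked example (illustrative, assuming
			 * MPT_NSGL(mpt) == 32 and the usual 12-byte
			 * SGE_SIMPLE64/SGE_CHAIN64 sizes): with 40 segments
			 * remaining, this_seg_lim = seg + 31, so Length
			 * starts at 31 * 12 = 372 bytes, NextChainOffset
			 * is 372 >> 2 = 93 words, and Length then grows by
			 * 12 to 384 to cover the trailing chain element.
			 */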
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address.Low = htole32(dm_segs->ds_addr &
			    0xffffffff);
			if (sizeof (bus_addr_t) > 4) {
				se->Address.High =
				    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
			}
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			CAMLOCK_2_MPTLOCK(mpt);
			nrq = mpt_get_request(mpt, FALSE);
			MPTLOCK_2_CAMLOCK(mpt);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			printf("  Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	CAMLOCK_2_MPTLOCK(mpt);
	mpt_send_cmd(mpt, req);
	MPTLOCK_2_CAMLOCK(mpt);
}

static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE32 *se;
	SGE_CHAIN32 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE32 pointers and start doing CHAIN32 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE32 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address = htole32(dm_segs->ds_addr);
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE32 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN32 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		ce->Address = htole32(chain_list_addr);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of
			 * the number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
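			/*
			 * Same arithmetic as the 64-bit path above, only
			 * with the smaller (typically 8-byte) SGE_SIMPLE32/
			 * SGE_CHAIN32 elements: 31 segments would give a
			 * Length of 248, a NextChainOffset of 62 words, and
			 * then 8 more bytes for the trailing chain element
			 * (element sizes assumed, not taken from mpi.h).
			 */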
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN32);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
        /*
         * Fill in the chain list SGE elements with our segment data.
         *
         * If we're the last element in this chain list, set the last
         * element flag. If we're the completely last element period,
         * set the end of list and end of buffer flags.
         */
        while (seg < this_seg_lim) {
            memset(se, 0, sizeof (*se));
            se->Address = htole32(dm_segs->ds_addr);
            MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
            tf = flags;
            if (seg == this_seg_lim - 1) {
                tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
            }
            if (seg == nseg - 1) {
                tf |= MPI_SGE_FLAGS_END_OF_LIST |
                    MPI_SGE_FLAGS_END_OF_BUFFER;
            }
            MPI_pSGE_SET_FLAGS(se, tf);
            se->FlagsLength = htole32(se->FlagsLength);
            se++;
            seg++;
            dm_segs++;
        }

    next_chain:
        /*
         * If we have more segments to do and we've used up all of
         * the space in a request area, go allocate another one
         * and chain to that.
         */
        if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
            request_t *nrq;

            CAMLOCK_2_MPTLOCK(mpt);
            nrq = mpt_get_request(mpt, FALSE);
            MPTLOCK_2_CAMLOCK(mpt);

            if (nrq == NULL) {
                error = ENOMEM;
                goto bad;
            }

            /*
             * Append the new request area on the tail of our list.
             */
            if ((trq = req->chain) == NULL) {
                req->chain = nrq;
            } else {
                while (trq->chain != NULL) {
                    trq = trq->chain;
                }
                trq->chain = nrq;
            }
            trq = nrq;
            mpt_off = trq->req_vbuf;
            if (mpt->verbose >= MPT_PRT_DEBUG) {
                memset(mpt_off, 0xff, MPT_REQUEST_AREA);
            }
            nxt_off = 0;
        }
    }
out:

    /*
     * One last check to see whether this CCB needs to be aborted.
     */
    if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        mpt_prt(mpt,
            "mpt_execute_req: I/O cancelled (status 0x%x)\n",
            ccb->ccb_h.status & CAM_STATUS_MASK);
        if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
            bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    ccb->ccb_h.status |= CAM_SIM_QUEUED;
    if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
        mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
            mpt_timeout, ccb);
    }
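    /*
     * (CAM expresses ccb_h.timeout in milliseconds while the callout
     * machinery wants ticks, hence the (timeout * hz) / 1000 conversion
     * above; e.g. a 30000ms CAM timeout with hz == 1000 arms a
     * 30000-tick callout.)
     */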
    if (mpt->verbose > MPT_PRT_DEBUG) {
        int nc = 0;
        mpt_print_request(req->req_vbuf);
        for (trq = req->chain; trq; trq = trq->chain) {
            printf(" Additional Chain Area %d\n", nc++);
            mpt_dump_sgl(trq->req_vbuf, 0);
        }
    }

    if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
        request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
        mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef WE_TRUST_AUTO_GOOD_STATUS
        if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
            csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
            tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
        } else {
            tgt->state = TGT_STATE_MOVING_DATA;
        }
#else
        tgt->state = TGT_STATE_MOVING_DATA;
#endif
    }
    CAMLOCK_2_MPTLOCK(mpt);
    mpt_send_cmd(mpt, req);
    MPTLOCK_2_CAMLOCK(mpt);
}

static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
    request_t *req;
    struct mpt_softc *mpt;
    MSG_SCSI_IO_REQUEST *mpt_req;
    struct ccb_scsiio *csio = &ccb->csio;
    struct ccb_hdr *ccbh = &ccb->ccb_h;
    bus_dmamap_callback_t *cb;
    target_id_t tgt;
    int raid_passthru;

    /* Get the pointer for the physical adapter */
    mpt = ccb->ccb_h.ccb_mpt_ptr;
    raid_passthru = (sim == mpt->phydisk_sim);

    CAMLOCK_2_MPTLOCK(mpt);
    if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
        if (mpt->outofbeer == 0) {
            mpt->outofbeer = 1;
            xpt_freeze_simq(mpt->sim, 1);
            mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
        MPTLOCK_2_CAMLOCK(mpt);
        xpt_done(ccb);
        return;
    }
#ifdef INVARIANTS
    mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
#endif
    MPTLOCK_2_CAMLOCK(mpt);

    if (sizeof (bus_addr_t) > 4) {
        cb = mpt_execute_req_a64;
    } else {
        cb = mpt_execute_req;
    }

    /*
     * Link the ccb and the request structure so we can find
     * the other knowing either the request or the ccb
     */
    req->ccb = ccb;
    ccb->ccb_h.ccb_req_ptr = req;

    /* Now we build the command for the IOC */
    mpt_req = req->req_vbuf;
    memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));

    mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
    if (raid_passthru) {
        mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
        CAMLOCK_2_MPTLOCK(mpt);
        if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
            MPTLOCK_2_CAMLOCK(mpt);
            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
            mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
            xpt_done(ccb);
            return;
        }
        MPTLOCK_2_CAMLOCK(mpt);
        mpt_req->Bus = 0;	/* we never set bus here */
    } else {
        tgt = ccb->ccb_h.target_id;
        mpt_req->Bus = 0;	/* XXX */
    }
    mpt_req->SenseBufferLength =
        (csio->sense_len < MPT_SENSE_SIZE) ?
        csio->sense_len : MPT_SENSE_SIZE;

    /*
     * We use the message context to find the request structure when
     * we get the command completion interrupt from the IOC.
     */
    mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);

    /* Which physical device to do the I/O on */
    mpt_req->TargetID = tgt;

    /* We assume a single level LUN type */
    if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
        mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
        mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
    } else {
        mpt_req->LUN[1] = ccb->ccb_h.target_lun;
    }
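    /*
     * Worked example of the flat LUN encoding above: a target_lun of
     * 0x123 yields LUN[0] = 0x40 | 0x01 = 0x41 and LUN[1] = 0x23, which
     * is SAM-2 flat space addressing; LUNs below MPT_MAX_LUNS drop
     * straight into LUN[1] (peripheral device addressing, bus zero).
     */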
    /* Set the direction of the transfer */
    if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
        mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
    } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
        mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
    } else {
        mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
    }

    if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
        switch(ccb->csio.tag_action) {
        case MSG_HEAD_OF_Q_TAG:
            mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
            break;
        case MSG_ACA_TASK:
            mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
            break;
        case MSG_ORDERED_Q_TAG:
            mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
            break;
        case MSG_SIMPLE_Q_TAG:
        default:
            mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
            break;
        }
    } else {
        if (mpt->is_fc || mpt->is_sas) {
            mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
        } else {
            /* XXX No such thing for a target doing packetized. */
            mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
        }
    }

    if (mpt->is_spi) {
        if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
            mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
        }
    }
    mpt_req->Control = htole32(mpt_req->Control);

    /* Copy the scsi command block into place */
    if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
        bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
    } else {
        bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
    }

    mpt_req->CDBLength = csio->cdb_len;
    mpt_req->DataLength = htole32(csio->dxfer_len);
    mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

    /*
     * Do a *short* print here if we're set to MPT_PRT_DEBUG
     */
    if (mpt->verbose == MPT_PRT_DEBUG) {
        U32 df;
        mpt_prt(mpt, "mpt_start: %s op 0x%x ",
            (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
            "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
        df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
        if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
            mpt_prtc(mpt, "(%s %u byte%s ",
                (df == MPI_SCSIIO_CONTROL_READ)?
                "read" : "write", csio->dxfer_len,
                (csio->dxfer_len == 1)? ")" : "s)");
        }
        mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
            ccb->ccb_h.target_lun, req, req->serno);
    }
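    /*
     * A note on the mapping logic below: bus_dmamap_load(9) may either
     * invoke the callback immediately or return EINPROGRESS and defer it
     * until mapping resources free up; the EINPROGRESS path freezes the
     * SIM queue so that later commands cannot pass this one while its
     * mapping is pending.
     */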
    /*
     * If we have any data to send with this command map it into bus space.
     */
    if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
            /*
             * We've been given a pointer to a single buffer.
             */
            if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
                /*
                 * Virtual address that needs to be translated into
                 * one or more physical address ranges.
                 */
                int error;
                int s = splsoftvm();
                error = bus_dmamap_load(mpt->buffer_dmat,
                    req->dmap, csio->data_ptr, csio->dxfer_len,
                    cb, req, 0);
                splx(s);
                if (error == EINPROGRESS) {
                    /*
                     * So as to maintain ordering,
                     * freeze the controller queue
                     * until our mapping is
                     * returned.
                     */
                    xpt_freeze_simq(mpt->sim, 1);
                    ccbh->status |= CAM_RELEASE_SIMQ;
                }
            } else {
                /*
                 * We have been given a pointer to a single
                 * physical buffer.
                 */
                struct bus_dma_segment seg;
                seg.ds_addr =
                    (bus_addr_t)(vm_offset_t)csio->data_ptr;
                seg.ds_len = csio->dxfer_len;
                (*cb)(req, &seg, 1, 0);
            }
        } else {
            /*
             * We have been given a list of addresses.
             * This case could be easily supported, but such lists
             * are not currently generated by the CAM subsystem,
             * so there is no point in wasting the time right now.
             */
            struct bus_dma_segment *segs;
            if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
                (*cb)(req, NULL, 0, EFAULT);
            } else {
                /* Just use the segments provided */
                segs = (struct bus_dma_segment *)csio->data_ptr;
                (*cb)(req, segs, csio->sglist_cnt, 0);
            }
        }
    } else {
        (*cb)(req, NULL, 0, 0);
    }
}

static int
mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
    int sleep_ok)
{
    int error;
    uint16_t status;
    uint8_t response;

    error = mpt_scsi_send_tmf(mpt,
        (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
        MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
        MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
        mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
        0,	/* XXX How do I get the channel ID? */
        tgt != CAM_TARGET_WILDCARD ? tgt : 0,
        lun != CAM_LUN_WILDCARD ? lun : 0,
        0, sleep_ok);

    if (error != 0) {
        /*
         * mpt_scsi_send_tmf hard resets on failure, so no
         * need to do so here.
         */
        mpt_prt(mpt,
            "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
        return (EIO);
    }

    /* Wait for bus reset to be processed by the IOC. */
    error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
        REQ_STATE_DONE, sleep_ok, 5000);

    status = le16toh(mpt->tmf_req->IOCStatus);
    response = mpt->tmf_req->ResponseCode;
    mpt->tmf_req->state = REQ_STATE_FREE;

    if (error) {
        mpt_prt(mpt, "mpt_bus_reset: Reset timed out. "
            "Resetting controller.\n");
        mpt_reset(mpt, TRUE);
        return (ETIMEDOUT);
    }

    if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
        mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
            "Resetting controller.\n", status);
        mpt_reset(mpt, TRUE);
        return (EIO);
    }

    if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
        response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
        mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
            "Resetting controller.\n", response);
        mpt_reset(mpt, TRUE);
        return (EIO);
    }
    return (0);
}

static int
mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
{
    int r = 0;
    request_t *req;
    PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;

    req = mpt_get_request(mpt, FALSE);
    if (req == NULL) {
        return (ENOMEM);
    }
    fc = req->req_vbuf;
    memset(fc, 0, sizeof(*fc));
    fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
    fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
    fc->MsgContext = htole32(req->index | fc_els_handler_id);
    mpt_send_cmd(mpt, req);
    if (dowait) {
        r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
            REQ_STATE_DONE, FALSE, 60 * 1000);
        if (r == 0) {
            mpt_free_request(mpt, req);
        }
    }
    return (r);
}

static int
mpt_cam_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{
    uint32_t data0, data1;

    data0 = le32toh(msg->Data[0]);
    data1 = le32toh(msg->Data[1]);
    switch(msg->Event & 0xFF) {
    case MPI_EVENT_UNIT_ATTENTION:
        mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
            (data0 >> 8) & 0xff, data0 & 0xff);
        break;

    case MPI_EVENT_IOC_BUS_RESET:
        /* We generated a bus reset */
        mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
            (data0 >> 8) & 0xff);
        xpt_async(AC_BUS_RESET, mpt->path, NULL);
        break;

    case MPI_EVENT_EXT_BUS_RESET:
        /* Someone else generated a bus reset */
        mpt_prt(mpt, "External Bus Reset Detected\n");
        /*
         * These replies don't return EventData like the MPI
         * spec says they do
         */
        xpt_async(AC_BUS_RESET, mpt->path, NULL);
        break;

    case MPI_EVENT_RESCAN:
#if __FreeBSD_version >= 600000
    {
        union ccb *ccb;
        uint32_t pathid;
        /*
         * In general this means a device has been added to the loop.
         */
        mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
        if (mpt->ready == 0) {
            break;
        }
        if (mpt->phydisk_sim) {
            pathid = cam_sim_path(mpt->phydisk_sim);
        } else {
            pathid = cam_sim_path(mpt->sim);
        }
        MPTLOCK_2_CAMLOCK(mpt);
        /*
         * Allocate a CCB, create a wildcard path for this bus,
         * and schedule a rescan.
         */
        ccb = xpt_alloc_ccb_nowait();
        if (ccb == NULL) {
            mpt_prt(mpt, "unable to alloc CCB for rescan\n");
            CAMLOCK_2_MPTLOCK(mpt);
            break;
        }

        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
            CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
            CAMLOCK_2_MPTLOCK(mpt);
            mpt_prt(mpt, "unable to create path for rescan\n");
            xpt_free_ccb(ccb);
            break;
        }
        xpt_rescan(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        break;
    }
#else
        mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
        break;
#endif
    case MPI_EVENT_LINK_STATUS_CHANGE:
        mpt_prt(mpt, "Port %d: LinkState: %s\n",
            (data1 >> 8) & 0xff,
            ((data0 & 0xff) == 0)? "Failed" : "Active");
        break;

    case MPI_EVENT_LOOP_STATE_CHANGE:
        switch ((data0 >> 16) & 0xff) {
        case 0x01:
            mpt_prt(mpt,
                "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
                "(Loop Initialization)\n",
                (data1 >> 8) & 0xff,
                (data0 >> 8) & 0xff,
                (data0     ) & 0xff);
            switch ((data0 >> 8) & 0xff) {
            case 0xF7:
                if ((data0 & 0xff) == 0xF7) {
                    mpt_prt(mpt, "Device needs AL_PA\n");
                } else {
                    mpt_prt(mpt, "Device %02x doesn't like "
                        "FC performance\n",
                        data0 & 0xFF);
                }
                break;
            case 0xF8:
                if ((data0 & 0xff) == 0xF7) {
                    mpt_prt(mpt, "Device had loop failure "
                        "at its receiver prior to acquiring"
                        " AL_PA\n");
                } else {
                    mpt_prt(mpt, "Device %02x detected loop"
                        " failure at its receiver\n",
                        data0 & 0xFF);
                }
                break;
            default:
                mpt_prt(mpt, "Device %02x requests that device "
                    "%02x reset itself\n",
                    data0 & 0xFF,
                    (data0 >> 8) & 0xFF);
                break;
            }
            break;
        case 0x02:
            mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
                "LPE(%02x,%02x) (Loop Port Enable)\n",
                (data1 >> 8) & 0xff, /* Port */
                (data0 >> 8) & 0xff, /* Character 3 */
                (data0     ) & 0xff  /* Character 4 */);
            break;
        case 0x03:
            mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
                "LPB(%02x,%02x) (Loop Port Bypass)\n",
                (data1 >> 8) & 0xff, /* Port */
                (data0 >> 8) & 0xff, /* Character 3 */
                (data0     ) & 0xff  /* Character 4 */);
            break;
        default:
            mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
                "FC event (%02x %02x %02x)\n",
                (data1 >> 8) & 0xff,  /* Port */
                (data0 >> 16) & 0xff, /* Event */
                (data0 >> 8) & 0xff,  /* Character 3 */
                (data0     ) & 0xff   /* Character 4 */);
        }
        break;

    case MPI_EVENT_LOGOUT:
        mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
            (data1 >> 8) & 0xff, data0);
        break;
    case MPI_EVENT_QUEUE_FULL:
    {
        struct cam_sim *sim;
        struct cam_path *tmppath;
        struct ccb_relsim crs;
        PTR_EVENT_DATA_QUEUE_FULL pqf;
        lun_id_t lun_id;

        pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
        pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
        mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
            "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
        if (mpt->phydisk_sim) {
            sim = mpt->phydisk_sim;
        } else {
            sim = mpt->sim;
        }
        MPTLOCK_2_CAMLOCK(mpt);
        for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
            if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
                pqf->TargetID, lun_id) != CAM_REQ_CMP) {
                mpt_prt(mpt, "unable to create a path to send "
                    "XPT_REL_SIMQ");
                CAMLOCK_2_MPTLOCK(mpt);
                break;
            }
            xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
            crs.ccb_h.func_code = XPT_REL_SIMQ;
            crs.ccb_h.flags = CAM_DEV_QFREEZE;
            crs.release_flags = RELSIM_ADJUST_OPENINGS;
            /*
             * Clamp this device's tag openings to one below the
             * depth at which it reported QUEUE FULL.
             */
            crs.openings = pqf->CurrentDepth - 1;
            xpt_action((union ccb *)&crs);
            if (crs.ccb_h.status != CAM_REQ_CMP) {
                mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
            }
            xpt_free_path(tmppath);
        }
        CAMLOCK_2_MPTLOCK(mpt);
        break;
    }
    case MPI_EVENT_IR_RESYNC_UPDATE:
        mpt_prt(mpt, "IR resync update %d completed\n",
            (data0 >> 16) & 0xff);
        break;
    case MPI_EVENT_EVENT_CHANGE:
    case MPI_EVENT_INTEGRATED_RAID:
    case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
    case MPI_EVENT_SAS_SES:
        break;
    default:
        mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
            msg->Event & 0xFF);
        return (0);
    }
    return (1);
}

/*
 * Reply path for all SCSI I/O requests, called from our
 * interrupt handler by extracting our handler index from
 * the MsgContext field of the reply from the IOC.
 *
 * This routine is optimized for the common case of a
 * completion without error. All exception handling is
 * offloaded to non-inlined helper routines to minimize
 * cache footprint.
 */
static int
mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
    MSG_SCSI_IO_REQUEST *scsi_req;
    union ccb *ccb;

    if (req->state == REQ_STATE_FREE) {
        mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
        return (TRUE);
    }

    scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
    ccb = req->ccb;
    if (ccb == NULL) {
        mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
            req, req->serno);
        return (TRUE);
    }

    mpt_req_untimeout(req, mpt_timeout, ccb);
    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

    if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        bus_dmasync_op_t op;

        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
            op = BUS_DMASYNC_POSTREAD;
        else
            op = BUS_DMASYNC_POSTWRITE;
        bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
        bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
    }

    if (reply_frame == NULL) {
        /*
         * Context only reply, completion without error status.
         */
        ccb->csio.resid = 0;
        mpt_set_ccb_status(ccb, CAM_REQ_CMP);
        ccb->csio.scsi_status = SCSI_STATUS_OK;
    } else {
        mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
    }
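    /*
     * (A NULL reply_frame above is the "context only" fast path: the IOC
     * posted just a reply descriptor, whose MsgContext was enough to find
     * this request, and that by definition means completion with good
     * status and no residual.)
     */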
    if (mpt->outofbeer) {
        ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
        mpt->outofbeer = 0;
        mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
    }
    if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
        struct scsi_inquiry_data *iq =
            (struct scsi_inquiry_data *)ccb->csio.data_ptr;
        if (scsi_req->Function ==
            MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
            /*
             * Fake out the device type so that only the
             * pass-thru device will attach.
             */
            iq->device &= ~0x1F;
            iq->device |= T_NODEVICE;
        }
    }
    if (mpt->verbose == MPT_PRT_DEBUG) {
        mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
            req, req->serno);
    }
    KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
    MPTLOCK_2_CAMLOCK(mpt);
    xpt_done(ccb);
    CAMLOCK_2_MPTLOCK(mpt);
    if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
    } else {
        mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
            req, req->serno);
        TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
    }
    KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
        ("CCB req needed wakeup"));
#ifdef INVARIANTS
    mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
#endif
    mpt_free_request(mpt, req);
    return (TRUE);
}

static int
mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
    MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;

    KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
#ifdef INVARIANTS
    mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
#endif
    tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
    /* Record IOC Status and Response Code of TMF for any waiters. */
    req->IOCStatus = le16toh(tmf_reply->IOCStatus);
    req->ResponseCode = tmf_reply->ResponseCode;

    mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
        req, req->serno, le16toh(tmf_reply->IOCStatus));
    TAILQ_REMOVE(&mpt->request_pending_list, req, links);
    if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
        req->state |= REQ_STATE_DONE;
        wakeup(req);
    } else {
        mpt->tmf_req->state = REQ_STATE_FREE;
    }
    return (TRUE);
}

/*
 * XXX: Move to definitions file
 */
#define ELS     0x22
#define FC4LS   0x32
#define ABTS    0x81
#define BA_ACC  0x84

#define LS_RJT  0x01
#define LS_ACC  0x02
#define PLOGI   0x03
#define LOGO    0x05
#define SRR     0x14
#define PRLI    0x20
#define PRLO    0x21
#define ADISC   0x52
#define RSCN    0x61
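/*
 * The values above are FC frame R_CTL values (ELS, FC4LS, ABTS, BA_ACC)
 * and FC-LS command codes as carried in the first payload byte (PLOGI,
 * PRLI, RSCN, and friends); mpt_fc_els_reply_handler below keys off the
 * rctl/type/cmd it extracts from incoming frames to decide what response
 * to fabricate.
 */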
static void
mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
    PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
{
    uint32_t fl;
    MSG_LINK_SERVICE_RSP_REQUEST tmp;
    PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;

    /*
     * We are going to reuse the ELS request to send this response back.
     */
    rsp = &tmp;
    memset(rsp, 0, sizeof(*rsp));

#ifdef USE_IMMEDIATE_LINK_DATA
    /*
     * The IMMEDIATE stuff doesn't seem to work.
     */
    rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
#endif
    rsp->RspLength = length;
    rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
    rsp->MsgContext = htole32(req->index | fc_els_handler_id);

    /*
     * Copy over information from the original reply frame to
     * its correct place in the response.
     */
    memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);

    /*
     * And now copy back the temporary area to the original frame.
     */
    memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
    rsp = req->req_vbuf;

#ifdef USE_IMMEDIATE_LINK_DATA
    memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
#else
    {
        PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
        bus_addr_t paddr = req->req_pbuf;
        paddr += MPT_RQSL(mpt);

        fl =
            MPI_SGE_FLAGS_HOST_TO_IOC |
            MPI_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI_SGE_FLAGS_LAST_ELEMENT |
            MPI_SGE_FLAGS_END_OF_LIST |
            MPI_SGE_FLAGS_END_OF_BUFFER;
        fl <<= MPI_SGE_FLAGS_SHIFT;
        fl |= (length);
        se->FlagsLength = htole32(fl);
        se->Address = htole32((uint32_t) paddr);
    }
#endif

    /*
     * Send it on...
     */
    mpt_send_cmd(mpt, req);
}

static int
mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
    PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
        (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
    U8 rctl;
    U8 type;
    U8 cmd;
    U16 status = le16toh(reply_frame->IOCStatus);
    U32 *elsbuf;
    int ioindex;
    int do_refresh = TRUE;

#ifdef INVARIANTS
    KASSERT(mpt_req_on_free_list(mpt, req) == 0,
        ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
        req, req->serno, rp->Function));
    if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
        mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
    } else {
        mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
    }
#endif
    mpt_lprt(mpt, MPT_PRT_DEBUG,
        "FC_ELS Complete: req %p:%u, reply %p function %x\n",
        req, req->serno, reply_frame, reply_frame->Function);

    if (status != MPI_IOCSTATUS_SUCCESS) {
        mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
            status, reply_frame->Function);
        if (status == MPI_IOCSTATUS_INVALID_STATE) {
            /*
             * XXX: to get around shutdown issue
             */
            mpt->disabled = 1;
            return (TRUE);
        }
        return (TRUE);
    }

    /*
     * If the function is a link service response, we recycle the
     * response to be a refresh for a new link service request.
     *
     * The request pointer is bogus in this case and we have to fetch
     * it based upon the TransactionContext.
     */
    if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
        /* Freddie Uncle Charlie Katie */
        /* We don't get the IOINDEX as part of the Link Svc Rsp */
        for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
            if (mpt->els_cmd_ptrs[ioindex] == req) {
                break;
            }

        KASSERT(ioindex < mpt->els_cmds_allocated,
            ("can't find my mommie!"));

        /* remove from active list as we're going to re-post it */
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
        req->state &= ~REQ_STATE_QUEUED;
        req->state |= REQ_STATE_DONE;
        mpt_fc_post_els(mpt, req, ioindex);
        return (TRUE);
    }

    if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
        /* remove from active list as we're done */
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
        req->state &= ~REQ_STATE_QUEUED;
        req->state |= REQ_STATE_DONE;
        if (req->state & REQ_STATE_TIMEDOUT) {
            mpt_lprt(mpt, MPT_PRT_DEBUG,
                "Sync Primitive Send Completed After Timeout\n");
            mpt_free_request(mpt, req);
        } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
            mpt_lprt(mpt, MPT_PRT_DEBUG,
                "Async Primitive Send Complete\n");
            mpt_free_request(mpt, req);
        } else {
            mpt_lprt(mpt, MPT_PRT_DEBUG,
                "Sync Primitive Send Complete- Waking Waiter\n");
            wakeup(req);
        }
        return (TRUE);
    }

    if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
        mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
            "Length %d Message Flags %x\n", rp->Function, rp->Flags,
            rp->MsgLength, rp->MsgFlags);
        return (TRUE);
    }

    if (rp->MsgLength <= 5) {
        /*
         * This is just an ack of an original ELS buffer post.
         */
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
        return (TRUE);
    }

    rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
    type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;

    elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
    cmd = be32toh(elsbuf[0]) >> 24;

    if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
        mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
        return (TRUE);
    }

    ioindex = le32toh(rp->TransactionContext);
    req = mpt->els_cmd_ptrs[ioindex];

    if (rctl == ELS && type == 1) {
        switch (cmd) {
        case PRLI:
            /*
             * Send back a PRLI ACC
             */
            mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
                le32toh(rp->Wwn.PortNameHigh),
                le32toh(rp->Wwn.PortNameLow));
            elsbuf[0] = htobe32(0x02100014);
            elsbuf[1] |= htobe32(0x00000100);
            elsbuf[4] = htobe32(0x00000002);
            if (mpt->role & MPT_ROLE_TARGET)
                elsbuf[4] |= htobe32(0x00000010);
            if (mpt->role & MPT_ROLE_INITIATOR)
                elsbuf[4] |= htobe32(0x00000020);
            /* remove from active list as we're done */
            TAILQ_REMOVE(&mpt->request_pending_list, req, links);
            req->state &= ~REQ_STATE_QUEUED;
            req->state |= REQ_STATE_DONE;
            mpt_fc_els_send_response(mpt, req, rp, 20);
            do_refresh = FALSE;
            break;
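        /*
         * (Decoding the magic numbers above, as best as can be inferred:
         * 0x02100014 is an LS_ACC (0x02) with page length 0x10 and a
         * 20-byte (0x14) payload, matching the length handed to
         * mpt_fc_els_send_response(); the 0x10/0x20 bits OR'd into word 4
         * advertise target and initiator function in the PRLI service
         * parameter page.)
         */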
        case PRLO:
            memset(elsbuf, 0, 5 * (sizeof (U32)));
            elsbuf[0] = htobe32(0x02100014);
            elsbuf[1] = htobe32(0x08000100);
            mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
                le32toh(rp->Wwn.PortNameHigh),
                le32toh(rp->Wwn.PortNameLow));
            /* remove from active list as we're done */
            TAILQ_REMOVE(&mpt->request_pending_list, req, links);
            req->state &= ~REQ_STATE_QUEUED;
            req->state |= REQ_STATE_DONE;
            mpt_fc_els_send_response(mpt, req, rp, 20);
            do_refresh = FALSE;
            break;
        default:
            mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
            break;
        }
    } else if (rctl == ABTS && type == 0) {
        uint16_t rx_id = le16toh(rp->Rxid);
        uint16_t ox_id = le16toh(rp->Oxid);
        request_t *tgt_req = NULL;

        mpt_prt(mpt,
            "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
            ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
            le32toh(rp->Wwn.PortNameLow));
        if (rx_id >= mpt->mpt_max_tgtcmds) {
            mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
        } else if (mpt->tgt_cmd_ptrs == NULL) {
            mpt_prt(mpt, "No TGT CMD PTRS\n");
        } else {
            tgt_req = mpt->tgt_cmd_ptrs[rx_id];
        }
        if (tgt_req) {
            mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
            union ccb *ccb = tgt->ccb;
            uint32_t ct_id;

            /*
             * Check to make sure we have the correct command.
             * The reply descriptor in the target state should
             * contain an IoIndex that should match the RX_ID.
             *
             * It'd be nice to have OX_ID to crosscheck with
             * as well.
             */
            ct_id = GET_IO_INDEX(tgt->reply_desc);

            if (ct_id != rx_id) {
                mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
                    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
                    rx_id, ct_id);
                goto skip;
            }

            ccb = tgt->ccb;
            if (ccb) {
                mpt_prt(mpt,
                    "CCB (%p): lun %u flags %x status %x\n",
                    ccb, ccb->ccb_h.target_lun,
                    ccb->ccb_h.flags, ccb->ccb_h.status);
            }
            mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
                "%x nxfers %x\n", tgt->state,
                tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
                tgt->nxfers);
    skip:
            if (mpt_abort_target_cmd(mpt, tgt_req)) {
                mpt_prt(mpt, "unable to start TargetAbort\n");
            }
        } else {
            mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
        }
        memset(elsbuf, 0, 5 * (sizeof (U32)));
        elsbuf[0] = htobe32(0);
        elsbuf[1] = htobe32((ox_id << 16) | rx_id);
        elsbuf[2] = htobe32(0x0000ffff);
        /*
         * Dork with the reply frame so that the response to it
         * will be correct.
         */
        rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
        /* remove from active list as we're done */
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
        req->state &= ~REQ_STATE_QUEUED;
        req->state |= REQ_STATE_DONE;
        mpt_fc_els_send_response(mpt, req, rp, 12);
        do_refresh = FALSE;
    } else {
        mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
    }
    if (do_refresh == TRUE) {
        /* remove from active list as we're done */
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
        req->state &= ~REQ_STATE_QUEUED;
        req->state |= REQ_STATE_DONE;
        mpt_fc_post_els(mpt, req, ioindex);
    }
    return (TRUE);
}

/*
 * Clean up all SCSI Initiator personality state in response
 * to a controller reset.
 */
static void
mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
{
    /*
     * The pending list is already run down by
     * the generic handler. Perform the same
     * operation on the timed out request list.
     */
    mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
        MPI_IOCSTATUS_INVALID_STATE);

    /*
     * XXX: We need to repost ELS and Target Command Buffers?
     */

    /*
     * Inform the XPT that a bus reset has occurred.
     */
    xpt_async(AC_BUS_RESET, mpt->path, NULL);
}
/*
 * Parse additional completion information in the reply
 * frame for SCSI I/O requests.
 */
static int
mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
    MSG_DEFAULT_REPLY *reply_frame)
{
    union ccb *ccb;
    MSG_SCSI_IO_REPLY *scsi_io_reply;
    u_int ioc_status;
    u_int sstate;

    MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
    KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
        || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
        ("MPT SCSI I/O Handler called with incorrect reply type"));
    KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
        ("MPT SCSI I/O Handler called with continuation reply"));

    scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
    ioc_status = le16toh(scsi_io_reply->IOCStatus);
    ioc_status &= MPI_IOCSTATUS_MASK;
    sstate = scsi_io_reply->SCSIState;

    ccb = req->ccb;
    ccb->csio.resid =
        ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);

    if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
        && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
        ccb->csio.sense_resid =
            ccb->csio.sense_len - le32toh(scsi_io_reply->SenseCount);
        bcopy(req->sense_vbuf, &ccb->csio.sense_data,
            min(ccb->csio.sense_len,
            le32toh(scsi_io_reply->SenseCount)));
    }

    if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
        /*
         * Tag messages rejected, but non-tagged retry
         * was successful.
        XXXX
        mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
         */
    }

    switch(ioc_status) {
    case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
        /*
         * XXX
         * The Linux driver indicates that a zero
         * transfer length with this error code
         * indicates a CRC error.
         *
         * No need to swap the bytes for checking
         * against zero.
         */
        if (scsi_io_reply->TransferCount == 0) {
            mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
            break;
        }
        /* FALLTHROUGH */
    case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
    case MPI_IOCSTATUS_SUCCESS:
    case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
        if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
            /*
             * Status was never returned for this transaction.
             */
            mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
        } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
            ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
            mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
            if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
                mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
        } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
            /* XXX Handle SPI-Packet and FCP-2 response info. */
            mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
        } else
            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
        break;
    case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
        mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
        break;
    case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
        mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
        break;
    case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
        /*
         * Since selection timeouts and "device really not
         * there" are grouped into this error code, report
         * selection timeout. Selection timeouts are
         * typically retried before giving up on the device
         * whereas "device not there" errors are considered
         * unretryable.
         */
        mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
        break;
    case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
        mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
        break;
    case MPI_IOCSTATUS_SCSI_INVALID_BUS:
        mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
        break;
    case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
        mpt_set_ccb_status(ccb, CAM_TID_INVALID);
        break;
    case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
        ccb->ccb_h.status = CAM_UA_TERMIO;
        break;
    case MPI_IOCSTATUS_INVALID_STATE:
        /*
         * The IOC has been reset. Emulate a bus reset.
         */
        /* FALLTHROUGH */
    case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
        ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
        break;
    case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
    case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
        /*
         * Don't clobber any timeout status that has
         * already been set for this transaction. We
         * want the SCSI layer to be able to differentiate
         * between the command we aborted due to timeout
         * and any innocent bystanders.
         */
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
            break;
        mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
        break;

    case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
        mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
        break;
    case MPI_IOCSTATUS_BUSY:
        mpt_set_ccb_status(ccb, CAM_BUSY);
        break;
    case MPI_IOCSTATUS_INVALID_FUNCTION:
    case MPI_IOCSTATUS_INVALID_SGL:
    case MPI_IOCSTATUS_INTERNAL_ERROR:
    case MPI_IOCSTATUS_INVALID_FIELD:
    default:
        /*
         * XXX
         * Some of the above may need to kick off
         * a recovery action!!!!
         */
        ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
        break;
    }

    if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
        mpt_freeze_ccb(ccb);
    }

    return (TRUE);
}
static void
mpt_action(struct cam_sim *sim, union ccb *ccb)
{
    struct mpt_softc *mpt;
    struct ccb_trans_settings *cts;
    target_id_t tgt;
    lun_id_t lun;
    int raid_passthru;

    CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));

    mpt = (struct mpt_softc *)cam_sim_softc(sim);
    raid_passthru = (sim == mpt->phydisk_sim);
    MPT_LOCK_ASSERT(mpt);

    tgt = ccb->ccb_h.target_id;
    lun = ccb->ccb_h.target_lun;
    if (raid_passthru &&
        ccb->ccb_h.func_code != XPT_PATH_INQ &&
        ccb->ccb_h.func_code != XPT_RESET_BUS &&
        ccb->ccb_h.func_code != XPT_RESET_DEV) {
        CAMLOCK_2_MPTLOCK(mpt);
        if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
            MPTLOCK_2_CAMLOCK(mpt);
            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
            mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
            xpt_done(ccb);
            return;
        }
        MPTLOCK_2_CAMLOCK(mpt);
    }
    ccb->ccb_h.ccb_mpt_ptr = mpt;

    switch (ccb->ccb_h.func_code) {
    case XPT_SCSI_IO:	/* Execute the requested I/O operation */
        /*
         * Do a couple of preliminary checks...
         */
        if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
            if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
                ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
                mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
                break;
            }
        }
        /* Max supported CDB length is 16 bytes */
        /* XXX Unless we implement the new 32byte message type */
        if (ccb->csio.cdb_len >
            sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
            mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
            break;
        }
#ifdef MPT_TEST_MULTIPATH
        if (mpt->failure_id == ccb->ccb_h.target_id) {
            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
            mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
            break;
        }
#endif
        ccb->csio.scsi_status = SCSI_STATUS_OK;
        mpt_start(sim, ccb);
        return;

    case XPT_RESET_BUS:
        if (raid_passthru) {
            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
            break;
        }
        /* FALLTHROUGH */
    case XPT_RESET_DEV:
        if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
            if (bootverbose) {
                xpt_print(ccb->ccb_h.path, "reset bus\n");
            }
        } else {
            xpt_print(ccb->ccb_h.path, "reset device\n");
        }
        CAMLOCK_2_MPTLOCK(mpt);
        (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
        MPTLOCK_2_CAMLOCK(mpt);

        /*
         * mpt_bus_reset is always successful in that it
         * will fall back to a hard reset should a bus
         * reset attempt fail.
         */
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        mpt_set_ccb_status(ccb, CAM_REQ_CMP);
        break;

    case XPT_ABORT:
    {
        union ccb *accb = ccb->cab.abort_ccb;
        CAMLOCK_2_MPTLOCK(mpt);
        switch (accb->ccb_h.func_code) {
        case XPT_ACCEPT_TARGET_IO:
        case XPT_IMMED_NOTIFY:
            ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
            break;
        case XPT_CONT_TARGET_IO:
            mpt_prt(mpt, "cannot abort active CTIOs yet\n");
            ccb->ccb_h.status = CAM_UA_ABORT;
            break;
        case XPT_SCSI_IO:
            ccb->ccb_h.status = CAM_UA_ABORT;
            break;
        default:
            ccb->ccb_h.status = CAM_REQ_INVALID;
            break;
        }
        MPTLOCK_2_CAMLOCK(mpt);
        break;
    }

#ifdef CAM_NEW_TRAN_CODE
#define IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
#else
#define IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
#endif
#define DP_DISC_ENABLE	0x1
#define DP_DISC_DISABL	0x2
#define DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)

#define DP_TQING_ENABLE	0x4
#define DP_TQING_DISABL	0x8
#define DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)

#define DP_WIDE		0x10
#define DP_NARROW	0x20
#define DP_WIDTH	(DP_WIDE|DP_NARROW)

#define DP_SYNC		0x40

    case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
    {
#ifdef CAM_NEW_TRAN_CODE
        struct ccb_trans_settings_scsi *scsi;
        struct ccb_trans_settings_spi *spi;
#endif
        uint8_t dval;
        u_int period;
        u_int offset;
        int i, j;

        cts = &ccb->cts;

        if (mpt->is_fc || mpt->is_sas) {
            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
            break;
        }

#ifdef CAM_NEW_TRAN_CODE
        scsi = &cts->proto_specific.scsi;
        spi = &cts->xport_specific.spi;

        /*
         * We can be called just to validate transport and proto versions
         */
        if (scsi->valid == 0 && spi->valid == 0) {
            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
            break;
        }
#endif
        /*
         * Skip attempting settings on RAID volume disks.
         * Other devices on the bus get the normal treatment.
         */
        if (mpt->phydisk_sim && raid_passthru == 0 &&
            mpt_is_raid_volume(mpt, tgt) != 0) {
            mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
                "no transfer settings for RAID vols\n");
            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
            break;
        }

        i = mpt->mpt_port_page2.PortSettings &
            MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
        j = mpt->mpt_port_page2.PortFlags &
            MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
        if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
            j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
            mpt_lprt(mpt, MPT_PRT_ALWAYS,
                "honoring BIOS transfer negotiations\n");
            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
            break;
        }

        dval = 0;
        period = 0;
        offset = 0;

#ifndef CAM_NEW_TRAN_CODE
        if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
            dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
                DP_DISC_ENABLE : DP_DISC_DISABL;
        }

        if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
            dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
                DP_TQING_ENABLE : DP_TQING_DISABL;
        }

        if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
            dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
        }

        if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
            (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
            dval |= DP_SYNC;
            period = cts->sync_period;
            offset = cts->sync_offset;
        }
#else
        if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
            dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
                DP_DISC_ENABLE : DP_DISC_DISABL;
        }

        if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
            dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
                DP_TQING_ENABLE : DP_TQING_DISABL;
        }

        if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
            dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
                DP_WIDE : DP_NARROW;
        }
        if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
            dval |= DP_SYNC;
            offset = spi->sync_offset;
        } else {
            PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
                &mpt->mpt_dev_page1[tgt];
            offset = ptr->RequestedParameters;
            offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
            offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
        }
        if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
            dval |= DP_SYNC;
            period = spi->sync_period;
        } else {
            PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
                &mpt->mpt_dev_page1[tgt];
            period = ptr->RequestedParameters;
            period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
            period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
        }
#endif
        CAMLOCK_2_MPTLOCK(mpt);
        if (dval & DP_DISC_ENABLE) {
            mpt->mpt_disc_enable |= (1 << tgt);
        } else if (dval & DP_DISC_DISABL) {
            mpt->mpt_disc_enable &= ~(1 << tgt);
        }
        if (dval & DP_TQING_ENABLE) {
            mpt->mpt_tag_enable |= (1 << tgt);
        } else if (dval & DP_TQING_DISABL) {
            mpt->mpt_tag_enable &= ~(1 << tgt);
        }
        if (dval & DP_WIDTH) {
            mpt_setwidth(mpt, tgt, 1);
        }
        if (dval & DP_SYNC) {
            mpt_setsync(mpt, tgt, period, offset);
        }
        if (dval == 0) {
            MPTLOCK_2_CAMLOCK(mpt);
            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
            break;
        }
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "set [%d]: 0x%x period 0x%x offset %d\n",
            tgt, dval, period, offset);
        if (mpt_update_spi_config(mpt, tgt)) {
            mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
        } else {
            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
        }
        MPTLOCK_2_CAMLOCK(mpt);
        break;
    }
    case XPT_GET_TRAN_SETTINGS:
    {
#ifdef CAM_NEW_TRAN_CODE
        struct ccb_trans_settings_scsi *scsi;
        cts = &ccb->cts;
        cts->protocol = PROTO_SCSI;
        if (mpt->is_fc) {
            struct ccb_trans_settings_fc *fc =
                &cts->xport_specific.fc;
            cts->protocol_version = SCSI_REV_SPC;
            cts->transport = XPORT_FC;
            cts->transport_version = 0;
            fc->valid = CTS_FC_VALID_SPEED;
            fc->bitrate = 100000;
        } else if (mpt->is_sas) {
            struct ccb_trans_settings_sas *sas =
                &cts->xport_specific.sas;
            cts->protocol_version = SCSI_REV_SPC2;
            cts->transport = XPORT_SAS;
            cts->transport_version = 0;
            sas->valid = CTS_SAS_VALID_SPEED;
            sas->bitrate = 300000;
        } else {
            cts->protocol_version = SCSI_REV_2;
            cts->transport = XPORT_SPI;
            cts->transport_version = 2;
            if (mpt_get_spi_settings(mpt, cts) != 0) {
                mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
                break;
            }
        }
        scsi = &cts->proto_specific.scsi;
        scsi->valid = CTS_SCSI_VALID_TQ;
        scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
#else
        cts = &ccb->cts;
        if (mpt->is_fc) {
            cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
            cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
            cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
        } else if (mpt->is_sas) {
            cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
            cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
            cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
        } else if (mpt_get_spi_settings(mpt, cts) != 0) {
            mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
            break;
        }
#endif
        mpt_set_ccb_status(ccb, CAM_REQ_CMP);
        break;
    }
    case XPT_CALC_GEOMETRY:
    {
        struct ccb_calc_geometry *ccg;

        ccg = &ccb->ccg;
        if (ccg->block_size == 0) {
            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
            mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
            break;
        }
        mpt_calc_geometry(ccg, /*extended*/1);
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        break;
    }
    case XPT_PATH_INQ:		/* Path routing inquiry */
    {
        struct ccb_pathinq *cpi = &ccb->cpi;

        cpi->version_num = 1;
        cpi->target_sprt = 0;
        cpi->hba_eng_cnt = 0;
        cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
        /*
         * FC cards report MAX_DEVICES of 512, but
         * the MSG_SCSI_IO_REQUEST target id field
         * is only 8 bits. Until we fix the driver
         * to support 'channels' for bus overflow,
         * just limit it.
         */
        if (cpi->max_target > 255) {
            cpi->max_target = 255;
        }

        /*
         * VMware ESX reports > 16 devices and then dies when we probe.
         */
        if (mpt->is_spi && cpi->max_target > 15) {
            cpi->max_target = 15;
        }
        if (mpt->is_spi)
            cpi->max_lun = 7;
        else
            cpi->max_lun = MPT_MAX_LUNS;
        cpi->initiator_id = mpt->mpt_ini_id;
        cpi->bus_id = cam_sim_bus(sim);

        /*
         * The base speed is the speed of the underlying connection.
         */
#ifdef CAM_NEW_TRAN_CODE
        cpi->protocol = PROTO_SCSI;
        if (mpt->is_fc) {
            cpi->hba_misc = PIM_NOBUSRESET;
            cpi->base_transfer_speed = 100000;
            cpi->hba_inquiry = PI_TAG_ABLE;
            cpi->transport = XPORT_FC;
            cpi->transport_version = 0;
            cpi->protocol_version = SCSI_REV_SPC;
        } else if (mpt->is_sas) {
            cpi->hba_misc = PIM_NOBUSRESET;
            cpi->base_transfer_speed = 300000;
            cpi->hba_inquiry = PI_TAG_ABLE;
            cpi->transport = XPORT_SAS;
            cpi->transport_version = 0;
            cpi->protocol_version = SCSI_REV_SPC2;
        } else {
            cpi->hba_misc = PIM_SEQSCAN;
            cpi->base_transfer_speed = 3300;
            cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
            cpi->transport = XPORT_SPI;
            cpi->transport_version = 2;
            cpi->protocol_version = SCSI_REV_2;
        }
#else
        if (mpt->is_fc) {
            cpi->hba_misc = PIM_NOBUSRESET;
            cpi->base_transfer_speed = 100000;
            cpi->hba_inquiry = PI_TAG_ABLE;
        } else if (mpt->is_sas) {
            cpi->hba_misc = PIM_NOBUSRESET;
            cpi->base_transfer_speed = 300000;
            cpi->hba_inquiry = PI_TAG_ABLE;
        } else {
            cpi->hba_misc = PIM_SEQSCAN;
            cpi->base_transfer_speed = 3300;
            cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
        }
#endif
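        /*
         * (base_transfer_speed is expressed in KB/s in the CAM CCB
         * definitions, so the values above read as roughly 100 MB/s for
         * 1Gb FC, 300 MB/s for 3Gb SAS, and 3.3 MB/s for the async
         * narrow SPI floor.)
         */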
        /*
         * We give our fake RAID passthru bus a width that is MaxVolumes
         * wide and restrict it to one lun.
         */
        if (raid_passthru) {
            cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
            cpi->initiator_id = cpi->max_target + 1;
            cpi->max_lun = 0;
        }

        if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
            cpi->hba_misc |= PIM_NOINITIATOR;
        }
        if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
            cpi->target_sprt =
                PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
        } else {
            cpi->target_sprt = 0;
        }
        strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
        strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
        strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
        cpi->unit_number = cam_sim_unit(sim);
        cpi->ccb_h.status = CAM_REQ_CMP;
        break;
    }
    case XPT_EN_LUN:		/* Enable LUN as a target */
    {
        int result;

        CAMLOCK_2_MPTLOCK(mpt);
        if (ccb->cel.enable)
            result = mpt_enable_lun(mpt,
                ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
        else
            result = mpt_disable_lun(mpt,
                ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
        MPTLOCK_2_CAMLOCK(mpt);
        if (result == 0) {
            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
        } else {
            mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
        }
        break;
    }
    case XPT_NOTIFY_ACK:	/* recycle notify ack */
    case XPT_IMMED_NOTIFY:	/* Add Immediate Notify Resource */
    case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
    {
        tgt_resource_t *trtp;
        lun_id_t lun = ccb->ccb_h.target_lun;
        ccb->ccb_h.sim_priv.entries[0].field = 0;
        ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
        ccb->ccb_h.flags = 0;

        if (lun == CAM_LUN_WILDCARD) {
            if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
                mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
                break;
            }
            trtp = &mpt->trt_wildcard;
        } else if (lun >= MPT_MAX_LUNS) {
            mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
            break;
        } else {
            trtp = &mpt->trt[lun];
        }
        CAMLOCK_2_MPTLOCK(mpt);
        if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
            mpt_lprt(mpt, MPT_PRT_DEBUG1,
                "Put FREE ATIO %p lun %d\n", ccb, lun);
            STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
                sim_links.stqe);
        } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
            mpt_lprt(mpt, MPT_PRT_DEBUG1,
                "Put FREE INOT lun %d\n", lun);
            STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
                sim_links.stqe);
        } else {
            mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
        }
        mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }
    case XPT_CONT_TARGET_IO:
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_target_start_io(mpt, ccb);
        MPTLOCK_2_CAMLOCK(mpt);
        return;

    default:
        ccb->ccb_h.status = CAM_REQ_INVALID;
        break;
    }
    xpt_done(ccb);
}
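/*
 * A note on the period values tested in mpt_setsync() further below:
 * these are SPI sync period factors rather than nanoseconds (smaller is
 * faster), so the two comparisons there enable DT clocking for the
 * faster factors and additionally QAS and IU packetized mode only at
 * the fastest factors; the exact factor-to-rate mapping is the SPI-4
 * one and is not spelled out here.
 */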
static int
mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
{
#ifdef CAM_NEW_TRAN_CODE
    struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
    struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
#endif
    target_id_t tgt;
    uint32_t dval, pval, oval;
    int rv;

    if (IS_CURRENT_SETTINGS(cts) == 0) {
        tgt = cts->ccb_h.target_id;
    } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
        if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
            return (-1);
        }
    } else {
        tgt = cts->ccb_h.target_id;
    }

    /*
     * We aren't looking at Port Page 2 BIOS settings here --
     * sometimes these have been known to be bogus XXX.
     *
     * For user settings, we pick the max from port page 0
     *
     * For current settings we read the current settings out from
     * device page 0 for that target.
     */
    if (IS_CURRENT_SETTINGS(cts)) {
        CONFIG_PAGE_SCSI_DEVICE_0 tmp;
        dval = 0;

        CAMLOCK_2_MPTLOCK(mpt);
        tmp = mpt->mpt_dev_page0[tgt];
        rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
            sizeof(tmp), FALSE, 5000);
        if (rv) {
            MPTLOCK_2_CAMLOCK(mpt);
            mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
            return (rv);
        }
        mpt2host_config_page_scsi_device_0(&tmp);

        MPTLOCK_2_CAMLOCK(mpt);
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
            tmp.NegotiatedParameters, tmp.Information);
        dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
            DP_WIDE : DP_NARROW;
        dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
            DP_DISC_ENABLE : DP_DISC_DISABL;
        dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
            DP_TQING_ENABLE : DP_TQING_DISABL;
        oval = tmp.NegotiatedParameters;
        oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
        oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
        pval = tmp.NegotiatedParameters;
        pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
        pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
        mpt->mpt_dev_page0[tgt] = tmp;
    } else {
        dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
        oval = mpt->mpt_port_page0.Capabilities;
        oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
        pval = mpt->mpt_port_page0.Capabilities;
        pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
    }

#ifndef CAM_NEW_TRAN_CODE
    cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
    cts->valid = 0;
    cts->sync_period = pval;
    cts->sync_offset = oval;
    cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
    cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
    cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
    if (dval & DP_WIDE) {
        cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
    } else {
        cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
    }
    if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
        cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
        if (dval & DP_DISC_ENABLE) {
            cts->flags |= CCB_TRANS_DISC_ENB;
        }
        if (dval & DP_TQING_ENABLE) {
            cts->flags |= CCB_TRANS_TAG_ENB;
        }
    }
#else
    spi->valid = 0;
    scsi->valid = 0;
    spi->flags = 0;
    scsi->flags = 0;
    spi->sync_offset = oval;
    spi->sync_period = pval;
    spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
    spi->valid |= CTS_SPI_VALID_SYNC_RATE;
    spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
    if (dval & DP_WIDE) {
        spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
    } else {
        spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
    }
    if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
        scsi->valid = CTS_SCSI_VALID_TQ;
        if (dval & DP_TQING_ENABLE) {
            scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
        }
        spi->valid |= CTS_SPI_VALID_DISC;
        if (dval & DP_DISC_ENABLE) {
            spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
        }
    }
#endif
    mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
        "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
        IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
    return (0);
}

static void
mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
{
    PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;

    ptr = &mpt->mpt_dev_page1[tgt];
    if (onoff) {
        ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
    } else {
        ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
    }
}

static void
mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
{
    PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;

    ptr = &mpt->mpt_dev_page1[tgt];
    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
    if (period == 0) {
        return;
    }
    ptr->RequestedParameters |=
        period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
    ptr->RequestedParameters |=
        offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
    if (period < 0xa) {
        ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
    }
    if (period < 0x9) {
        ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
        ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
    }
}

static int
mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
{
    CONFIG_PAGE_SCSI_DEVICE_1 tmp;
    int rv;

    mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
        "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
        tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
    tmp = mpt->mpt_dev_page1[tgt];
    host2mpt_config_page_scsi_device_1(&tmp);
    rv = mpt_write_cur_cfg_page(mpt, tgt,
        &tmp.Header, sizeof(tmp), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
        return (-1);
    }
    return (0);
}

static void
mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
{
#if __FreeBSD_version >= 500000
    cam_calc_geometry(ccg, extended);
#else
    uint32_t size_mb;
    uint32_t secs_per_cylinder;

    if (ccg->block_size == 0) {
        ccg->ccb_h.status = CAM_REQ_INVALID;
        return;
    }
    size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
    if (size_mb > 1024 && extended) {
        ccg->heads = 255;
        ccg->secs_per_track = 63;
    } else {
        ccg->heads = 64;
        ccg->secs_per_track = 32;
    }
    secs_per_cylinder = ccg->heads * ccg->secs_per_track;
    ccg->cylinders = ccg->volume_size / secs_per_cylinder;
    ccg->ccb_h.status = CAM_REQ_CMP;
#endif
}

/****************************** Timeout Recovery ******************************/
static int
mpt_spawn_recovery_thread(struct mpt_softc *mpt)
{
    int error;

    error = mpt_kthread_create(mpt_recovery_thread, mpt,
        &mpt->recovery_thread, /*flags*/0,
        /*altstack*/0, "mpt_recovery%d", mpt->unit);
    return (error);
}

static void
mpt_terminate_recovery_thread(struct mpt_softc *mpt)
{
    if (mpt->recovery_thread == NULL) {
        return;
    }
    mpt->shutdwn_recovery = 1;
    wakeup(mpt);
    /*
     * Sleep on a slightly different location
     * for this interlock just for added safety.
     */
    mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
}
3988 */ 3989 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); 3990 } 3991 3992 static void 3993 mpt_recovery_thread(void *arg) 3994 { 3995 struct mpt_softc *mpt; 3996 3997 mpt = (struct mpt_softc *)arg; 3998 MPT_LOCK(mpt); 3999 for (;;) { 4000 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4001 if (mpt->shutdwn_recovery == 0) { 4002 mpt_sleep(mpt, mpt, PUSER, "idle", 0); 4003 } 4004 } 4005 if (mpt->shutdwn_recovery != 0) { 4006 break; 4007 } 4008 mpt_recover_commands(mpt); 4009 } 4010 mpt->recovery_thread = NULL; 4011 wakeup(&mpt->recovery_thread); 4012 MPT_UNLOCK(mpt); 4013 mpt_kthread_exit(0); 4014 } 4015 4016 static int 4017 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, 4018 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) 4019 { 4020 MSG_SCSI_TASK_MGMT *tmf_req; 4021 int error; 4022 4023 /* 4024 * Wait for any current TMF request to complete. 4025 * We're only allowed to issue one TMF at a time. 4026 */ 4027 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, 4028 sleep_ok, MPT_TMF_MAX_TIMEOUT); 4029 if (error != 0) { 4030 mpt_reset(mpt, TRUE); 4031 return (ETIMEDOUT); 4032 } 4033 4034 mpt_assign_serno(mpt, mpt->tmf_req); 4035 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; 4036 4037 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; 4038 memset(tmf_req, 0, sizeof(*tmf_req)); 4039 tmf_req->TargetID = target; 4040 tmf_req->Bus = channel; 4041 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 4042 tmf_req->TaskType = type; 4043 tmf_req->MsgFlags = flags; 4044 tmf_req->MsgContext = 4045 htole32(mpt->tmf_req->index | scsi_tmf_handler_id); 4046 if (lun > MPT_MAX_LUNS) { 4047 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4048 tmf_req->LUN[1] = lun & 0xff; 4049 } else { 4050 tmf_req->LUN[1] = lun; 4051 } 4052 tmf_req->TaskMsgContext = abort_ctx; 4053 4054 mpt_lprt(mpt, MPT_PRT_DEBUG, 4055 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, 4056 mpt->tmf_req->serno, tmf_req->MsgContext); 4057 if (mpt->verbose > MPT_PRT_DEBUG) { 4058 mpt_print_request(tmf_req); 4059 } 4060 4061 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, 4062 ("mpt_scsi_send_tmf: tmf_req already on pending list")); 4063 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); 4064 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); 4065 if (error != MPT_OK) { 4066 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); 4067 mpt->tmf_req->state = REQ_STATE_FREE; 4068 mpt_reset(mpt, TRUE); 4069 } 4070 return (error); 4071 } 4072 4073 /* 4074 * When a command times out, it is placed on the request_timeout_list 4075 * and we wake our recovery thread. The MPT-Fusion architecture supports 4076 * only a single TMF operation at a time, so we serially abort/bdr, etc., 4077 * the timed-out transactions. The next TMF is issued either by the 4078 * completion handler of the current TMF waking our recovery thread, 4079 * or the TMF timeout handler causing a hard reset sequence. 4080 */ 4081 static void 4082 mpt_recover_commands(struct mpt_softc *mpt) 4083 { 4084 request_t *req; 4085 union ccb *ccb; 4086 int error; 4087 4088 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4089 /* 4090 * No work to do- leave. 4091 */ 4092 mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); 4093 return; 4094 } 4095 4096 /* 4097 * Flush any commands whose completion coincides with their timeout.
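* Calling mpt_intr() by hand drains any posted replies, so a request * that actually completed while we were being scheduled is pulled off * the timeout list by its reply handler before we try to abort it.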
4098 */ 4099 mpt_intr(mpt); 4100 4101 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4102 /* 4103 * The timed-out commands have already 4104 * completed. This typically means 4105 * that either the timeout value was on 4106 * the hairy edge of what the device 4107 * requires or - more likely - interrupts 4108 * are not happening. 4109 */ 4110 mpt_prt(mpt, "Timed-out requests already complete. " 4111 "Interrupts may not be functioning.\n"); 4112 mpt_enable_ints(mpt); 4113 return; 4114 } 4115 4116 /* 4117 * We have no visibility into the current state of the 4118 * controller, so attempt to abort the commands in the 4119 * order they timed-out. For initiator commands, we 4120 * depend on the reply handler pulling requests off 4121 * the timeout list. 4122 */ 4123 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { 4124 uint16_t status; 4125 uint8_t response; 4126 MSG_REQUEST_HEADER *hdrp = req->req_vbuf; 4127 4128 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", 4129 req, req->serno, hdrp->Function); 4130 ccb = req->ccb; 4131 if (ccb == NULL) { 4132 mpt_prt(mpt, "null ccb in timed out request. " 4133 "Resetting Controller.\n"); 4134 mpt_reset(mpt, TRUE); 4135 continue; 4136 } 4137 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); 4138 4139 /* 4140 * Check whether this is an initiator command; anything 4141 * else is put back on the pending queue below for now. 4142 */ 4143 switch (hdrp->Function) { 4144 case MPI_FUNCTION_SCSI_IO_REQUEST: 4145 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 4146 break; 4147 default: 4148 /* 4149 * XXX: FIX ME: need to abort target assists... 4150 */ 4151 mpt_prt(mpt, "just putting it back on the pend q\n"); 4152 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 4153 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, 4154 links); 4155 continue; 4156 } 4157 4158 error = mpt_scsi_send_tmf(mpt, 4159 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 4160 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 4161 htole32(req->index | scsi_io_handler_id), TRUE); 4162 4163 if (error != 0) { 4164 /* 4165 * mpt_scsi_send_tmf hard resets on failure, so no 4166 * need to do so here. Our queue should be emptied 4167 * by the hard reset. 4168 */ 4169 continue; 4170 } 4171 4172 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 4173 REQ_STATE_DONE, TRUE, 500); 4174 4175 status = le16toh(mpt->tmf_req->IOCStatus); 4176 response = mpt->tmf_req->ResponseCode; 4177 mpt->tmf_req->state = REQ_STATE_FREE; 4178 4179 if (error != 0) { 4180 /* 4181 * If we've errored out, reset the controller. 4182 */ 4183 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " 4184 "Resetting controller\n"); 4185 mpt_reset(mpt, TRUE); 4186 continue; 4187 } 4188 4189 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 4190 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " 4191 "Resetting controller.\n", status); 4192 mpt_reset(mpt, TRUE); 4193 continue; 4194 } 4195 4196 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 4197 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 4198 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. 
" 4199 "Resetting controller.\n", response); 4200 mpt_reset(mpt, TRUE); 4201 continue; 4202 } 4203 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 4204 } 4205 } 4206 4207 /************************ Target Mode Support ****************************/ 4208 static void 4209 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 4210 { 4211 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 4212 PTR_SGE_TRANSACTION32 tep; 4213 PTR_SGE_SIMPLE32 se; 4214 bus_addr_t paddr; 4215 uint32_t fl; 4216 4217 paddr = req->req_pbuf; 4218 paddr += MPT_RQSL(mpt); 4219 4220 fc = req->req_vbuf; 4221 memset(fc, 0, MPT_REQUEST_AREA); 4222 fc->BufferCount = 1; 4223 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 4224 fc->MsgContext = htole32(req->index | fc_els_handler_id); 4225 4226 /* 4227 * Okay, set up ELS buffer pointers. ELS buffer pointers 4228 * consist of a TE SGL element (with details length of zero) 4229 * followe by a SIMPLE SGL element which holds the address 4230 * of the buffer. 4231 */ 4232 4233 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 4234 4235 tep->ContextSize = 4; 4236 tep->Flags = 0; 4237 tep->TransactionContext[0] = htole32(ioindex); 4238 4239 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 4240 fl = 4241 MPI_SGE_FLAGS_HOST_TO_IOC | 4242 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4243 MPI_SGE_FLAGS_LAST_ELEMENT | 4244 MPI_SGE_FLAGS_END_OF_LIST | 4245 MPI_SGE_FLAGS_END_OF_BUFFER; 4246 fl <<= MPI_SGE_FLAGS_SHIFT; 4247 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 4248 se->FlagsLength = htole32(fl); 4249 se->Address = htole32((uint32_t) paddr); 4250 mpt_lprt(mpt, MPT_PRT_DEBUG, 4251 "add ELS index %d ioindex %d for %p:%u\n", 4252 req->index, ioindex, req, req->serno); 4253 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 4254 ("mpt_fc_post_els: request not locked")); 4255 mpt_send_cmd(mpt, req); 4256 } 4257 4258 static void 4259 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 4260 { 4261 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 4262 PTR_CMD_BUFFER_DESCRIPTOR cb; 4263 bus_addr_t paddr; 4264 4265 paddr = req->req_pbuf; 4266 paddr += MPT_RQSL(mpt); 4267 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 4268 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 4269 4270 fc = req->req_vbuf; 4271 fc->BufferCount = 1; 4272 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 4273 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4274 4275 cb = &fc->Buffer[0]; 4276 cb->IoIndex = htole16(ioindex); 4277 cb->u.PhysicalAddress32 = htole32((U32) paddr); 4278 4279 mpt_check_doorbell(mpt); 4280 mpt_send_cmd(mpt, req); 4281 } 4282 4283 static int 4284 mpt_add_els_buffers(struct mpt_softc *mpt) 4285 { 4286 int i; 4287 4288 if (mpt->is_fc == 0) { 4289 return (TRUE); 4290 } 4291 4292 if (mpt->els_cmds_allocated) { 4293 return (TRUE); 4294 } 4295 4296 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), 4297 M_DEVBUF, M_NOWAIT | M_ZERO); 4298 4299 if (mpt->els_cmd_ptrs == NULL) { 4300 return (FALSE); 4301 } 4302 4303 /* 4304 * Feed the chip some ELS buffer resources 4305 */ 4306 for (i = 0; i < MPT_MAX_ELS; i++) { 4307 request_t *req = mpt_get_request(mpt, FALSE); 4308 if (req == NULL) { 4309 break; 4310 } 4311 req->state |= REQ_STATE_LOCKED; 4312 mpt->els_cmd_ptrs[i] = req; 4313 mpt_fc_post_els(mpt, req, i); 4314 } 4315 4316 if (i == 0) { 4317 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 4318 free(mpt->els_cmd_ptrs, M_DEVBUF); 4319 mpt->els_cmd_ptrs = NULL; 4320 return (FALSE); 4321 } 4322 if (i != MPT_MAX_ELS) { 4323 mpt_lprt(mpt, MPT_PRT_INFO, 4324 
"only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 4325 } 4326 mpt->els_cmds_allocated = i; 4327 return(TRUE); 4328 } 4329 4330 static int 4331 mpt_add_target_commands(struct mpt_softc *mpt) 4332 { 4333 int i, max; 4334 4335 if (mpt->tgt_cmd_ptrs) { 4336 return (TRUE); 4337 } 4338 4339 max = MPT_MAX_REQUESTS(mpt) >> 1; 4340 if (max > mpt->mpt_max_tgtcmds) { 4341 max = mpt->mpt_max_tgtcmds; 4342 } 4343 mpt->tgt_cmd_ptrs = 4344 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 4345 if (mpt->tgt_cmd_ptrs == NULL) { 4346 mpt_prt(mpt, 4347 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 4348 return (FALSE); 4349 } 4350 4351 for (i = 0; i < max; i++) { 4352 request_t *req; 4353 4354 req = mpt_get_request(mpt, FALSE); 4355 if (req == NULL) { 4356 break; 4357 } 4358 req->state |= REQ_STATE_LOCKED; 4359 mpt->tgt_cmd_ptrs[i] = req; 4360 mpt_post_target_command(mpt, req, i); 4361 } 4362 4363 4364 if (i == 0) { 4365 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 4366 free(mpt->tgt_cmd_ptrs, M_DEVBUF); 4367 mpt->tgt_cmd_ptrs = NULL; 4368 return (FALSE); 4369 } 4370 4371 mpt->tgt_cmds_allocated = i; 4372 4373 if (i < max) { 4374 mpt_lprt(mpt, MPT_PRT_INFO, 4375 "added %d of %d target bufs\n", i, max); 4376 } 4377 return (i); 4378 } 4379 4380 static int 4381 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4382 { 4383 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4384 mpt->twildcard = 1; 4385 } else if (lun >= MPT_MAX_LUNS) { 4386 return (EINVAL); 4387 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4388 return (EINVAL); 4389 } 4390 if (mpt->tenabled == 0) { 4391 if (mpt->is_fc) { 4392 (void) mpt_fc_reset_link(mpt, 0); 4393 } 4394 mpt->tenabled = 1; 4395 } 4396 if (lun == CAM_LUN_WILDCARD) { 4397 mpt->trt_wildcard.enabled = 1; 4398 } else { 4399 mpt->trt[lun].enabled = 1; 4400 } 4401 return (0); 4402 } 4403 4404 static int 4405 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4406 { 4407 int i; 4408 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4409 mpt->twildcard = 0; 4410 } else if (lun >= MPT_MAX_LUNS) { 4411 return (EINVAL); 4412 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4413 return (EINVAL); 4414 } 4415 if (lun == CAM_LUN_WILDCARD) { 4416 mpt->trt_wildcard.enabled = 0; 4417 } else { 4418 mpt->trt[lun].enabled = 0; 4419 } 4420 for (i = 0; i < MPT_MAX_LUNS; i++) { 4421 if (mpt->trt[lun].enabled) { 4422 break; 4423 } 4424 } 4425 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4426 if (mpt->is_fc) { 4427 (void) mpt_fc_reset_link(mpt, 0); 4428 } 4429 mpt->tenabled = 0; 4430 } 4431 return (0); 4432 } 4433 4434 /* 4435 * Called with MPT lock held 4436 */ 4437 static void 4438 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4439 { 4440 struct ccb_scsiio *csio = &ccb->csio; 4441 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4442 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4443 4444 switch (tgt->state) { 4445 case TGT_STATE_IN_CAM: 4446 break; 4447 case TGT_STATE_MOVING_DATA: 4448 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4449 xpt_freeze_simq(mpt->sim, 1); 4450 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4451 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4452 MPTLOCK_2_CAMLOCK(mpt); 4453 xpt_done(ccb); 4454 CAMLOCK_2_MPTLOCK(mpt); 4455 return; 4456 default: 4457 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4458 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4459 mpt_tgt_dump_req_state(mpt, cmd_req); 4460 mpt_set_ccb_status(ccb, 
CAM_REQ_CMP_ERR); 4461 MPTLOCK_2_CAMLOCK(mpt); 4462 xpt_done(ccb); 4463 CAMLOCK_2_MPTLOCK(mpt); 4464 return; 4465 } 4466 4467 if (csio->dxfer_len) { 4468 bus_dmamap_callback_t *cb; 4469 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4470 request_t *req; 4471 4472 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, 4473 ("dxfer_len %u but direction is NONE\n", csio->dxfer_len)); 4474 4475 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4476 if (mpt->outofbeer == 0) { 4477 mpt->outofbeer = 1; 4478 xpt_freeze_simq(mpt->sim, 1); 4479 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4480 } 4481 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4482 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4483 MPTLOCK_2_CAMLOCK(mpt); 4484 xpt_done(ccb); 4485 CAMLOCK_2_MPTLOCK(mpt); 4486 return; 4487 } 4488 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4489 if (sizeof (bus_addr_t) > 4) { 4490 cb = mpt_execute_req_a64; 4491 } else { 4492 cb = mpt_execute_req; 4493 } 4494 4495 req->ccb = ccb; 4496 ccb->ccb_h.ccb_req_ptr = req; 4497 4498 /* 4499 * Record the currently active ccb and the 4500 * request for it in our target state area. 4501 */ 4502 tgt->ccb = ccb; 4503 tgt->req = req; 4504 4505 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4506 ta = req->req_vbuf; 4507 4508 if (mpt->is_sas) { 4509 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4510 cmd_req->req_vbuf; 4511 ta->QueueTag = ssp->InitiatorTag; 4512 } else if (mpt->is_spi) { 4513 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4514 cmd_req->req_vbuf; 4515 ta->QueueTag = sp->Tag; 4516 } 4517 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4518 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4519 ta->ReplyWord = htole32(tgt->reply_desc); 4520 if (csio->ccb_h.target_lun > MPT_MAX_LUNS) { 4521 ta->LUN[0] = 4522 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); 4523 ta->LUN[1] = csio->ccb_h.target_lun & 0xff; 4524 } else { 4525 ta->LUN[1] = csio->ccb_h.target_lun; 4526 } 4527 4528 ta->RelativeOffset = tgt->bytes_xfered; 4529 ta->DataLength = ccb->csio.dxfer_len; 4530 if (ta->DataLength > tgt->resid) { 4531 ta->DataLength = tgt->resid; 4532 } 4533 4534 /* 4535 * XXX Should be done after data transfer completes? 4536 */ 4537 tgt->resid -= csio->dxfer_len; 4538 tgt->bytes_xfered += csio->dxfer_len; 4539 4540 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 4541 ta->TargetAssistFlags |= 4542 TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4543 } 4544 4545 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4546 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 4547 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 4548 ta->TargetAssistFlags |= 4549 TARGET_ASSIST_FLAGS_AUTO_STATUS; 4550 } 4551 #endif 4552 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; 4553 4554 mpt_lprt(mpt, MPT_PRT_DEBUG, 4555 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " 4556 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, 4557 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); 4558 4559 MPTLOCK_2_CAMLOCK(mpt); 4560 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 4561 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 4562 int error; 4563 int s = splsoftvm(); 4564 error = bus_dmamap_load(mpt->buffer_dmat, 4565 req->dmap, csio->data_ptr, csio->dxfer_len, 4566 cb, req, 0); 4567 splx(s); 4568 if (error == EINPROGRESS) { 4569 xpt_freeze_simq(mpt->sim, 1); 4570 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4571 } 4572 } else { 4573 /* 4574 * We have been given a pointer to a single 4575 * physical buffer. 
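* (CAM_DATA_PHYS). No dmamap load is needed: we synthesize a * one-entry segment list from the physical address and invoke the * callback directly.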
4576 */ 4577 struct bus_dma_segment seg; 4578 seg.ds_addr = (bus_addr_t) 4579 (vm_offset_t)csio->data_ptr; 4580 seg.ds_len = csio->dxfer_len; 4581 (*cb)(req, &seg, 1, 0); 4582 } 4583 } else { 4584 /* 4585 * We have been given a list of addresses. 4586 * This case could be easily supported but they are not 4587 * currently generated by the CAM subsystem so there 4588 * is no point in wasting the time right now. 4589 */ 4590 struct bus_dma_segment *sgs; 4591 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 4592 (*cb)(req, NULL, 0, EFAULT); 4593 } else { 4594 /* Just use the segments provided */ 4595 sgs = (struct bus_dma_segment *)csio->data_ptr; 4596 (*cb)(req, sgs, csio->sglist_cnt, 0); 4597 } 4598 } 4599 CAMLOCK_2_MPTLOCK(mpt); 4600 } else { 4601 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 4602 4603 /* 4604 * XXX: I don't know why this seems to happen, but 4605 * XXX: completing the CCB seems to make things happy. 4606 * XXX: This seems to happen if the initiator requests 4607 * XXX: enough data that we have to do multiple CTIOs. 4608 */ 4609 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4610 mpt_lprt(mpt, MPT_PRT_DEBUG, 4611 "Meaningless STATUS CCB (%p): flags %x status %x " 4612 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 4613 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 4614 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4615 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4616 MPTLOCK_2_CAMLOCK(mpt); 4617 xpt_done(ccb); 4618 CAMLOCK_2_MPTLOCK(mpt); 4619 return; 4620 } 4621 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 4622 sp = sense; 4623 memcpy(sp, &csio->sense_data, 4624 min(csio->sense_len, MPT_SENSE_SIZE)); 4625 } 4626 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); 4627 } 4628 } 4629 4630 static void 4631 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 4632 uint32_t lun, int send, uint8_t *data, size_t length) 4633 { 4634 mpt_tgt_state_t *tgt; 4635 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4636 SGE_SIMPLE32 *se; 4637 uint32_t flags; 4638 uint8_t *dptr; 4639 bus_addr_t pptr; 4640 request_t *req; 4641 4642 /* 4643 * We enter with resid set to the data load for the command. 
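* That is, tgt->resid still holds the transport's expected transfer * length for the command (e.g. FcpDl for FC), set when the command * buffer arrived; it is decremented below as this locally synthesized * data (inquiry, sense, etc.) is queued.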
4644 */ 4645 tgt = MPT_TGT_STATE(mpt, cmd_req); 4646 if (length == 0 || tgt->resid == 0) { 4647 tgt->resid = 0; 4648 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); 4649 return; 4650 } 4651 4652 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4653 mpt_prt(mpt, "out of resources- dropping local response\n"); 4654 return; 4655 } 4656 tgt->is_local = 1; 4657 4658 4659 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4660 ta = req->req_vbuf; 4661 4662 if (mpt->is_sas) { 4663 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 4664 ta->QueueTag = ssp->InitiatorTag; 4665 } else if (mpt->is_spi) { 4666 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 4667 ta->QueueTag = sp->Tag; 4668 } 4669 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4670 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4671 ta->ReplyWord = htole32(tgt->reply_desc); 4672 if (lun > MPT_MAX_LUNS) { 4673 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4674 ta->LUN[1] = lun & 0xff; 4675 } else { 4676 ta->LUN[1] = lun; 4677 } 4678 ta->RelativeOffset = 0; 4679 ta->DataLength = length; 4680 4681 dptr = req->req_vbuf; 4682 dptr += MPT_RQSL(mpt); 4683 pptr = req->req_pbuf; 4684 pptr += MPT_RQSL(mpt); 4685 memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 4686 4687 se = (SGE_SIMPLE32 *) &ta->SGL[0]; 4688 memset(se, 0,sizeof (*se)); 4689 4690 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 4691 if (send) { 4692 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4693 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 4694 } 4695 se->Address = pptr; 4696 MPI_pSGE_SET_LENGTH(se, length); 4697 flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 4698 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 4699 MPI_pSGE_SET_FLAGS(se, flags); 4700 4701 tgt->ccb = NULL; 4702 tgt->req = req; 4703 tgt->resid -= length; 4704 tgt->bytes_xfered = length; 4705 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4706 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 4707 #else 4708 tgt->state = TGT_STATE_MOVING_DATA; 4709 #endif 4710 mpt_send_cmd(mpt, req); 4711 } 4712 4713 /* 4714 * Abort queued up CCBs 4715 */ 4716 static cam_status 4717 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 4718 { 4719 struct mpt_hdr_stailq *lp; 4720 struct ccb_hdr *srch; 4721 int found = 0; 4722 union ccb *accb = ccb->cab.abort_ccb; 4723 tgt_resource_t *trtp; 4724 4725 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 4726 4727 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4728 trtp = &mpt->trt_wildcard; 4729 } else { 4730 trtp = &mpt->trt[ccb->ccb_h.target_lun]; 4731 } 4732 4733 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4734 lp = &trtp->atios; 4735 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 4736 lp = &trtp->inots; 4737 } else { 4738 return (CAM_REQ_INVALID); 4739 } 4740 4741 STAILQ_FOREACH(srch, lp, sim_links.stqe) { 4742 if (srch == &accb->ccb_h) { 4743 found = 1; 4744 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 4745 break; 4746 } 4747 } 4748 if (found) { 4749 accb->ccb_h.status = CAM_REQ_ABORTED; 4750 xpt_done(accb); 4751 return (CAM_REQ_CMP); 4752 } 4753 mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); 4754 return (CAM_PATH_INVALID); 4755 } 4756 4757 /* 4758 * Ask the MPT to abort the current target command 4759 */ 4760 static int 4761 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 4762 { 4763 int error; 4764 request_t *req; 4765 PTR_MSG_TARGET_MODE_ABORT abtp; 4766 4767 req = mpt_get_request(mpt, FALSE); 4768 if (req == NULL) { 4769 return (-1); 4770 } 4771 abtp = req->req_vbuf; 4772 memset(abtp, 0, sizeof (*abtp)); 
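/* * The abort is keyed by the ReplyWord that identified the victim * command buffer, not by a MsgContext, so it can name a command the * chip still owns. FC and SAS parts take the abort through the * normal request queue; parallel SCSI parts only accept it via the * doorbell handshake (the else clause below). */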
4773 4774 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4775 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 4776 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 4777 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 4778 error = 0; 4779 if (mpt->is_fc || mpt->is_sas) { 4780 mpt_send_cmd(mpt, req); 4781 } else { 4782 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); 4783 } 4784 return (error); 4785 } 4786 4787 /* 4788 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 4789 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 4790 * FC929 to set bogus FC_RSP fields (nonzero residuals 4791 * but w/o RESID fields set). This causes QLogic initiators 4792 * to think maybe that a frame was lost. 4793 * 4794 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 4795 * we use allocated requests to do TARGET_ASSIST and we 4796 * need to know when to release them. 4797 */ 4798 4799 static void 4800 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 4801 uint8_t status, uint8_t const *sense_data) 4802 { 4803 uint8_t *cmd_vbuf; 4804 mpt_tgt_state_t *tgt; 4805 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 4806 request_t *req; 4807 bus_addr_t paddr; 4808 int resplen = 0; 4809 uint32_t fl; 4810 4811 cmd_vbuf = cmd_req->req_vbuf; 4812 cmd_vbuf += MPT_RQSL(mpt); 4813 tgt = MPT_TGT_STATE(mpt, cmd_req); 4814 4815 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4816 if (mpt->outofbeer == 0) { 4817 mpt->outofbeer = 1; 4818 xpt_freeze_simq(mpt->sim, 1); 4819 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4820 } 4821 if (ccb) { 4822 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4823 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4824 MPTLOCK_2_CAMLOCK(mpt); 4825 xpt_done(ccb); 4826 CAMLOCK_2_MPTLOCK(mpt); 4827 } else { 4828 mpt_prt(mpt, 4829 "could not allocate status request- dropping\n"); 4830 } 4831 return; 4832 } 4833 req->ccb = ccb; 4834 if (ccb) { 4835 ccb->ccb_h.ccb_mpt_ptr = mpt; 4836 ccb->ccb_h.ccb_req_ptr = req; 4837 } 4838 4839 /* 4840 * Record the currently active ccb, if any, and the 4841 * request for it in our target state area. 4842 */ 4843 tgt->ccb = ccb; 4844 tgt->req = req; 4845 tgt->state = TGT_STATE_SENDING_STATUS; 4846 4847 tp = req->req_vbuf; 4848 paddr = req->req_pbuf; 4849 paddr += MPT_RQSL(mpt); 4850 4851 memset(tp, 0, sizeof (*tp)); 4852 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 4853 if (mpt->is_fc) { 4854 PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 4855 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 4856 uint8_t *sts_vbuf; 4857 uint32_t *rsp; 4858 4859 sts_vbuf = req->req_vbuf; 4860 sts_vbuf += MPT_RQSL(mpt); 4861 rsp = (uint32_t *) sts_vbuf; 4862 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 4863 4864 /* 4865 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 4866 * It has to be big-endian in memory and is organized 4867 * in 32 bit words, which are much easier to deal with 4868 * as words which are swizzled as needed. 4869 * 4870 * All we're filling here is the FC_RSP payload. 4871 * We may just have the chip synthesize it if 4872 * we have no residual and an OK status. 4873 * 4874 */ 4875 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 4876 4877 rsp[2] = status; 4878 if (tgt->resid) { 4879 rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ 4880 rsp[3] = htobe32(tgt->resid); 4881 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4882 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4883 #endif 4884 } 4885 if (status == SCSI_STATUS_CHECK_COND) { 4886 int i; 4887 4888 rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! 
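(0x200 is, presumably, the FCP_RSP FCP_SNS_LEN_VALID flag - 0x02 shifted into the flags byte of this word - just as the 0x800 above would be FCP_RESID_UNDER, 0x08) 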
*/ 4889 rsp[4] = htobe32(MPT_SENSE_SIZE); 4890 if (sense_data) { 4891 memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE); 4892 } else { 4893 mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI" 4894 "TION but no sense data?\n"); 4895 memset(&rsp[8], 0, MPT_SENSE_SIZE); 4896 } 4897 for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) { 4898 rsp[i] = htobe32(rsp[i]); 4899 } 4900 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4901 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4902 #endif 4903 } 4904 #ifndef WE_TRUST_AUTO_GOOD_STATUS 4905 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4906 #endif 4907 rsp[2] = htobe32(rsp[2]); 4908 } else if (mpt->is_sas) { 4909 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4910 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; 4911 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); 4912 } else { 4913 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4914 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; 4915 tp->StatusCode = status; 4916 tp->QueueTag = htole16(sp->Tag); 4917 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); 4918 } 4919 4920 tp->ReplyWord = htole32(tgt->reply_desc); 4921 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4922 4923 #ifdef WE_CAN_USE_AUTO_REPOST 4924 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; 4925 #endif 4926 if (status == SCSI_STATUS_OK && resplen == 0) { 4927 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; 4928 } else { 4929 tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); 4930 fl = 4931 MPI_SGE_FLAGS_HOST_TO_IOC | 4932 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4933 MPI_SGE_FLAGS_LAST_ELEMENT | 4934 MPI_SGE_FLAGS_END_OF_LIST | 4935 MPI_SGE_FLAGS_END_OF_BUFFER; 4936 fl <<= MPI_SGE_FLAGS_SHIFT; 4937 fl |= resplen; 4938 tp->StatusDataSGE.FlagsLength = htole32(fl); 4939 } 4940 4941 mpt_lprt(mpt, MPT_PRT_DEBUG, 4942 "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n", 4943 ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req, 4944 req->serno, tgt->resid); 4945 if (ccb) { 4946 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4947 mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb); 4948 } 4949 mpt_send_cmd(mpt, req); 4950 } 4951 4952 static void 4953 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, 4954 tgt_resource_t *trtp, int init_id) 4955 { 4956 struct ccb_immed_notify *inot; 4957 mpt_tgt_state_t *tgt; 4958 4959 tgt = MPT_TGT_STATE(mpt, req); 4960 inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots); 4961 if (inot == NULL) { 4962 mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n"); 4963 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL); 4964 return; 4965 } 4966 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); 4967 mpt_lprt(mpt, MPT_PRT_DEBUG1, 4968 "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun); 4969 4970 memset(&inot->sense_data, 0, sizeof (inot->sense_data)); 4971 inot->sense_len = 0; 4972 memset(inot->message_args, 0, sizeof (inot->message_args)); 4973 inot->initiator_id = init_id; /* XXX */ 4974 4975 /* 4976 * This is a somewhat grotesque attempt to map from task management 4977 * to old style SCSI messages. God help us all. 
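* Note that MPT_TERMINATE_TASK has no old-style message equivalent, * so it degrades to ABORT TAG below, and anything unrecognized * becomes a NOP message.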
4978 */ 4979 switch (fc) { 4980 case MPT_ABORT_TASK_SET: 4981 inot->message_args[0] = MSG_ABORT_TAG; 4982 break; 4983 case MPT_CLEAR_TASK_SET: 4984 inot->message_args[0] = MSG_CLEAR_TASK_SET; 4985 break; 4986 case MPT_TARGET_RESET: 4987 inot->message_args[0] = MSG_TARGET_RESET; 4988 break; 4989 case MPT_CLEAR_ACA: 4990 inot->message_args[0] = MSG_CLEAR_ACA; 4991 break; 4992 case MPT_TERMINATE_TASK: 4993 inot->message_args[0] = MSG_ABORT_TAG; 4994 break; 4995 default: 4996 inot->message_args[0] = MSG_NOOP; 4997 break; 4998 } 4999 tgt->ccb = (union ccb *) inot; 5000 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 5001 MPTLOCK_2_CAMLOCK(mpt); 5002 xpt_done((union ccb *)inot); 5003 CAMLOCK_2_MPTLOCK(mpt); 5004 } 5005 5006 static void 5007 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 5008 { 5009 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 5010 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 5011 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 5012 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 5013 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 5014 '0', '0', '0', '1' 5015 }; 5016 struct ccb_accept_tio *atiop; 5017 lun_id_t lun; 5018 int tag_action = 0; 5019 mpt_tgt_state_t *tgt; 5020 tgt_resource_t *trtp = NULL; 5021 U8 *lunptr; 5022 U8 *vbuf; 5023 U16 itag; 5024 U16 ioindex; 5025 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 5026 uint8_t *cdbp; 5027 5028 /* 5029 * First, DMA sync the received command- 5030 * which is in the *request* * phys area. 5031 * 5032 * XXX: We could optimize this for a range 5033 */ 5034 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 5035 BUS_DMASYNC_POSTREAD); 5036 5037 /* 5038 * Stash info for the current command where we can get at it later. 5039 */ 5040 vbuf = req->req_vbuf; 5041 vbuf += MPT_RQSL(mpt); 5042 5043 /* 5044 * Get our state pointer set up. 5045 */ 5046 tgt = MPT_TGT_STATE(mpt, req); 5047 if (tgt->state != TGT_STATE_LOADED) { 5048 mpt_tgt_dump_req_state(mpt, req); 5049 panic("bad target state in mpt_scsi_tgt_atio"); 5050 } 5051 memset(tgt, 0, sizeof (mpt_tgt_state_t)); 5052 tgt->state = TGT_STATE_IN_CAM; 5053 tgt->reply_desc = reply_desc; 5054 ioindex = GET_IO_INDEX(reply_desc); 5055 if (mpt->verbose >= MPT_PRT_DEBUG) { 5056 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 5057 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 5058 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 5059 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 5060 } 5061 if (mpt->is_fc) { 5062 PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 5063 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 5064 if (fc->FcpCntl[2]) { 5065 /* 5066 * Task Management Request 5067 */ 5068 switch (fc->FcpCntl[2]) { 5069 case 0x2: 5070 fct = MPT_ABORT_TASK_SET; 5071 break; 5072 case 0x4: 5073 fct = MPT_CLEAR_TASK_SET; 5074 break; 5075 case 0x20: 5076 fct = MPT_TARGET_RESET; 5077 break; 5078 case 0x40: 5079 fct = MPT_CLEAR_ACA; 5080 break; 5081 case 0x80: 5082 fct = MPT_TERMINATE_TASK; 5083 break; 5084 default: 5085 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 5086 fc->FcpCntl[2]); 5087 mpt_scsi_tgt_status(mpt, 0, req, 5088 SCSI_STATUS_OK, 0); 5089 return; 5090 } 5091 } else { 5092 switch (fc->FcpCntl[1]) { 5093 case 0: 5094 tag_action = MSG_SIMPLE_Q_TAG; 5095 break; 5096 case 1: 5097 tag_action = MSG_HEAD_OF_Q_TAG; 5098 break; 5099 case 2: 5100 tag_action = MSG_ORDERED_Q_TAG; 5101 break; 5102 default: 5103 /* 5104 * Bah. 
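(FcpCntl[1] values past 2 presumably encode the ACA and untagged task attributes, neither of which we support, so:) 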
Ignore Untagged Queuing and ACA 5105 */ 5106 tag_action = MSG_SIMPLE_Q_TAG; 5107 break; 5108 } 5109 } 5110 tgt->resid = be32toh(fc->FcpDl); 5111 cdbp = fc->FcpCdb; 5112 lunptr = fc->FcpLun; 5113 itag = be16toh(fc->OptionalOxid); 5114 } else if (mpt->is_sas) { 5115 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; 5116 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; 5117 cdbp = ssp->CDB; 5118 lunptr = ssp->LogicalUnitNumber; 5119 itag = ssp->InitiatorTag; 5120 } else { 5121 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; 5122 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; 5123 cdbp = sp->CDB; 5124 lunptr = sp->LogicalUnitNumber; 5125 itag = sp->Tag; 5126 } 5127 5128 /* 5129 * Generate a simple lun 5130 */ 5131 switch (lunptr[0] & 0xc0) { 5132 case 0x40: 5133 lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; 5134 break; 5135 case 0: 5136 lun = lunptr[1]; 5137 break; 5138 default: 5139 mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type of lun\n"); 5140 lun = 0xffff; 5141 break; 5142 } 5143 5144 /* 5145 * Deal with non-enabled or bad luns here. 5146 */ 5147 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || 5148 mpt->trt[lun].enabled == 0) { 5149 if (mpt->twildcard) { 5150 trtp = &mpt->trt_wildcard; 5151 } else if (fct == MPT_NIL_TMT_VALUE) { 5152 /* 5153 * In this case, we haven't got an upstream listener 5154 * for either a specific lun or wildcard luns. We 5155 * have to make some sensible response. For regular 5156 * inquiry, just return some NOT HERE inquiry data. 5157 * For VPD inquiry, report illegal field in cdb. 5158 * For REQUEST SENSE, just return NO SENSE data. 5159 * REPORT LUNS gets illegal command. 5160 * All other commands get 'no such device'. 5161 */ 5162 uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; 5163 size_t len; 5164 5165 memset(buf, 0, MPT_SENSE_SIZE); 5166 cond = SCSI_STATUS_CHECK_COND; 5167 buf[0] = 0xf0; 5168 buf[2] = 0x5; 5169 buf[7] = 0x8; 5170 sp = buf; 5171 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5172 5173 switch (cdbp[0]) { 5174 case INQUIRY: 5175 { 5176 if (cdbp[1] != 0) { 5177 buf[12] = 0x26; 5178 buf[13] = 0x01; 5179 break; 5180 } 5181 len = min(tgt->resid, cdbp[4]); 5182 len = min(len, sizeof (null_iqd)); 5183 mpt_lprt(mpt, MPT_PRT_DEBUG, 5184 "local inquiry %ld bytes\n", (long) len); 5185 mpt_scsi_tgt_local(mpt, req, lun, 1, 5186 null_iqd, len); 5187 return; 5188 } 5189 case REQUEST_SENSE: 5190 { 5191 buf[2] = 0x0; 5192 len = min(tgt->resid, cdbp[4]); 5193 len = min(len, sizeof (buf)); 5194 mpt_lprt(mpt, MPT_PRT_DEBUG, 5195 "local reqsense %ld bytes\n", (long) len); 5196 mpt_scsi_tgt_local(mpt, req, lun, 1, 5197 buf, len); 5198 return; 5199 } 5200 case REPORT_LUNS: 5201 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); 5202 buf[12] = 0x26; 5203 break; 5204 default: 5205 mpt_lprt(mpt, MPT_PRT_DEBUG, 5206 "CMD 0x%x to unmanaged lun %u\n", 5207 cdbp[0], lun); 5208 buf[12] = 0x25; 5209 break; 5210 } 5211 mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); 5212 return; 5213 } 5214 /* otherwise, leave trtp NULL */ 5215 } else { 5216 trtp = &mpt->trt[lun]; 5217 } 5218 5219 /* 5220 * Deal with any task management 5221 */ 5222 if (fct != MPT_NIL_TMT_VALUE) { 5223 if (trtp == NULL) { 5224 mpt_prt(mpt, "task mgmt function %x but no listener\n", 5225 fct); 5226 mpt_scsi_tgt_status(mpt, 0, req, 5227 SCSI_STATUS_OK, 0); 5228 } else { 5229 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, 5230 GET_INITIATOR_INDEX(reply_desc)); 5231 } 5232 return; 5233 } 5234 5235 5236 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); 5237 if (atiop == NULL) { 5238 mpt_lprt(mpt, MPT_PRT_WARN, 5239 "no ATIOs for lun %u- 
sending back %s\n", lun, 5240 mpt->tenabled? "QUEUE FULL" : "BUSY"); 5241 mpt_scsi_tgt_status(mpt, NULL, req, 5242 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 5243 NULL); 5244 return; 5245 } 5246 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 5247 mpt_lprt(mpt, MPT_PRT_DEBUG1, 5248 "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); 5249 atiop->ccb_h.ccb_mpt_ptr = mpt; 5250 atiop->ccb_h.status = CAM_CDB_RECVD; 5251 atiop->ccb_h.target_lun = lun; 5252 atiop->sense_len = 0; 5253 atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 5254 atiop->cdb_len = mpt_cdblen(cdbp[0], 16); 5255 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 5256 5257 /* 5258 * The tag we construct here allows us to find the 5259 * original request that the command came in with. 5260 * 5261 * This way we don't have to depend on anything but the 5262 * tag to find things when CCBs show back up from CAM. 5263 */ 5264 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5265 tgt->tag_id = atiop->tag_id; 5266 if (tag_action) { 5267 atiop->tag_action = tag_action; 5268 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 5269 } 5270 if (mpt->verbose >= MPT_PRT_DEBUG) { 5271 int i; 5272 mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, 5273 atiop->ccb_h.target_lun); 5274 for (i = 0; i < atiop->cdb_len; i++) { 5275 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 5276 (i == (atiop->cdb_len - 1))? '>' : ' '); 5277 } 5278 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 5279 itag, atiop->tag_id, tgt->reply_desc, tgt->resid); 5280 } 5281 5282 MPTLOCK_2_CAMLOCK(mpt); 5283 xpt_done((union ccb *)atiop); 5284 CAMLOCK_2_MPTLOCK(mpt); 5285 } 5286 5287 static void 5288 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 5289 { 5290 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5291 5292 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 5293 "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, 5294 tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, 5295 tgt->tag_id, tgt->state); 5296 } 5297 5298 static void 5299 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 5300 { 5301 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 5302 req->index, req->index, req->state); 5303 mpt_tgt_dump_tgt_state(mpt, req); 5304 } 5305 5306 static int 5307 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 5308 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 5309 { 5310 int dbg; 5311 union ccb *ccb; 5312 U16 status; 5313 5314 if (reply_frame == NULL) { 5315 /* 5316 * Figure out what the state of the command is. 5317 */ 5318 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5319 5320 #ifdef INVARIANTS 5321 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 5322 if (tgt->req) { 5323 mpt_req_not_spcl(mpt, tgt->req, 5324 "turbo scsi_tgt_reply associated req", __LINE__); 5325 } 5326 #endif 5327 switch(tgt->state) { 5328 case TGT_STATE_LOADED: 5329 /* 5330 * This is a new command starting. 
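* The chip has written the raw transport command (FCP, SSP, or SPI * format) just past the request in the shared buffer; mpt_scsi_tgt_atio * parses out the CDB, LUN, and tag and hands a fresh ATIO up to CAM.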
5331 */ 5332 mpt_scsi_tgt_atio(mpt, req, reply_desc); 5333 break; 5334 case TGT_STATE_MOVING_DATA: 5335 { 5336 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 5337 5338 ccb = tgt->ccb; 5339 if (tgt->req == NULL) { 5340 panic("mpt: turbo target reply with null " 5341 "associated request moving data"); 5342 /* NOTREACHED */ 5343 } 5344 if (ccb == NULL) { 5345 if (tgt->is_local == 0) { 5346 panic("mpt: turbo target reply with " 5347 "null associated ccb moving data"); 5348 /* NOTREACHED */ 5349 } 5350 mpt_lprt(mpt, MPT_PRT_DEBUG, 5351 "TARGET_ASSIST local done\n"); 5352 TAILQ_REMOVE(&mpt->request_pending_list, 5353 tgt->req, links); 5354 mpt_free_request(mpt, tgt->req); 5355 tgt->req = NULL; 5356 mpt_scsi_tgt_status(mpt, NULL, req, 5357 0, NULL); 5358 return (TRUE); 5359 } 5360 tgt->ccb = NULL; 5361 tgt->nxfers++; 5362 mpt_req_untimeout(req, mpt_timeout, ccb); 5363 mpt_lprt(mpt, MPT_PRT_DEBUG, 5364 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 5365 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 5366 /* 5367 * Free the Target Assist Request 5368 */ 5369 KASSERT(tgt->req->ccb == ccb, 5370 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 5371 tgt->req->serno, tgt->req->ccb)); 5372 TAILQ_REMOVE(&mpt->request_pending_list, 5373 tgt->req, links); 5374 mpt_free_request(mpt, tgt->req); 5375 tgt->req = NULL; 5376 5377 /* 5378 * Do we need to send status now? That is, are 5379 * we done with all our data transfers? 5380 */ 5381 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 5382 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5383 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5384 KASSERT(ccb->ccb_h.status, 5385 ("zero ccb sts at %d\n", __LINE__)); 5386 tgt->state = TGT_STATE_IN_CAM; 5387 if (mpt->outofbeer) { 5388 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5389 mpt->outofbeer = 0; 5390 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5391 } 5392 MPTLOCK_2_CAMLOCK(mpt); 5393 xpt_done(ccb); 5394 CAMLOCK_2_MPTLOCK(mpt); 5395 break; 5396 } 5397 /* 5398 * Otherwise, send status (and sense) 5399 */ 5400 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5401 sp = sense; 5402 memcpy(sp, &ccb->csio.sense_data, 5403 min(ccb->csio.sense_len, MPT_SENSE_SIZE)); 5404 } 5405 mpt_scsi_tgt_status(mpt, ccb, req, 5406 ccb->csio.scsi_status, sp); 5407 break; 5408 } 5409 case TGT_STATE_SENDING_STATUS: 5410 case TGT_STATE_MOVING_DATA_AND_STATUS: 5411 { 5412 int ioindex; 5413 ccb = tgt->ccb; 5414 5415 if (tgt->req == NULL) { 5416 panic("mpt: turbo target reply with null " 5417 "associated request sending status"); 5418 /* NOTREACHED */ 5419 } 5420 5421 if (ccb) { 5422 tgt->ccb = NULL; 5423 if (tgt->state == 5424 TGT_STATE_MOVING_DATA_AND_STATUS) { 5425 tgt->nxfers++; 5426 } 5427 mpt_req_untimeout(req, mpt_timeout, ccb); 5428 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5429 ccb->ccb_h.status |= CAM_SENT_SENSE; 5430 } 5431 mpt_lprt(mpt, MPT_PRT_DEBUG, 5432 "TARGET_STATUS tag %x sts %x flgs %x req " 5433 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 5434 ccb->ccb_h.flags, tgt->req); 5435 /* 5436 * Free the Target Send Status Request 5437 */ 5438 KASSERT(tgt->req->ccb == ccb, 5439 ("tgt->req %p:%u tgt->req->ccb %p", 5440 tgt->req, tgt->req->serno, tgt->req->ccb)); 5441 /* 5442 * Notify CAM that we're done 5443 */ 5444 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5445 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5446 KASSERT(ccb->ccb_h.status, 5447 ("ZERO ccb sts at %d\n", __LINE__)); 5448 tgt->ccb = NULL; 5449 } else { 5450 mpt_lprt(mpt, MPT_PRT_DEBUG, 5451 "TARGET_STATUS non-CAM for req %p:%u\n", 5452 tgt->req, tgt->req->serno); 5453 } 5454 TAILQ_REMOVE(&mpt->request_pending_list, 
5455 tgt->req, links); 5456 mpt_free_request(mpt, tgt->req); 5457 tgt->req = NULL; 5458 5459 /* 5460 * And re-post the Command Buffer. 5461 * This will reset the state. 5462 */ 5463 ioindex = GET_IO_INDEX(reply_desc); 5464 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5465 tgt->is_local = 0; 5466 mpt_post_target_command(mpt, req, ioindex); 5467 5468 /* 5469 * And post a done for anyone who cares 5470 */ 5471 if (ccb) { 5472 if (mpt->outofbeer) { 5473 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5474 mpt->outofbeer = 0; 5475 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5476 } 5477 MPTLOCK_2_CAMLOCK(mpt); 5478 xpt_done(ccb); 5479 CAMLOCK_2_MPTLOCK(mpt); 5480 } 5481 break; 5482 } 5483 case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 5484 tgt->state = TGT_STATE_LOADED; 5485 break; 5486 default: 5487 mpt_prt(mpt, "Unknown Target State 0x%x in Context " 5488 "Reply Function\n", tgt->state); 5489 } 5490 return (TRUE); 5491 } 5492 5493 status = le16toh(reply_frame->IOCStatus); 5494 if (status != MPI_IOCSTATUS_SUCCESS) { 5495 dbg = MPT_PRT_ERROR; 5496 } else { 5497 dbg = MPT_PRT_DEBUG1; 5498 } 5499 5500 mpt_lprt(mpt, dbg, 5501 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 5502 req, req->serno, reply_frame, reply_frame->Function, status); 5503 5504 switch (reply_frame->Function) { 5505 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 5506 { 5507 mpt_tgt_state_t *tgt; 5508 #ifdef INVARIANTS 5509 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 5510 #endif 5511 if (status != MPI_IOCSTATUS_SUCCESS) { 5512 /* 5513 * XXX What to do? 5514 */ 5515 break; 5516 } 5517 tgt = MPT_TGT_STATE(mpt, req); 5518 KASSERT(tgt->state == TGT_STATE_LOADING, 5519 ("bad state 0x%x on reply to buffer post\n", tgt->state)); 5520 mpt_assign_serno(mpt, req); 5521 tgt->state = TGT_STATE_LOADED; 5522 break; 5523 } 5524 case MPI_FUNCTION_TARGET_ASSIST: 5525 #ifdef INVARIANTS 5526 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 5527 #endif 5528 mpt_prt(mpt, "target assist completion\n"); 5529 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5530 mpt_free_request(mpt, req); 5531 break; 5532 case MPI_FUNCTION_TARGET_STATUS_SEND: 5533 #ifdef INVARIANTS 5534 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 5535 #endif 5536 mpt_prt(mpt, "status send completion\n"); 5537 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5538 mpt_free_request(mpt, req); 5539 break; 5540 case MPI_FUNCTION_TARGET_MODE_ABORT: 5541 { 5542 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 5543 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 5544 PTR_MSG_TARGET_MODE_ABORT abtp = 5545 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 5546 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 5547 #ifdef INVARIANTS 5548 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 5549 #endif 5550 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 5551 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 5552 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5553 mpt_free_request(mpt, req); 5554 break; 5555 } 5556 default: 5557 mpt_prt(mpt, "Unknown Target Address Reply Function code: " 5558 "0x%x\n", reply_frame->Function); 5559 break; 5560 } 5561 return (TRUE); 5562 } 5563