/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#include "dev/mpt/mpilib/mpi_sas.h"
#if __FreeBSD_version >= 500000
#include <sys/sysctl.h>
#endif
#include <sys/callout.h>
#include <sys/kthread.h>

#if __FreeBSD_version >= 700025
#ifndef CAM_NEW_TRAN_CODE
#define CAM_NEW_TRAN_CODE 1
#endif
#endif

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
    .name = "mpt_cam",
    .probe = mpt_cam_probe,
    .attach = mpt_cam_attach,
    .enable = mpt_cam_enable,
    .ready = mpt_cam_ready,
    .event = mpt_cam_event,
    .reset = mpt_cam_ioc_reset,
    .detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

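/*
 * hw.mpt.enable_sata_wc: -1 (the default) leaves the write cache of
 * attached SATA devices alone; 0 disables it and 1 enables it at
 * attach time (see mpt_set_initial_config_sas() below).
 */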
int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);

int
mpt_cam_probe(struct mpt_softc *mpt)
{
    int role;

    /*
     * Only attach to nodes that support the initiator or target role
     * (or want to) or have RAID physical devices that need CAM pass-thru
     * support.
     */
    if (mpt->do_cfg_role) {
        role = mpt->cfg_role;
    } else {
        role = mpt->role;
    }
    if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
        (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
        return (0);
    }
    return (ENODEV);
}

int
mpt_cam_attach(struct mpt_softc *mpt)
{
    struct cam_devq *devq;
    mpt_handler_t handler;
    int maxq;
    int error;

    MPT_LOCK(mpt);
    TAILQ_INIT(&mpt->request_timeout_list);
    /* Cap the queue depth at the credits the IOC actually granted us. */
    maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt)) ?
        mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

    handler.reply_handler = mpt_scsi_reply_handler;
    error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
        &scsi_io_handler_id);
    if (error != 0) {
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    handler.reply_handler = mpt_scsi_tmf_reply_handler;
    error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
        &scsi_tmf_handler_id);
    if (error != 0) {
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    /*
     * If we're fibre channel and could support target mode, we register
     * an ELS reply handler and give it resources.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
        handler.reply_handler = mpt_fc_els_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &fc_els_handler_id);
        if (error != 0) {
            MPT_UNLOCK(mpt);
            goto cleanup;
        }
        if (mpt_add_els_buffers(mpt) == FALSE) {
            error = ENOMEM;
            MPT_UNLOCK(mpt);
            goto cleanup;
        }
        maxq -= mpt->els_cmds_allocated;
    }

    /*
     * If we support target mode, we register a reply handler for it,
     * but don't add command resources until we actually enable target
     * mode.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
        handler.reply_handler = mpt_scsi_tgt_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &mpt->scsi_tgt_handler_id);
        if (error != 0) {
            MPT_UNLOCK(mpt);
            goto cleanup;
        }
    }

    if (mpt->is_sas) {
        handler.reply_handler = mpt_sata_pass_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &sata_pass_handler_id);
        if (error != 0) {
            MPT_UNLOCK(mpt);
            goto cleanup;
        }
    }

    /*
     * We keep one request reserved for timeout TMF requests.
     */
    mpt->tmf_req = mpt_get_request(mpt, FALSE);
    if (mpt->tmf_req == NULL) {
        mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
        error = ENOMEM;
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    /*
     * Mark the request as free even though not on the free list.
     * There is only one TMF request allowed to be outstanding at
     * a time and the TMF routines perform their own allocation
     * tracking using the standard state flags.
     */
    mpt->tmf_req->state = REQ_STATE_FREE;
    maxq--;

    /*
     * The rest of this is CAM foo, for which we need to drop our lock
     */
    MPT_UNLOCK(mpt);

    if (mpt_spawn_recovery_thread(mpt) != 0) {
        mpt_prt(mpt, "Unable to spawn recovery thread!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Create the device queue for our SIM(s).
     */
    devq = cam_simq_alloc(maxq);
    if (devq == NULL) {
        mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Construct our SIM entry.
     */
    mpt->sim =
        mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
    if (mpt->sim == NULL) {
        mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
        cam_simq_free(devq);
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Register exactly this bus.
     */
    MPT_LOCK(mpt);
    if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
        mpt_prt(mpt, "Bus registration Failed!\n");
        error = ENOMEM;
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        mpt_prt(mpt, "Unable to allocate Path!\n");
        error = ENOMEM;
        MPT_UNLOCK(mpt);
        goto cleanup;
    }
    MPT_UNLOCK(mpt);

    /*
     * Only register a second bus for RAID physical
     * devices if the controller supports RAID.
     */
    if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
        return (0);
    }

    /*
     * Create a "bus" to export all hidden disks to CAM.
     */
    mpt->phydisk_sim =
        mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
    if (mpt->phydisk_sim == NULL) {
        mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Register this bus.
     */
    MPT_LOCK(mpt);
    if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
        CAM_SUCCESS) {
        mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
        error = ENOMEM;
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    if (xpt_create_path(&mpt->phydisk_path, NULL,
        cam_sim_path(mpt->phydisk_sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
        error = ENOMEM;
        MPT_UNLOCK(mpt);
        goto cleanup;
    }
    MPT_UNLOCK(mpt);
    mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
    return (0);

cleanup:
    mpt_cam_detach(mpt);
    return (error);
}

/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
    char *topology = NULL;
    int rv;

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
        0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
        mpt->mpt_fcport_page0.Header.PageVersion,
        mpt->mpt_fcport_page0.Header.PageLength,
        mpt->mpt_fcport_page0.Header.PageNumber,
        mpt->mpt_fcport_page0.Header.PageType);

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
        sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read FC Port Page 0\n");
        return (-1);
    }

    mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

    switch (mpt->mpt_fcport_page0.Flags &
        MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
        mpt->mpt_fcport_speed = 0;
        topology = "<NO LOOP>";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
        topology = "N-Port";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
        topology = "NL-Port";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
        topology = "F-Port";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
        topology = "FL-Port";
        break;
    default:
        mpt->mpt_fcport_speed = 0;
        topology = "?";
        break;
    }

    mpt_lprt(mpt, MPT_PRT_INFO,
        "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
        "Speed %u-Gbit\n", topology,
        mpt->mpt_fcport_page0.WWNN.High,
        mpt->mpt_fcport_page0.WWNN.Low,
        mpt->mpt_fcport_page0.WWPN.High,
        mpt->mpt_fcport_page0.WWPN.Low,
        mpt->mpt_fcport_speed);
#if __FreeBSD_version >= 500000
    MPT_UNLOCK(mpt);
    {
        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
        struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

        snprintf(mpt->scinfo.fc.wwnn,
            sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
            mpt->mpt_fcport_page0.WWNN.High,
            mpt->mpt_fcport_page0.WWNN.Low);

        snprintf(mpt->scinfo.fc.wwpn,
            sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
            mpt->mpt_fcport_page0.WWPN.High,
            mpt->mpt_fcport_page0.WWPN.Low);

        SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
            "World Wide Node Name");

        SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
            "World Wide Port Name");
    }
    MPT_LOCK(mpt);
#endif
    return (0);
}

/*
 * Set FC configuration information.
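 *
 * Reconcile the requested initiator/target roles (and the target-mode
 * OXID option) against what FC Port Page 1 currently allows, and
 * rewrite the NVRAM copy of the page if they differ.
 */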
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
    CONFIG_PAGE_FC_PORT_1 fc;
    U32 fl;
    int r, doit = 0;
    int role;

    r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
        &fc.Header, FALSE, 5000);
    if (r) {
        mpt_prt(mpt, "failed to read FC page 1 header\n");
        return (mpt_fc_reset_link(mpt, 1));
    }

    r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
        &fc.Header, sizeof (fc), FALSE, 5000);
    if (r) {
        mpt_prt(mpt, "failed to read FC page 1\n");
        return (mpt_fc_reset_link(mpt, 1));
    }

    /*
     * Check our flags to make sure we support the role we want.
     */
    doit = 0;
    role = 0;
    fl = le32toh(fc.Flags);

    if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
        role |= MPT_ROLE_INITIATOR;
    }
    if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
        role |= MPT_ROLE_TARGET;
    }

    fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

    if (mpt->do_cfg_role == 0) {
        role = mpt->cfg_role;
    } else {
        mpt->do_cfg_role = 0;
    }

    if (role != mpt->cfg_role) {
        if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
            if ((role & MPT_ROLE_INITIATOR) == 0) {
                mpt_prt(mpt, "adding initiator role\n");
                fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
                doit++;
            } else {
                mpt_prt(mpt, "keeping initiator role\n");
            }
        } else if (role & MPT_ROLE_INITIATOR) {
            mpt_prt(mpt, "removing initiator role\n");
            doit++;
        }
        if (mpt->cfg_role & MPT_ROLE_TARGET) {
            if ((role & MPT_ROLE_TARGET) == 0) {
                mpt_prt(mpt, "adding target role\n");
                fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
                doit++;
            } else {
                mpt_prt(mpt, "keeping target role\n");
            }
        } else if (role & MPT_ROLE_TARGET) {
            mpt_prt(mpt, "removing target role\n");
            doit++;
        }
        mpt->role = mpt->cfg_role;
    }

    if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
        if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
            mpt_prt(mpt, "adding OXID option\n");
            fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
            doit++;
        }
    }

    if (doit) {
        fc.Flags = htole32(fl);
        r = mpt_write_cfg_page(mpt,
            MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
            sizeof(fc), FALSE, 5000);
        if (r != 0) {
            mpt_prt(mpt, "failed to update NVRAM with changes\n");
            return (0);
        }
        mpt_prt(mpt, "NOTE: NVRAM changes will not take "
            "effect until next reboot or IOC reset\n");
    }
    return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
    ConfigExtendedPageHeader_t hdr;
    struct mptsas_phyinfo *phyinfo;
    SasIOUnitPage0_t *buffer;
    int error, len, i;

    error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
        0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
        &hdr, 0, 10000);
    if (error)
        goto out;
    if (hdr.ExtPageLength == 0) {
        error = ENXIO;
        goto out;
    }

    len = hdr.ExtPageLength * 4;    /* ExtPageLength is in 32-bit words */
    buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
    if (buffer == NULL) {
        error = ENOMEM;
        goto out;
    }

    error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
        0, &hdr, buffer, len, 0, 10000);
    if (error) {
        free(buffer, M_DEVBUF);
        goto out;
    }

    portinfo->num_phys = buffer->NumPhys;
    portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
        portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
    if (portinfo->phy_info == NULL) {
        free(buffer, M_DEVBUF);
        error = ENOMEM;
        goto out;
    }

    for (i = 0; i < portinfo->num_phys; i++) {
        phyinfo = &portinfo->phy_info[i];
        phyinfo->phy_num = i;
        phyinfo->port_id = buffer->PhyData[i].Port;
        phyinfo->negotiated_link_rate =
            buffer->PhyData[i].NegotiatedLinkRate;
        phyinfo->handle =
            le16toh(buffer->PhyData[i].ControllerDevHandle);
    }

    free(buffer, M_DEVBUF);
out:
    return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
    uint32_t form, uint32_t form_specific)
{
    ConfigExtendedPageHeader_t hdr;
    SasPhyPage0_t *buffer;
    int error;

    error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
        MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
        0, 10000);
    if (error)
        goto out;
    if (hdr.ExtPageLength == 0) {
        error = ENXIO;
        goto out;
    }

    buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (buffer == NULL) {
        error = ENOMEM;
        goto out;
    }

    error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
        form + form_specific, &hdr, buffer,
        sizeof(SasPhyPage0_t), 0, 10000);
    if (error) {
        free(buffer, M_DEVBUF);
        goto out;
    }

    phy_info->hw_link_rate = buffer->HwLinkRate;
    phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
    phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
    phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

    free(buffer, M_DEVBUF);
out:
    return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
    uint32_t form, uint32_t form_specific)
{
    ConfigExtendedPageHeader_t hdr;
    SasDevicePage0_t *buffer;
    uint64_t sas_address;
    int error = 0;

    bzero(device_info, sizeof(*device_info));
    error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
        MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
        &hdr, 0, 10000);
    if (error)
        goto out;
    if (hdr.ExtPageLength == 0) {
        error = ENXIO;
        goto out;
    }

    buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (buffer == NULL) {
        error = ENOMEM;
        goto out;
    }

    error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
        form + form_specific, &hdr, buffer,
        sizeof(SasDevicePage0_t), 0, 10000);
    if (error) {
        free(buffer, M_DEVBUF);
        goto out;
    }

    device_info->dev_handle = le16toh(buffer->DevHandle);
    device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
    device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
    device_info->slot = le16toh(buffer->Slot);
    device_info->phy_num = buffer->PhyNum;
    device_info->physical_port = buffer->PhysicalPort;
    device_info->target_id = buffer->TargetID;
    device_info->bus = buffer->Bus;
    bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
    device_info->sas_address = le64toh(sas_address);
    device_info->device_info = le32toh(buffer->DeviceInfo);

    free(buffer, M_DEVBUF);
out:
    return (error);
}

/*
 * Read SAS configuration information.
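 *
 * SAS IO Unit Page 0 supplies the phy count; for each phy we then read
 * SAS PHY Page 0 plus SAS Device Page 0 entries for the phy itself and,
 * if present, for the attached device.
 */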
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
    struct mptsas_portinfo *portinfo;
    struct mptsas_phyinfo *phyinfo;
    int error, i;

    portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (portinfo == NULL)
        return (ENOMEM);

    error = mptsas_sas_io_unit_pg0(mpt, portinfo);
    if (error) {
        free(portinfo, M_DEVBUF);
        return (0);
    }

    for (i = 0; i < portinfo->num_phys; i++) {
        phyinfo = &portinfo->phy_info[i];
        error = mptsas_sas_phy_pg0(mpt, phyinfo,
            (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
            MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
        if (error)
            break;
        error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
            (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
            MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
            phyinfo->handle);
        if (error)
            break;
        phyinfo->identify.phy_num = phyinfo->phy_num = i;
        if (phyinfo->attached.dev_handle)
            error = mptsas_sas_device_pg0(mpt,
                &phyinfo->attached,
                (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
                MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
                phyinfo->attached.dev_handle);
        if (error)
            break;
    }
    mpt->sas_portinfo = portinfo;
    return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
    int enabled)
{
    SataPassthroughRequest_t *pass;
    request_t *req;
    int error, status;

    req = mpt_get_request(mpt, 0);
    if (req == NULL)
        return;

    pass = req->req_vbuf;
    bzero(pass, sizeof(SataPassthroughRequest_t));
    pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
    pass->TargetID = devinfo->target_id;
    pass->Bus = devinfo->bus;
    pass->PassthroughFlags = 0;
    pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
    pass->DataLength = 0;
    pass->MsgContext = htole32(req->index | sata_pass_handler_id);
    /*
     * Build an ATA SET FEATURES command as a Register Host-to-Device
     * FIS: 0x27 is the FIS type, 0x80 sets the Command bit, 0xef is
     * SET FEATURES, and the features byte selects subcommand 0x02
     * (enable write cache) or 0x82 (disable write cache).
     */
    pass->CommandFIS[0] = 0x27;
    pass->CommandFIS[1] = 0x80;
    pass->CommandFIS[2] = 0xef;
    pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
    pass->CommandFIS[7] = 0x40;
    pass->CommandFIS[15] = 0x08;

    mpt_check_doorbell(mpt);
    mpt_send_cmd(mpt, req);
    error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
        10 * 1000);
    if (error) {
        mpt_free_request(mpt, req);
        printf("error %d sending passthrough\n", error);
        return;
    }

    status = le16toh(req->IOCStatus);
    if (status != MPI_IOCSTATUS_SUCCESS) {
        mpt_free_request(mpt, req);
        printf("IOCSTATUS %d\n", status);
        return;
    }

    mpt_free_request(mpt, req);
}

/*
 * Set SAS configuration information: optionally flip the write cache
 * setting of attached SATA devices, as directed by the
 * hw.mpt.enable_sata_wc tunable.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
    struct mptsas_phyinfo *phyinfo;
    int i;

    if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
        for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
            phyinfo = &mpt->sas_portinfo->phy_info[i];
            if (phyinfo->attached.dev_handle == 0)
                continue;
            if ((phyinfo->attached.device_info &
                MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
                continue;
            if (bootverbose)
                device_printf(mpt->dev,
                    "%sabling SATA WC on phy %d\n",
                    (mpt_enable_sata_wc) ? "En" : "Dis", i);
"En" : "Dis", i); 869 mptsas_set_sata_wc(mpt, &phyinfo->attached, 870 mpt_enable_sata_wc); 871 } 872 } 873 874 return (0); 875 } 876 877 static int 878 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, 879 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 880 { 881 if (req != NULL) { 882 883 if (reply_frame != NULL) { 884 MSG_SATA_PASSTHROUGH_REQUEST *pass; 885 MSG_SATA_PASSTHROUGH_REPLY *reply; 886 887 pass = (MSG_SATA_PASSTHROUGH_REQUEST *)req->req_vbuf; 888 reply = (MSG_SATA_PASSTHROUGH_REPLY *)reply_frame; 889 req->IOCStatus = le16toh(reply_frame->IOCStatus); 890 } 891 req->state &= ~REQ_STATE_QUEUED; 892 req->state |= REQ_STATE_DONE; 893 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 894 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 895 wakeup(req); 896 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { 897 /* 898 * Whew- we can free this request (late completion) 899 */ 900 mpt_free_request(mpt, req); 901 } 902 } 903 904 return (TRUE); 905 } 906 907 /* 908 * Read SCSI configuration information 909 */ 910 static int 911 mpt_read_config_info_spi(struct mpt_softc *mpt) 912 { 913 int rv, i; 914 915 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, 916 &mpt->mpt_port_page0.Header, FALSE, 5000); 917 if (rv) { 918 return (-1); 919 } 920 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", 921 mpt->mpt_port_page0.Header.PageVersion, 922 mpt->mpt_port_page0.Header.PageLength, 923 mpt->mpt_port_page0.Header.PageNumber, 924 mpt->mpt_port_page0.Header.PageType); 925 926 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, 927 &mpt->mpt_port_page1.Header, FALSE, 5000); 928 if (rv) { 929 return (-1); 930 } 931 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", 932 mpt->mpt_port_page1.Header.PageVersion, 933 mpt->mpt_port_page1.Header.PageLength, 934 mpt->mpt_port_page1.Header.PageNumber, 935 mpt->mpt_port_page1.Header.PageType); 936 937 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, 938 &mpt->mpt_port_page2.Header, FALSE, 5000); 939 if (rv) { 940 return (-1); 941 } 942 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", 943 mpt->mpt_port_page2.Header.PageVersion, 944 mpt->mpt_port_page2.Header.PageLength, 945 mpt->mpt_port_page2.Header.PageNumber, 946 mpt->mpt_port_page2.Header.PageType); 947 948 for (i = 0; i < 16; i++) { 949 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 950 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); 951 if (rv) { 952 return (-1); 953 } 954 mpt_lprt(mpt, MPT_PRT_DEBUG, 955 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, 956 mpt->mpt_dev_page0[i].Header.PageVersion, 957 mpt->mpt_dev_page0[i].Header.PageLength, 958 mpt->mpt_dev_page0[i].Header.PageNumber, 959 mpt->mpt_dev_page0[i].Header.PageType); 960 961 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 962 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); 963 if (rv) { 964 return (-1); 965 } 966 mpt_lprt(mpt, MPT_PRT_DEBUG, 967 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, 968 mpt->mpt_dev_page1[i].Header.PageVersion, 969 mpt->mpt_dev_page1[i].Header.PageLength, 970 mpt->mpt_dev_page1[i].Header.PageNumber, 971 mpt->mpt_dev_page1[i].Header.PageType); 972 } 973 974 /* 975 * At this point, we don't *have* to fail. As long as we have 976 * valid config header information, we can (barely) lurch 977 * along. 
     */

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
        sizeof(mpt->mpt_port_page0), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read SPI Port Page 0\n");
    } else {
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
            mpt->mpt_port_page0.Capabilities,
            mpt->mpt_port_page0.PhysicalInterface);
    }

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
        sizeof(mpt->mpt_port_page1), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read SPI Port Page 1\n");
    } else {
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
            mpt->mpt_port_page1.Configuration,
            mpt->mpt_port_page1.OnBusTimerValue);
    }

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
        sizeof(mpt->mpt_port_page2), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read SPI Port Page 2\n");
    } else {
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "Port Page 2: Flags %x Settings %x\n",
            mpt->mpt_port_page2.PortFlags,
            mpt->mpt_port_page2.PortSettings);
        for (i = 0; i < 16; i++) {
            mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
                " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
                i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
                mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
                mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
        }
    }

    for (i = 0; i < 16; i++) {
        rv = mpt_read_cur_cfg_page(mpt, i,
            &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
            FALSE, 5000);
        if (rv) {
            mpt_prt(mpt,
                "cannot read SPI Target %d Device Page 0\n", i);
            continue;
        }
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "target %d page 0: Negotiated Params %x Information %x\n",
            i, mpt->mpt_dev_page0[i].NegotiatedParameters,
            mpt->mpt_dev_page0[i].Information);

        rv = mpt_read_cur_cfg_page(mpt, i,
            &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
            FALSE, 5000);
        if (rv) {
            mpt_prt(mpt,
                "cannot read SPI Target %d Device Page 1\n", i);
            continue;
        }
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "target %d page 1: Requested Params %x Configuration %x\n",
            i, mpt->mpt_dev_page1[i].RequestedParameters,
            mpt->mpt_dev_page1[i].Configuration);
    }
    return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
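 *
 * mpt_set_initial_config_spi() below expects the page's Configuration
 * word to be ((1 << ini_id) << 16) | ini_id, i.e. our initiator ID in
 * the low word and the matching one-hot ID bit in the high word, and
 * rewrites the page when the stored value disagrees.
 */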
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
    int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
    int error;

    mpt->mpt_disc_enable = 0xff;
    mpt->mpt_tag_enable = 0;

    if (mpt->mpt_port_page1.Configuration != pp1val) {
        CONFIG_PAGE_SCSI_PORT_1 tmp;

        mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
            "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
        tmp = mpt->mpt_port_page1;
        tmp.Configuration = pp1val;
        error = mpt_write_cur_cfg_page(mpt, 0,
            &tmp.Header, sizeof(tmp), FALSE, 5000);
        if (error) {
            return (-1);
        }
        error = mpt_read_cur_cfg_page(mpt, 0,
            &tmp.Header, sizeof(tmp), FALSE, 5000);
        if (error) {
            return (-1);
        }
        if (tmp.Configuration != pp1val) {
            mpt_prt(mpt,
                "failed to reset SPI Port Page 1 Config value\n");
            return (-1);
        }
        mpt->mpt_port_page1 = tmp;
    }

    /*
     * The purpose of this exercise is to get
     * all targets back to async/narrow.
     *
     * We skip this step if the BIOS has already negotiated
     * speeds with the targets and does not require us to
     * do Domain Validation.
     */
    i = mpt->mpt_port_page2.PortSettings &
        MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
    j = mpt->mpt_port_page2.PortFlags &
        MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
    if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* &&
        j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) {
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "honoring BIOS transfer negotiations\n");
    } else {
        for (i = 0; i < 16; i++) {
            mpt->mpt_dev_page1[i].RequestedParameters = 0;
            mpt->mpt_dev_page1[i].Configuration = 0;
            (void) mpt_update_spi_config(mpt, i);
        }
    }
    return (0);
}

int
mpt_cam_enable(struct mpt_softc *mpt)
{
    int error;

    MPT_LOCK(mpt);

    error = EIO;
    if (mpt->is_fc) {
        if (mpt_read_config_info_fc(mpt)) {
            goto out;
        }
        if (mpt_set_initial_config_fc(mpt)) {
            goto out;
        }
    } else if (mpt->is_sas) {
        if (mpt_read_config_info_sas(mpt)) {
            goto out;
        }
        if (mpt_set_initial_config_sas(mpt)) {
            goto out;
        }
    } else if (mpt->is_spi) {
        if (mpt_read_config_info_spi(mpt)) {
            goto out;
        }
        if (mpt_set_initial_config_spi(mpt)) {
            goto out;
        }
    }
    error = 0;

out:
    MPT_UNLOCK(mpt);
    return (error);
}

void
mpt_cam_ready(struct mpt_softc *mpt)
{
    /*
     * If we're in target mode, hang out resources now
     * so we don't cause the world to hang talking to us.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
        /*
         * Try to add some target command resources
         */
        MPT_LOCK(mpt);
        if (mpt_add_target_commands(mpt) == FALSE) {
            mpt_prt(mpt, "failed to add target commands\n");
        }
        MPT_UNLOCK(mpt);
    }
    mpt->ready = 1;
}

void
mpt_cam_detach(struct mpt_softc *mpt)
{
    mpt_handler_t handler;

    MPT_LOCK(mpt);
    mpt->ready = 0;
    mpt_terminate_recovery_thread(mpt);

    handler.reply_handler = mpt_scsi_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        scsi_io_handler_id);
    handler.reply_handler = mpt_scsi_tmf_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        scsi_tmf_handler_id);
    handler.reply_handler = mpt_fc_els_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        fc_els_handler_id);
    handler.reply_handler = mpt_scsi_tgt_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        mpt->scsi_tgt_handler_id);
    handler.reply_handler = mpt_sata_pass_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        sata_pass_handler_id);

    if (mpt->tmf_req != NULL) {
        mpt->tmf_req->state = REQ_STATE_ALLOCATED;
        mpt_free_request(mpt, mpt->tmf_req);
        mpt->tmf_req = NULL;
    }
    if (mpt->sas_portinfo != NULL) {
        free(mpt->sas_portinfo, M_DEVBUF);
        mpt->sas_portinfo = NULL;
    }
    MPT_UNLOCK(mpt);

    if (mpt->sim != NULL) {
        xpt_free_path(mpt->path);
        xpt_bus_deregister(cam_sim_path(mpt->sim));
        cam_sim_free(mpt->sim, TRUE);
        mpt->sim = NULL;
    }

    if (mpt->phydisk_sim != NULL) {
        xpt_free_path(mpt->phydisk_path);
        xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
        cam_sim_free(mpt->phydisk_sim, TRUE);
        mpt->phydisk_sim = NULL;
    }
}

/*
 * This routine is used after a system crash to dump core onto the swap
 * device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
    struct mpt_softc *mpt;

    mpt = (struct mpt_softc *)cam_sim_softc(sim);
    mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
    union ccb *ccb;
    struct mpt_softc *mpt;
    request_t *req;

    ccb = (union ccb *)arg;
    mpt = ccb->ccb_h.ccb_mpt_ptr;

    MPT_LOCK(mpt);
    req = ccb->ccb_h.ccb_req_ptr;
    mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
        req->serno, ccb, req->ccb);
    /* XXX: WHAT ARE WE TRYING TO DO HERE? */
    if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
        TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
        req->state |= REQ_STATE_TIMEDOUT;
        mpt_wakeup_recovery_thread(mpt);
    }
    MPT_UNLOCK(mpt);
}

/*
 * Callback routine from bus_dmamap_load() or, in simple cases, called
 * directly.
 *
 * Takes a list of physical segments and builds the SGL for the SCSI IO
 * command, then forwards the command to the IOC after one last check
 * that CAM has not aborted the transaction.
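 *
 * This is the 64-bit SGE flavor; mpt_execute_req() below implements the
 * same logic using 32-bit SGE_SIMPLE32/SGE_CHAIN32 elements.
 */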
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    request_t *req, *trq;
    char *mpt_off;
    union ccb *ccb;
    struct mpt_softc *mpt;
    int seg, first_lim;
    uint32_t flags, nxt_off;
    void *sglp = NULL;
    MSG_REQUEST_HEADER *hdrp;
    SGE_SIMPLE64 *se;
    SGE_CHAIN64 *ce;
    int istgt = 0;

    req = (request_t *)arg;
    ccb = req->ccb;

    mpt = ccb->ccb_h.ccb_mpt_ptr;
    req = ccb->ccb_h.ccb_req_ptr;

    hdrp = req->req_vbuf;
    mpt_off = req->req_vbuf;

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
    }

    if (error == 0) {
        switch (hdrp->Function) {
        case MPI_FUNCTION_SCSI_IO_REQUEST:
        case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
            istgt = 0;
            sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
            break;
        case MPI_FUNCTION_TARGET_ASSIST:
            istgt = 1;
            sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
            break;
        default:
            mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
                hdrp->Function);
            error = EINVAL;
            break;
        }
    }

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
        mpt_prt(mpt, "segment count %d too large (max %u)\n",
            nseg, mpt->max_seg_cnt);
    }

bad:
    if (error != 0) {
        if (error != EFBIG && error != ENOMEM) {
            mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
        }
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
            cam_status status;
            mpt_freeze_ccb(ccb);
            if (error == EFBIG) {
                status = CAM_REQ_TOO_BIG;
            } else if (error == ENOMEM) {
                if (mpt->outofbeer == 0) {
                    mpt->outofbeer = 1;
                    xpt_freeze_simq(mpt->sim, 1);
                    mpt_lprt(mpt, MPT_PRT_DEBUG,
                        "FREEZEQ\n");
                }
                status = CAM_REQUEUE_REQ;
            } else {
                status = CAM_REQ_CMP_ERR;
            }
            mpt_set_ccb_status(ccb, status);
        }
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    /*
     * No data to transfer?
     * Just make a single simple SGL with zero length.
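     *
     * Note that a 32-bit simple element suffices even in this 64-bit
     * variant; a zero-length SGL carries no address the IOC needs.
     */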

    if (mpt->verbose >= MPT_PRT_DEBUG) {
        int tidx = ((char *)sglp) - mpt_off;
        memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
    }

    if (nseg == 0) {
        SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
        MPI_pSGE_SET_FLAGS(se1,
            (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
            MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
        se1->FlagsLength = htole32(se1->FlagsLength);
        goto out;
    }

    flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
    if (istgt == 0) {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    } else {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    }

    if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
        bus_dmasync_op_t op;
        if (istgt == 0) {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREREAD;
            } else {
                op = BUS_DMASYNC_PREWRITE;
            }
        } else {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREWRITE;
            } else {
                op = BUS_DMASYNC_PREREAD;
            }
        }
        bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
    }

    /*
     * Okay, fill in what we can at the end of the command frame.
     * If we have up to MPT_NSGL_FIRST, we can fit them all into
     * the command frame.
     *
     * Otherwise, we fill up through MPT_NSGL_FIRST less one
     * SIMPLE64 pointers and start doing CHAIN64 entries after
     * that.
     */
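    /*
     * For example (hypothetical numbers): if MPT_NSGL_FIRST(mpt) were 8
     * and nseg were 20, the command frame would get 7 SIMPLE64 elements
     * and a CHAIN64 element pointing at the remaining 13 segments.
     */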
    if (nseg < MPT_NSGL_FIRST(mpt)) {
        first_lim = nseg;
    } else {
        /*
         * Leave room for CHAIN element
         */
        first_lim = MPT_NSGL_FIRST(mpt) - 1;
    }

    se = (SGE_SIMPLE64 *) sglp;
    for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
        uint32_t tf;

        memset(se, 0, sizeof (*se));
        se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
        if (sizeof(bus_addr_t) > 4) {
            se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32;
        }
        MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
        tf = flags;
        if (seg == first_lim - 1) {
            tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
        }
        if (seg == nseg - 1) {
            tf |= MPI_SGE_FLAGS_END_OF_LIST |
                MPI_SGE_FLAGS_END_OF_BUFFER;
        }
        MPI_pSGE_SET_FLAGS(se, tf);
        se->FlagsLength = htole32(se->FlagsLength);
    }

    if (seg == nseg) {
        goto out;
    }

    /*
     * Tell the IOC where to find the first chain element
     * (the offset is expressed in 32-bit words).
     */
    hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
    nxt_off = MPT_RQSL(mpt);
    trq = req;

    /*
     * Make up the rest of the data segments out of a chain element
     * (contained in the current request frame) which points to
     * SIMPLE64 elements in the next request frame, possibly ending
     * with *another* chain element (if there's more).
     */
    while (seg < nseg) {
        int this_seg_lim;
        uint32_t tf, cur_off;
        bus_addr_t chain_list_addr;

        /*
         * Point to the chain descriptor. Note that the chain
         * descriptor is at the end of the *previous* list (whether
         * chain or simple).
         */
        ce = (SGE_CHAIN64 *) se;

        /*
         * Before we change our current pointer, make sure we won't
         * overflow the request area with this frame. Note that we
         * test against 'greater than' here as it's okay in this case
         * to have next offset be just outside the request area.
         */
        if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
            nxt_off = MPT_REQUEST_AREA;
            goto next_chain;
        }

        /*
         * Set our SGE element pointer to the beginning of the chain
         * list and update our next chain list offset.
         */
        se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
        cur_off = nxt_off;
        nxt_off += MPT_RQSL(mpt);

        /*
         * Now initialize the chain descriptor.
         */
        memset(ce, 0, sizeof (*ce));

        /*
         * Get the physical address of the chain list.
         */
        chain_list_addr = trq->req_pbuf;
        chain_list_addr += cur_off;
        if (sizeof (bus_addr_t) > 4) {
            ce->Address.High =
                htole32((uint32_t) ((uint64_t)chain_list_addr >> 32));
        }
        ce->Address.Low = htole32((uint32_t) chain_list_addr);
        ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
            MPI_SGE_FLAGS_64_BIT_ADDRESSING;

        /*
         * If we have more than a frame's worth of segments left,
         * set up the chain list to have the last element be another
         * chain descriptor.
         */
        if ((nseg - seg) > MPT_NSGL(mpt)) {
            this_seg_lim = seg + MPT_NSGL(mpt) - 1;
            /*
             * The length of the chain is the length in bytes of the
             * number of segments plus the next chain element.
             *
             * The next chain descriptor offset is the length,
             * in words, of the number of segments.
             */
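            /*
             * E.g., with a hypothetical MPT_NSGL(mpt) of 32: 31
             * SIMPLE64 entries of 12 bytes each give Length 372,
             * NextChainOffset 372 >> 2 = 93 words, and Length then
             * grows by sizeof (SGE_CHAIN64) to cover the trailing
             * chain element.
             */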
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE64);
            ce->NextChainOffset = ce->Length >> 2;
            ce->Length += sizeof (SGE_CHAIN64);
        } else {
            this_seg_lim = nseg;
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE64);
        }

        /*
         * Fill in the chain list SGE elements with our segment data.
         *
         * If we're the last element in this chain list, set the last
         * element flag. If we're the completely last element period,
         * set the end of list and end of buffer flags.
         */
        while (seg < this_seg_lim) {
            memset(se, 0, sizeof (*se));
            se->Address.Low = htole32(dm_segs->ds_addr);
            if (sizeof (bus_addr_t) > 4) {
                se->Address.High =
                    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
            }
            MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
            tf = flags;
            if (seg == this_seg_lim - 1) {
                tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
            }
            if (seg == nseg - 1) {
                tf |= MPI_SGE_FLAGS_END_OF_LIST |
                    MPI_SGE_FLAGS_END_OF_BUFFER;
            }
            MPI_pSGE_SET_FLAGS(se, tf);
            se->FlagsLength = htole32(se->FlagsLength);
            se++;
            seg++;
            dm_segs++;
        }

    next_chain:
        /*
         * If we have more segments to do and we've used up all of
         * the space in a request area, go allocate another one
         * and chain to that.
         */
        if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
            request_t *nrq;

            CAMLOCK_2_MPTLOCK(mpt);
            nrq = mpt_get_request(mpt, FALSE);
            MPTLOCK_2_CAMLOCK(mpt);

            if (nrq == NULL) {
                error = ENOMEM;
                goto bad;
            }

            /*
             * Append the new request area on the tail of our list.
             */
            if ((trq = req->chain) == NULL) {
                req->chain = nrq;
            } else {
                while (trq->chain != NULL) {
                    trq = trq->chain;
                }
                trq->chain = nrq;
            }
            trq = nrq;
            mpt_off = trq->req_vbuf;
            if (mpt->verbose >= MPT_PRT_DEBUG) {
                memset(mpt_off, 0xff, MPT_REQUEST_AREA);
            }
            nxt_off = 0;
        }
    }
out:

    /*
     * Last time we need to check if this CCB needs to be aborted.
     */
    if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        mpt_prt(mpt,
            "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
            ccb->ccb_h.status & CAM_STATUS_MASK);
        if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
            bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    ccb->ccb_h.status |= CAM_SIM_QUEUED;
    if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
        /* Convert the CCB timeout from milliseconds to ticks. */
        mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
            mpt_timeout, ccb);
    }
    if (mpt->verbose > MPT_PRT_DEBUG) {
        int nc = 0;
        mpt_print_request(req->req_vbuf);
        for (trq = req->chain; trq; trq = trq->chain) {
            printf(" Additional Chain Area %d\n", nc++);
            mpt_dump_sgl(trq->req_vbuf, 0);
        }
    }

    if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
        request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
        mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef WE_TRUST_AUTO_GOOD_STATUS
        if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
            ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
            tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
        } else {
            tgt->state = TGT_STATE_MOVING_DATA;
        }
#else
        tgt->state = TGT_STATE_MOVING_DATA;
#endif
    }
    CAMLOCK_2_MPTLOCK(mpt);
    mpt_send_cmd(mpt, req);
    MPTLOCK_2_CAMLOCK(mpt);
}

static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    request_t *req, *trq;
    char *mpt_off;
    union ccb *ccb;
    struct mpt_softc *mpt;
    int seg, first_lim;
    uint32_t flags, nxt_off;
    void *sglp = NULL;
    MSG_REQUEST_HEADER *hdrp;
    SGE_SIMPLE32 *se;
    SGE_CHAIN32 *ce;
    int istgt = 0;

    req = (request_t *)arg;
    ccb = req->ccb;

    mpt = ccb->ccb_h.ccb_mpt_ptr;
    req = ccb->ccb_h.ccb_req_ptr;

    hdrp = req->req_vbuf;
    mpt_off = req->req_vbuf;

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
    }

    if (error == 0) {
        switch (hdrp->Function) {
        case MPI_FUNCTION_SCSI_IO_REQUEST:
        case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
            sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
            break;
        case MPI_FUNCTION_TARGET_ASSIST:
            istgt = 1;
            sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
            break;
        default:
            mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
                hdrp->Function);
            error = EINVAL;
            break;
        }
    }

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
        mpt_prt(mpt, "segment count %d too large (max %u)\n",
            nseg, mpt->max_seg_cnt);
    }

bad:
    if (error != 0) {
        if (error != EFBIG && error != ENOMEM) {
            mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
        }
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
            cam_status status;
            mpt_freeze_ccb(ccb);
            if (error == EFBIG) {
                status = CAM_REQ_TOO_BIG;
            } else if (error == ENOMEM) {
                if (mpt->outofbeer == 0) {
                    mpt->outofbeer = 1;
                    xpt_freeze_simq(mpt->sim, 1);
                    mpt_lprt(mpt, MPT_PRT_DEBUG,
                        "FREEZEQ\n");
                }
                status = CAM_REQUEUE_REQ;
            } else {
                status = CAM_REQ_CMP_ERR;
            }
            mpt_set_ccb_status(ccb, status);
        }
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    /*
     * No data to transfer?
     * Just make a single simple SGL with zero length.
     */

    if (mpt->verbose >= MPT_PRT_DEBUG) {
        int tidx = ((char *)sglp) - mpt_off;
        memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
    }

    if (nseg == 0) {
        SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
        MPI_pSGE_SET_FLAGS(se1,
            (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
            MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
        se1->FlagsLength = htole32(se1->FlagsLength);
        goto out;
    }

    flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
    if (istgt == 0) {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    } else {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    }

    if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
        bus_dmasync_op_t op;
        if (istgt) {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREREAD;
            } else {
                op = BUS_DMASYNC_PREWRITE;
            }
        } else {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREWRITE;
            } else {
                op = BUS_DMASYNC_PREREAD;
            }
        }
        bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
    }

    /*
     * Okay, fill in what we can at the end of the command frame.
     * If we have up to MPT_NSGL_FIRST, we can fit them all into
     * the command frame.
     *
     * Otherwise, we fill up through MPT_NSGL_FIRST less one
     * SIMPLE32 pointers and start doing CHAIN32 entries after
     * that.
     */

    if (nseg < MPT_NSGL_FIRST(mpt)) {
        first_lim = nseg;
    } else {
        /*
         * Leave room for CHAIN element
         */
        first_lim = MPT_NSGL_FIRST(mpt) - 1;
    }

    se = (SGE_SIMPLE32 *) sglp;
    for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
        uint32_t tf;

        memset(se, 0, sizeof (*se));
        se->Address = dm_segs->ds_addr;

        MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
        tf = flags;
        if (seg == first_lim - 1) {
            tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
        }
        if (seg == nseg - 1) {
            tf |= MPI_SGE_FLAGS_END_OF_LIST |
                MPI_SGE_FLAGS_END_OF_BUFFER;
        }
        MPI_pSGE_SET_FLAGS(se, tf);
        se->FlagsLength = htole32(se->FlagsLength);
    }

    if (seg == nseg) {
        goto out;
    }

    /*
     * Tell the IOC where to find the first chain element
     * (the offset is expressed in 32-bit words).
     */
    hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
    nxt_off = MPT_RQSL(mpt);
    trq = req;

    /*
     * Make up the rest of the data segments out of a chain element
     * (contained in the current request frame) which points to
     * SIMPLE32 elements in the next request frame, possibly ending
     * with *another* chain element (if there's more).
     */
    while (seg < nseg) {
        int this_seg_lim;
        uint32_t tf, cur_off;
        bus_addr_t chain_list_addr;

        /*
         * Point to the chain descriptor. Note that the chain
         * descriptor is at the end of the *previous* list (whether
         * chain or simple).
         */
        ce = (SGE_CHAIN32 *) se;

        /*
         * Before we change our current pointer, make sure we won't
         * overflow the request area with this frame. Note that we
         * test against 'greater than' here as it's okay in this case
         * to have next offset be just outside the request area.
         */
        if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
            nxt_off = MPT_REQUEST_AREA;
            goto next_chain;
        }

        /*
         * Set our SGE element pointer to the beginning of the chain
         * list and update our next chain list offset.
         */
        se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
        cur_off = nxt_off;
        nxt_off += MPT_RQSL(mpt);

        /*
         * Now initialize the chain descriptor.
         */
        memset(ce, 0, sizeof (*ce));

        /*
         * Get the physical address of the chain list.
         */
        chain_list_addr = trq->req_pbuf;
        chain_list_addr += cur_off;

        ce->Address = chain_list_addr;
        ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;

        /*
         * If we have more than a frame's worth of segments left,
         * set up the chain list to have the last element be another
         * chain descriptor.
         */
        if ((nseg - seg) > MPT_NSGL(mpt)) {
            this_seg_lim = seg + MPT_NSGL(mpt) - 1;
            /*
             * The length of the chain is the length in bytes of the
             * number of segments plus the next chain element.
             *
             * The next chain descriptor offset is the length,
             * in words, of the number of segments.
             */
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE32);
            ce->NextChainOffset = ce->Length >> 2;
            ce->Length += sizeof (SGE_CHAIN32);
        } else {
            this_seg_lim = nseg;
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE32);
        }

        /*
         * Fill in the chain list SGE elements with our segment data.
         *
         * If we're the last element in this chain list, set the last
         * element flag.
		 * If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address = dm_segs->ds_addr;

			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

 next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			CAMLOCK_2_MPTLOCK(mpt);
			nrq = mpt_get_request(mpt, FALSE);
			MPTLOCK_2_CAMLOCK(mpt);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			printf(" Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	CAMLOCK_2_MPTLOCK(mpt);
	mpt_send_cmd(mpt, req);
	MPTLOCK_2_CAMLOCK(mpt);
}

static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
	request_t *req;
	struct mpt_softc *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	struct ccb_scsiio *csio = &ccb->csio;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	bus_dmamap_callback_t *cb;
	target_id_t tgt;
	int raid_passthru;

	/* Get the pointer for the physical adapter */
	mpt = ccb->ccb_h.ccb_mpt_ptr;
	raid_passthru = (sim == mpt->phydisk_sim);

	CAMLOCK_2_MPTLOCK(mpt);
	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_done(ccb);
		return;
	}
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
#endif
	MPTLOCK_2_CAMLOCK(mpt);

	if (sizeof (bus_addr_t) > 4) {
		cb = mpt_execute_req_a64;
	} else {
		cb = mpt_execute_req;
	}

	/*
	 * Link the ccb and the request structure so we can find
	 * the other knowing either the request or the ccb
	 */
	req->ccb = ccb;
	ccb->ccb_h.ccb_req_ptr = req;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	if (raid_passthru) {
		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
		CAMLOCK_2_MPTLOCK(mpt);
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			MPTLOCK_2_CAMLOCK(mpt);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
			xpt_done(ccb);
			return;
		}
		MPTLOCK_2_CAMLOCK(mpt);
		mpt_req->Bus = 0;	/* we never set bus here */
	} else {
		tgt = ccb->ccb_h.target_id;
		mpt_req->Bus = 0;	/* XXX */
	}
	mpt_req->SenseBufferLength =
	    (csio->sense_len < MPT_SENSE_SIZE) ?
	    csio->sense_len : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when we
	 * get the command completion interrupt from the IOC.
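	 *
	 * As an illustration (the handler id values themselves are
	 * opaque cookies assigned at attach time): a request with
	 * index 5 submitted here is stamped htole32(5 |
	 * scsi_io_handler_id); when the reply comes back, the
	 * interrupt path uses the handler id portion to dispatch to
	 * mpt_scsi_reply_handler() and the index portion to recover
	 * this request_t.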
	 */
	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);

	/* Which physical device to do the I/O on */
	mpt_req->TargetID = tgt;

	/* We assume a single level LUN type */
	if (ccb->ccb_h.target_lun >= 256) {
		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
	} else {
		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
	}

	/* Set the direction of the transfer */
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	} else {
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
	}

	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
		switch(ccb->csio.tag_action) {
		case MSG_HEAD_OF_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;
		case MSG_ACA_TASK:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
		case MSG_ORDERED_Q_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;
		case MSG_SIMPLE_Q_TAG:
		default:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else {
		if (mpt->is_fc || mpt->is_sas) {
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
		} else {
			/* XXX No such thing for a target doing packetized. */
			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
		}
	}

	if (mpt->is_spi) {
		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
		}
	}

	/* Copy the scsi command block into place */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
	} else {
		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
	}

	mpt_req->CDBLength = csio->cdb_len;
	mpt_req->DataLength = htole32(csio->dxfer_len);
	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

	/*
	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
	 */
	if (mpt->verbose == MPT_PRT_DEBUG) {
		U32 df;
		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
			mpt_prtc(mpt, "(%s %u byte%s ",
			    (df == MPI_SCSIIO_CONTROL_READ)?
			    "read" : "write", csio->dxfer_len,
			    (csio->dxfer_len == 1)? ")" : "s)");
		}
		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
		    ccb->ccb_h.target_lun, req, req->serno);
	}

	/*
	 * If we have any data to send with this command map it into bus space.
	 */
	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
			/*
			 * We've been given a pointer to a single buffer.
			 */
			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
				/*
				 * Virtual address that needs to be
				 * translated into one or more physical
				 * address ranges.
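				 *
				 * Note that bus_dmamap_load(9) may either
				 * invoke the callback synchronously before
				 * returning or defer it and return
				 * EINPROGRESS; the SIMQ freeze below keeps
				 * requests ordered in the deferred case.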
				 */
				int error;
				int s = splsoftvm();
				error = bus_dmamap_load(mpt->buffer_dmat,
				    req->dmap, csio->data_ptr, csio->dxfer_len,
				    cb, req, 0);
				splx(s);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(mpt->sim, 1);
					ccbh->status |= CAM_RELEASE_SIMQ;
				}
			} else {
				/*
				 * We have been given a pointer to a
				 * single physical buffer.
				 */
				struct bus_dma_segment seg;
				seg.ds_addr =
				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				(*cb)(req, &seg, 1, 0);
			}
		} else {
			/*
			 * We have been given a list of addresses.
			 * This case could be easily supported but they are not
			 * currently generated by the CAM subsystem so there
			 * is no point in wasting the time right now.
			 */
			struct bus_dma_segment *segs;
			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
				(*cb)(req, NULL, 0, EFAULT);
			} else {
				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				(*cb)(req, segs, csio->sglist_cnt, 0);
			}
		}
	} else {
		(*cb)(req, NULL, 0, 0);
	}
}

static int
mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
    int sleep_ok)
{
	int error;
	uint16_t status;
	uint8_t response;

	error = mpt_scsi_send_tmf(mpt,
	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
	    0,	/* XXX How do I get the channel ID? */
	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
	    lun != CAM_LUN_WILDCARD ? lun : 0,
	    0, sleep_ok);

	if (error != 0) {
		/*
		 * mpt_scsi_send_tmf hard resets on failure, so no
		 * need to do so here.
		 */
		mpt_prt(mpt,
		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
		return (EIO);
	}

	/* Wait for bus reset to be processed by the IOC. */
	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
	    REQ_STATE_DONE, sleep_ok, 5000);

	status = mpt->tmf_req->IOCStatus;
	response = mpt->tmf_req->ResponseCode;
	mpt->tmf_req->state = REQ_STATE_FREE;

	if (error) {
		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
		    "Resetting controller.\n");
		mpt_reset(mpt, TRUE);
		return (ETIMEDOUT);
	}

	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
		    "Resetting controller.\n", status);
		mpt_reset(mpt, TRUE);
		return (EIO);
	}

	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
" 2341 "Resetting controller.\n", response); 2342 mpt_reset(mpt, TRUE); 2343 return (EIO); 2344 } 2345 return (0); 2346 } 2347 2348 static int 2349 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) 2350 { 2351 int r = 0; 2352 request_t *req; 2353 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; 2354 2355 req = mpt_get_request(mpt, FALSE); 2356 if (req == NULL) { 2357 return (ENOMEM); 2358 } 2359 fc = req->req_vbuf; 2360 memset(fc, 0, sizeof(*fc)); 2361 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; 2362 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; 2363 fc->MsgContext = htole32(req->index | fc_els_handler_id); 2364 mpt_send_cmd(mpt, req); 2365 if (dowait) { 2366 r = mpt_wait_req(mpt, req, REQ_STATE_DONE, 2367 REQ_STATE_DONE, FALSE, 60 * 1000); 2368 if (r == 0) { 2369 mpt_free_request(mpt, req); 2370 } 2371 } 2372 return (r); 2373 } 2374 2375 static int 2376 mpt_cam_event(struct mpt_softc *mpt, request_t *req, 2377 MSG_EVENT_NOTIFY_REPLY *msg) 2378 { 2379 uint32_t data0, data1; 2380 2381 data0 = le32toh(msg->Data[0]); 2382 data1 = le32toh(msg->Data[1]); 2383 switch(msg->Event & 0xFF) { 2384 case MPI_EVENT_UNIT_ATTENTION: 2385 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", 2386 (data0 >> 8) & 0xff, data0 & 0xff); 2387 break; 2388 2389 case MPI_EVENT_IOC_BUS_RESET: 2390 /* We generated a bus reset */ 2391 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", 2392 (data0 >> 8) & 0xff); 2393 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2394 break; 2395 2396 case MPI_EVENT_EXT_BUS_RESET: 2397 /* Someone else generated a bus reset */ 2398 mpt_prt(mpt, "External Bus Reset Detected\n"); 2399 /* 2400 * These replies don't return EventData like the MPI 2401 * spec says they do 2402 */ 2403 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2404 break; 2405 2406 case MPI_EVENT_RESCAN: 2407 #if __FreeBSD_version >= 600000 2408 { 2409 union ccb *ccb; 2410 uint32_t pathid; 2411 /* 2412 * In general this means a device has been added to the loop. 2413 */ 2414 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2415 if (mpt->ready == 0) { 2416 break; 2417 } 2418 if (mpt->phydisk_sim) { 2419 pathid = cam_sim_path(mpt->phydisk_sim); 2420 } else { 2421 pathid = cam_sim_path(mpt->sim); 2422 } 2423 MPTLOCK_2_CAMLOCK(mpt); 2424 /* 2425 * Allocate a CCB, create a wildcard path for this bus, 2426 * and schedule a rescan. 2427 */ 2428 ccb = xpt_alloc_ccb_nowait(); 2429 if (ccb == NULL) { 2430 mpt_prt(mpt, "unable to alloc CCB for rescan\n"); 2431 CAMLOCK_2_MPTLOCK(mpt); 2432 break; 2433 } 2434 2435 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, 2436 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2437 CAMLOCK_2_MPTLOCK(mpt); 2438 mpt_prt(mpt, "unable to create path for rescan\n"); 2439 xpt_free_ccb(ccb); 2440 break; 2441 } 2442 xpt_rescan(ccb); 2443 CAMLOCK_2_MPTLOCK(mpt); 2444 break; 2445 } 2446 #else 2447 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2448 break; 2449 #endif 2450 case MPI_EVENT_LINK_STATUS_CHANGE: 2451 mpt_prt(mpt, "Port %d: LinkState: %s\n", 2452 (data1 >> 8) & 0xff, 2453 ((data0 & 0xff) == 0)? 
"Failed" : "Active"); 2454 break; 2455 2456 case MPI_EVENT_LOOP_STATE_CHANGE: 2457 switch ((data0 >> 16) & 0xff) { 2458 case 0x01: 2459 mpt_prt(mpt, 2460 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " 2461 "(Loop Initialization)\n", 2462 (data1 >> 8) & 0xff, 2463 (data0 >> 8) & 0xff, 2464 (data0 ) & 0xff); 2465 switch ((data0 >> 8) & 0xff) { 2466 case 0xF7: 2467 if ((data0 & 0xff) == 0xF7) { 2468 mpt_prt(mpt, "Device needs AL_PA\n"); 2469 } else { 2470 mpt_prt(mpt, "Device %02x doesn't like " 2471 "FC performance\n", 2472 data0 & 0xFF); 2473 } 2474 break; 2475 case 0xF8: 2476 if ((data0 & 0xff) == 0xF7) { 2477 mpt_prt(mpt, "Device had loop failure " 2478 "at its receiver prior to acquiring" 2479 " AL_PA\n"); 2480 } else { 2481 mpt_prt(mpt, "Device %02x detected loop" 2482 " failure at its receiver\n", 2483 data0 & 0xFF); 2484 } 2485 break; 2486 default: 2487 mpt_prt(mpt, "Device %02x requests that device " 2488 "%02x reset itself\n", 2489 data0 & 0xFF, 2490 (data0 >> 8) & 0xFF); 2491 break; 2492 } 2493 break; 2494 case 0x02: 2495 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2496 "LPE(%02x,%02x) (Loop Port Enable)\n", 2497 (data1 >> 8) & 0xff, /* Port */ 2498 (data0 >> 8) & 0xff, /* Character 3 */ 2499 (data0 ) & 0xff /* Character 4 */); 2500 break; 2501 case 0x03: 2502 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2503 "LPB(%02x,%02x) (Loop Port Bypass)\n", 2504 (data1 >> 8) & 0xff, /* Port */ 2505 (data0 >> 8) & 0xff, /* Character 3 */ 2506 (data0 ) & 0xff /* Character 4 */); 2507 break; 2508 default: 2509 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " 2510 "FC event (%02x %02x %02x)\n", 2511 (data1 >> 8) & 0xff, /* Port */ 2512 (data0 >> 16) & 0xff, /* Event */ 2513 (data0 >> 8) & 0xff, /* Character 3 */ 2514 (data0 ) & 0xff /* Character 4 */); 2515 } 2516 break; 2517 2518 case MPI_EVENT_LOGOUT: 2519 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", 2520 (data1 >> 8) & 0xff, data0); 2521 break; 2522 case MPI_EVENT_QUEUE_FULL: 2523 { 2524 struct cam_sim *sim; 2525 struct cam_path *tmppath; 2526 struct ccb_relsim crs; 2527 PTR_EVENT_DATA_QUEUE_FULL pqf = 2528 (PTR_EVENT_DATA_QUEUE_FULL) msg->Data; 2529 lun_id_t lun_id; 2530 2531 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " 2532 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); 2533 if (mpt->phydisk_sim) { 2534 sim = mpt->phydisk_sim; 2535 } else { 2536 sim = mpt->sim; 2537 } 2538 MPTLOCK_2_CAMLOCK(mpt); 2539 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { 2540 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2541 pqf->TargetID, lun_id) != CAM_REQ_CMP) { 2542 mpt_prt(mpt, "unable to create a path to send " 2543 "XPT_REL_SIMQ"); 2544 CAMLOCK_2_MPTLOCK(mpt); 2545 break; 2546 } 2547 xpt_setup_ccb(&crs.ccb_h, tmppath, 5); 2548 crs.ccb_h.func_code = XPT_REL_SIMQ; 2549 crs.release_flags = RELSIM_ADJUST_OPENINGS; 2550 crs.openings = pqf->CurrentDepth - 1; 2551 xpt_action((union ccb *)&crs); 2552 if (crs.ccb_h.status != CAM_REQ_CMP) { 2553 mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); 2554 } 2555 xpt_free_path(tmppath); 2556 } 2557 CAMLOCK_2_MPTLOCK(mpt); 2558 break; 2559 } 2560 case MPI_EVENT_EVENT_CHANGE: 2561 case MPI_EVENT_INTEGRATED_RAID: 2562 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2563 case MPI_EVENT_SAS_SES: 2564 break; 2565 default: 2566 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", 2567 msg->Event & 0xFF); 2568 return (0); 2569 } 2570 return (1); 2571 } 2572 2573 /* 2574 * Reply path for all SCSI I/O requests, called from our 2575 * interrupt handler by extracting our handler index from 2576 * the MsgContext field 
 * of the reply from the IOC.
 *
 * This routine is optimized for the common case of a
 * completion without error. All exception handling is
 * offloaded to non-inlined helper routines to minimize
 * cache footprint.
 */
static int
mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_SCSI_IO_REQUEST *scsi_req;
	union ccb *ccb;
	target_id_t tgt;

	if (req->state == REQ_STATE_FREE) {
		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
		return (TRUE);
	}

	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
	ccb = req->ccb;
	if (ccb == NULL) {
		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
		    req, req->serno);
		return (TRUE);
	}

	tgt = scsi_req->TargetID;
	mpt_req_untimeout(req, mpt_timeout, ccb);
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
	}

	if (reply_frame == NULL) {
		/*
		 * Context only reply, completion without error status.
		 */
		ccb->csio.resid = 0;
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		ccb->csio.scsi_status = SCSI_STATUS_OK;
	} else {
		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
	}

	if (mpt->outofbeer) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		mpt->outofbeer = 0;
		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
	}
	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
		struct scsi_inquiry_data *iq =
		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
		if (scsi_req->Function ==
		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
			/*
			 * Fake out the device type so that only the
			 * pass-thru device will attach.
			 */
			iq->device &= ~0x1F;
			iq->device |= T_NODEVICE;
		}
	}
	if (mpt->verbose == MPT_PRT_DEBUG) {
		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
		    req, req->serno);
	}
	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
	MPTLOCK_2_CAMLOCK(mpt);
	xpt_done(ccb);
	CAMLOCK_2_MPTLOCK(mpt);
	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	} else {
		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
		    req, req->serno);
		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
	}
	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
	    ("CCB req needed wakeup"));
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
#endif
	mpt_free_request(mpt, req);
	return (TRUE);
}

static int
mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;

	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
#endif
	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
	/*
	 * Record IOC Status and Response Code of TMF for any waiters.
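	 *
	 * For example, mpt_bus_reset() above sleeps in mpt_wait_req()
	 * on mpt->tmf_req and then reads these two fields back out of
	 * the request once it observes REQ_STATE_DONE.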
	 */
	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
	req->ResponseCode = tmf_reply->ResponseCode;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
	    req, req->serno, le16toh(tmf_reply->IOCStatus));
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		req->state |= REQ_STATE_DONE;
		wakeup(req);
	} else {
		mpt->tmf_req->state = REQ_STATE_FREE;
	}
	return (TRUE);
}

/*
 * XXX: Move to definitions file
 */
#define	ELS	0x22
#define	FC4LS	0x32
#define	ABTS	0x81
#define	BA_ACC	0x84

#define	LS_RJT	0x01
#define	LS_ACC	0x02
#define	PLOGI	0x03
#define	LOGO	0x05
#define	SRR	0x14
#define	PRLI	0x20
#define	PRLO	0x21
#define	ADISC	0x52
#define	RSCN	0x61

static void
mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
    PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
{
	uint32_t fl;
	MSG_LINK_SERVICE_RSP_REQUEST tmp;
	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;

	/*
	 * We are going to reuse the ELS request to send this response back.
	 */
	rsp = &tmp;
	memset(rsp, 0, sizeof(*rsp));

#ifdef	USE_IMMEDIATE_LINK_DATA
	/*
	 * Apparently the IMMEDIATE stuff doesn't seem to work.
	 */
	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
#endif
	rsp->RspLength = length;
	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
	rsp->MsgContext = htole32(req->index | fc_els_handler_id);

	/*
	 * Copy over information from the original reply frame to
	 * its correct place in the response.
	 */
	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);

	/*
	 * And now copy back the temporary area to the original frame.
	 */
	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
	rsp = req->req_vbuf;

#ifdef	USE_IMMEDIATE_LINK_DATA
	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
#else
	{
		PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
		bus_addr_t paddr = req->req_pbuf;
		paddr += MPT_RQSL(mpt);

		fl =
		    MPI_SGE_FLAGS_HOST_TO_IOC |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= (length);
		se->FlagsLength = htole32(fl);
		se->Address = htole32((uint32_t) paddr);
	}
#endif

	/*
	 * Send it on...
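	 *
	 * The single SIMPLE32 element built above describes the
	 * response payload that lives in this same request area at
	 * req->req_pbuf + MPT_RQSL(mpt); e.g. for the 20 byte PRLI ACC
	 * sent by mpt_fc_els_reply_handler(), FlagsLength ends up as
	 * the five flag bits shifted up by MPI_SGE_FLAGS_SHIFT with 20
	 * in the length field.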
	 */
	mpt_send_cmd(mpt, req);
}

static int
mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
	U8 rctl;
	U8 type;
	U8 cmd;
	U16 status = le16toh(reply_frame->IOCStatus);
	U32 *elsbuf;
	int ioindex;
	int do_refresh = TRUE;

#ifdef	INVARIANTS
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
	    req, req->serno, rp->Function));
	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
	} else {
		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
	}
#endif
	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
	    req, req->serno, reply_frame, reply_frame->Function);

	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
		    status, reply_frame->Function);
		if (status == MPI_IOCSTATUS_INVALID_STATE) {
			/*
			 * XXX: to get around shutdown issue
			 */
			mpt->disabled = 1;
			return (TRUE);
		}
		return (TRUE);
	}

	/*
	 * If the function is a link service response, we recycle the
	 * response to be a refresh for a new link service request.
	 *
	 * The request pointer is bogus in this case and we have to fetch
	 * it based upon the TransactionContext.
	 */
	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
		/* Freddie Uncle Charlie Katie */
		/* We don't get the IOINDEX as part of the Link Svc Rsp */
		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
			if (mpt->els_cmd_ptrs[ioindex] == req) {
				break;
			}

		KASSERT(ioindex < mpt->els_cmds_allocated,
		    ("can't find my mommie!"));

		/* remove from active list as we're going to re-post it */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_post_els(mpt, req, ioindex);
		return (TRUE);
	}

	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		if (req->state & REQ_STATE_TIMEDOUT) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Sync Primitive Send Completed After Timeout\n");
			mpt_free_request(mpt, req);
		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Async Primitive Send Complete\n");
			mpt_free_request(mpt, req);
		} else {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Sync Primitive Send Complete- Waking Waiter\n");
			wakeup(req);
		}
		return (TRUE);
	}

	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
		    rp->MsgLength, rp->MsgFlags);
		return (TRUE);
	}

	if (rp->MsgLength <= 5) {
		/*
		 * This is just an ack of an original ELS buffer post
		 */
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
		return (TRUE);
	}

	rctl = (le32toh(rp->Rctl_Did) &
	    MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;

	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
	cmd = be32toh(elsbuf[0]) >> 24;

	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
		return (TRUE);
	}

	ioindex = le32toh(rp->TransactionContext);
	req = mpt->els_cmd_ptrs[ioindex];

	if (rctl == ELS && type == 1) {
		switch (cmd) {
		case PRLI:
			/*
			 * Send back a PRLI ACC
			 */
			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
			    le32toh(rp->Wwn.PortNameHigh),
			    le32toh(rp->Wwn.PortNameLow));
			elsbuf[0] = htobe32(0x02100014);
			elsbuf[1] |= htobe32(0x00000100);
			elsbuf[4] = htobe32(0x00000002);
			if (mpt->role & MPT_ROLE_TARGET)
				elsbuf[4] |= htobe32(0x00000010);
			if (mpt->role & MPT_ROLE_INITIATOR)
				elsbuf[4] |= htobe32(0x00000020);
			/* remove from active list as we're done */
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			req->state &= ~REQ_STATE_QUEUED;
			req->state |= REQ_STATE_DONE;
			mpt_fc_els_send_response(mpt, req, rp, 20);
			do_refresh = FALSE;
			break;
		case PRLO:
			memset(elsbuf, 0, 5 * (sizeof (U32)));
			elsbuf[0] = htobe32(0x02100014);
			elsbuf[1] = htobe32(0x08000100);
			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
			    le32toh(rp->Wwn.PortNameHigh),
			    le32toh(rp->Wwn.PortNameLow));
			/* remove from active list as we're done */
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			req->state &= ~REQ_STATE_QUEUED;
			req->state |= REQ_STATE_DONE;
			mpt_fc_els_send_response(mpt, req, rp, 20);
			do_refresh = FALSE;
			break;
		default:
			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
			break;
		}
	} else if (rctl == ABTS && type == 0) {
		uint16_t rx_id = le16toh(rp->Rxid);
		uint16_t ox_id = le16toh(rp->Oxid);
		request_t *tgt_req = NULL;

		mpt_prt(mpt,
		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
		    le32toh(rp->Wwn.PortNameLow));
		if (rx_id >= mpt->mpt_max_tgtcmds) {
			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
		} else if (mpt->tgt_cmd_ptrs == NULL) {
			mpt_prt(mpt, "No TGT CMD PTRS\n");
		} else {
			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
		}
		if (tgt_req) {
			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
			uint8_t *vbuf;
			union ccb *ccb = tgt->ccb;
			uint32_t ct_id;

			vbuf = tgt_req->req_vbuf;
			vbuf += MPT_RQSL(mpt);

			/*
			 * Check to make sure we have the correct command.
			 * The reply descriptor in the target state should
			 * contain an IoIndex that should match the RX_ID.
			 *
			 * It'd be nice to have OX_ID to crosscheck with
			 * as well.
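			 *
			 * E.g. an ABTS naming RX_ID 3 is checked against
			 * GET_IO_INDEX(tgt->reply_desc) for the command
			 * found in tgt_cmd_ptrs[3]; on a mismatch the
			 * diagnostic dump below is skipped, but the
			 * abort is still attempted.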
			 */
			ct_id = GET_IO_INDEX(tgt->reply_desc);

			if (ct_id != rx_id) {
				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
				    rx_id, ct_id);
				goto skip;
			}

			ccb = tgt->ccb;
			if (ccb) {
				mpt_prt(mpt,
				    "CCB (%p): lun %u flags %x status %x\n",
				    ccb, ccb->ccb_h.target_lun,
				    ccb->ccb_h.flags, ccb->ccb_h.status);
			}
			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
			    "%x nxfers %x\n", tgt->state,
			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
			    tgt->nxfers);
  skip:
			if (mpt_abort_target_cmd(mpt, tgt_req)) {
				mpt_prt(mpt, "unable to start TargetAbort\n");
			}
		} else {
			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
		}
		memset(elsbuf, 0, 5 * (sizeof (U32)));
		elsbuf[0] = htobe32(0);
		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
		elsbuf[2] = htobe32(0x000ffff);
		/*
		 * Dork with the reply frame so that the response to it
		 * will be correct.
		 */
		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_els_send_response(mpt, req, rp, 12);
		do_refresh = FALSE;
	} else {
		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
	}
	if (do_refresh == TRUE) {
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_post_els(mpt, req, ioindex);
	}
	return (TRUE);
}

/*
 * Clean up all SCSI Initiator personality state in response
 * to a controller reset.
 */
static void
mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
{
	/*
	 * The pending list is already run down by
	 * the generic handler. Perform the same
	 * operation on the timed out request list.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
	    MPI_IOCSTATUS_INVALID_STATE);

	/*
	 * XXX: We need to repost ELS and Target Command Buffers?
	 */

	/*
	 * Inform the XPT that a bus reset has occurred.
	 */
	xpt_async(AC_BUS_RESET, mpt->path, NULL);
}

/*
 * Parse additional completion information in the reply
 * frame for SCSI I/O requests.
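 *
 * The 16 bit IOCStatus word is split below into its status code
 * (MPI_IOCSTATUS_MASK) and the MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE
 * flag bit, and the status code is then mapped onto the closest
 * matching CAM status for the CCB.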
 */
static int
mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
    MSG_DEFAULT_REPLY *reply_frame)
{
	union ccb *ccb;
	MSG_SCSI_IO_REPLY *scsi_io_reply;
	u_int ioc_status;
	u_int sstate;
	u_int loginfo;

	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
	    ("MPT SCSI I/O Handler called with incorrect reply type"));
	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
	    ("MPT SCSI I/O Handler called with continuation reply"));

	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
	ioc_status = le16toh(scsi_io_reply->IOCStatus);
	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
	ioc_status &= MPI_IOCSTATUS_MASK;
	sstate = scsi_io_reply->SCSIState;

	ccb = req->ccb;
	ccb->csio.resid =
	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);

	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
		ccb->csio.sense_resid =
		    ccb->csio.sense_len - scsi_io_reply->SenseCount;
		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
		    min(ccb->csio.sense_len, scsi_io_reply->SenseCount));
	}

	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
		/*
		 * Tag messages rejected, but non-tagged retry
		 * was successful.
		XXXX
		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
		*/
	}

	switch(ioc_status) {
	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		/*
		 * XXX
		 * Linux driver indicates that a zero
		 * transfer length with this error code
		 * indicates a CRC error.
		 *
		 * No need to swap the bytes for checking
		 * against zero.
		 */
		if (scsi_io_reply->TransferCount == 0) {
			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
			/*
			 * Status was never returned for this transaction.
			 */
			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {

			/* XXX Handle SPI-Packet and FCP-2 response info. */
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		} else
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
		break;
	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
		break;
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		/*
		 * Since selection timeouts and "device really not
		 * there" are grouped into this error code, report
		 * selection timeout.  Selection timeouts are
		 * typically retried before giving up on the device
		 * whereas "device not there" errors are considered
		 * unretryable.
		 */
		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
		break;
	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
		break;
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
		break;
	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		ccb->ccb_h.status = CAM_UA_TERMIO;
		break;
	case MPI_IOCSTATUS_INVALID_STATE:
		/*
		 * The IOC has been reset.  Emulate a bus reset.
		 */
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;
	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/*
		 * Don't clobber any timeout status that has
		 * already been set for this transaction.  We
		 * want the SCSI layer to be able to differentiate
		 * between the command we aborted due to timeout
		 * and any innocent bystanders.
		 */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
			break;
		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
		break;

	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
		break;
	case MPI_IOCSTATUS_BUSY:
		mpt_set_ccb_status(ccb, CAM_BUSY);
		break;
	case MPI_IOCSTATUS_INVALID_FUNCTION:
	case MPI_IOCSTATUS_INVALID_SGL:
	case MPI_IOCSTATUS_INTERNAL_ERROR:
	case MPI_IOCSTATUS_INVALID_FIELD:
	default:
		/* XXX
		 * Some of the above may need to kick
		 * off a recovery action!!!!
		 */
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	}

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		mpt_freeze_ccb(ccb);
	}

	return (TRUE);
}

static void
mpt_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpt_softc *mpt;
	struct ccb_trans_settings *cts;
	target_id_t tgt;
	lun_id_t lun;
	int raid_passthru;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	raid_passthru = (sim == mpt->phydisk_sim);
	MPT_LOCK_ASSERT(mpt);

	tgt = ccb->ccb_h.target_id;
	lun = ccb->ccb_h.target_lun;
	if (raid_passthru &&
	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
		CAMLOCK_2_MPTLOCK(mpt);
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			MPTLOCK_2_CAMLOCK(mpt);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
			xpt_done(ccb);
			return;
		}
		MPTLOCK_2_CAMLOCK(mpt);
	}
	ccb->ccb_h.ccb_mpt_ptr = mpt;

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
				break;
			}
		}
		/* Max supported CDB length is 16 bytes */
		/* XXX Unless we implement the new 32byte message type */
		if (ccb->csio.cdb_len >
		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		}
#ifdef	MPT_TEST_MULTIPATH
		if (mpt->failure_id == ccb->ccb_h.target_id) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
			break;
		}
#endif
		ccb->csio.scsi_status = SCSI_STATUS_OK;
		mpt_start(sim, ccb);
		return;

	case XPT_RESET_BUS:
		if (raid_passthru) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}
	case XPT_RESET_DEV:
		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "reset bus\n");
			}
		} else {
			xpt_print(ccb->ccb_h.path, "reset device\n");
		}
		CAMLOCK_2_MPTLOCK(mpt);
		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
		MPTLOCK_2_CAMLOCK(mpt);

		/*
		 * mpt_bus_reset is always successful in that it
		 * will fall back to a hard reset should a bus
		 * reset attempt fail.
		 */
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;

	case XPT_ABORT:
	{
		union ccb *accb = ccb->cab.abort_ccb;
		CAMLOCK_2_MPTLOCK(mpt);
		switch (accb->ccb_h.func_code) {
		case XPT_ACCEPT_TARGET_IO:
		case XPT_IMMED_NOTIFY:
			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
			break;
		case XPT_CONT_TARGET_IO:
			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		case XPT_SCSI_IO:
			ccb->ccb_h.status = CAM_UA_ABORT;
			break;
		default:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}
		MPTLOCK_2_CAMLOCK(mpt);
		break;
	}

#ifdef	CAM_NEW_TRAN_CODE
#define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
#else
#define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
#endif
#define	DP_DISC_ENABLE	0x1
#define	DP_DISC_DISABL	0x2
#define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)

#define	DP_TQING_ENABLE	0x4
#define	DP_TQING_DISABL	0x8
#define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)

#define	DP_WIDE		0x10
#define	DP_NARROW	0x20
#define	DP_WIDTH	(DP_WIDE|DP_NARROW)

#define	DP_SYNC		0x40

	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
	{
#ifdef	CAM_NEW_TRAN_CODE
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
#endif
		uint8_t dval;
		u_int period;
		u_int offset;
		int i, j;

		cts = &ccb->cts;

		if (mpt->is_fc || mpt->is_sas) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

#ifdef	CAM_NEW_TRAN_CODE
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;

		/*
		 * We can be called just to validate transport and proto
		 * versions.
		 */
		if (scsi->valid == 0 && spi->valid == 0) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}
#endif

		/*
		 * Skip attempting settings on RAID volume disks.
		 * Other devices on the bus get the normal treatment.
		 */
		if (mpt->phydisk_sim && raid_passthru == 0 &&
		    mpt_is_raid_volume(mpt, tgt) != 0) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    "no transfer settings for RAID vols\n");
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

		i = mpt->mpt_port_page2.PortSettings &
		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
		j = mpt->mpt_port_page2.PortFlags &
		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
			mpt_lprt(mpt, MPT_PRT_ALWAYS,
			    "honoring BIOS transfer negotiations\n");
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}

		dval = 0;
		period = 0;
		offset = 0;

#ifndef	CAM_NEW_TRAN_CODE
		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
			    DP_DISC_ENABLE : DP_DISC_DISABL;
		}

		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
			    DP_TQING_ENABLE : DP_TQING_DISABL;
		}

		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
		}

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
			dval |= DP_SYNC;
			period = cts->sync_period;
			offset = cts->sync_offset;
		}
#else
		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
			    DP_DISC_ENABLE : DP_DISC_DISABL;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
			    DP_TQING_ENABLE : DP_TQING_DISABL;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
			    DP_WIDE : DP_NARROW;
		}

		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
			dval |= DP_SYNC;
			offset = spi->sync_offset;
		} else {
			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
			    &mpt->mpt_dev_page1[tgt];
			offset = ptr->RequestedParameters;
			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
			offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
		}
		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
			dval |= DP_SYNC;
			period = spi->sync_period;
		} else {
			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
			    &mpt->mpt_dev_page1[tgt];
			period = ptr->RequestedParameters;
			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
			period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
		}
#endif
		CAMLOCK_2_MPTLOCK(mpt);
		if (dval & DP_DISC_ENABLE) {
			mpt->mpt_disc_enable |= (1 << tgt);
		} else if (dval & DP_DISC_DISABL) {
			mpt->mpt_disc_enable &= ~(1 << tgt);
		}
		if (dval & DP_TQING_ENABLE) {
			mpt->mpt_tag_enable |= (1 << tgt);
		} else if (dval & DP_TQING_DISABL) {
			mpt->mpt_tag_enable &= ~(1 << tgt);
		}
		if (dval & DP_WIDTH) {
			mpt_setwidth(mpt, tgt, 1);
		}
		if (dval & DP_SYNC) {
			mpt_setsync(mpt, tgt, period, offset);
		}
		if (dval == 0) {
			MPTLOCK_2_CAMLOCK(mpt);
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			break;
		}
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "set [%d]: 0x%x period 0x%x offset %d\n",
		    tgt, dval, period, offset);
		if (mpt_update_spi_config(mpt, tgt)) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		} else {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		}
		MPTLOCK_2_CAMLOCK(mpt);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
#ifdef	CAM_NEW_TRAN_CODE
		struct ccb_trans_settings_scsi *scsi;
		cts = &ccb->cts;
		cts->protocol = PROTO_SCSI;
		if (mpt->is_fc) {
			struct ccb_trans_settings_fc *fc =
			    &cts->xport_specific.fc;
			cts->protocol_version = SCSI_REV_SPC;
			cts->transport = XPORT_FC;
			cts->transport_version = 0;
			fc->valid = CTS_FC_VALID_SPEED;
			fc->bitrate = 100000;
		} else if (mpt->is_sas) {
			struct ccb_trans_settings_sas *sas =
			    &cts->xport_specific.sas;
			cts->protocol_version = SCSI_REV_SPC2;
			cts->transport = XPORT_SAS;
			cts->transport_version = 0;
			sas->valid = CTS_SAS_VALID_SPEED;
			sas->bitrate = 300000;
		} else {
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;
			if (mpt_get_spi_settings(mpt, cts) != 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
				break;
			}
		}
		scsi = &cts->proto_specific.scsi;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
#else
		cts = &ccb->cts;
		if (mpt->is_fc) {
			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		} else if (mpt->is_sas) {
			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
			break;
		}
#endif
		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;

		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			ccb->ccb_h.status &=
			    ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		}
		mpt_calc_geometry(ccg, /*extended*/1);
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		break;
	}
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
		/*
		 * FC cards report MAX_DEVICES of 512, but
		 * the MSG_SCSI_IO_REQUEST target id field
		 * is only 8 bits. Until we fix the driver
		 * to support 'channels' for bus overflow,
		 * just limit it.
		 */
		if (cpi->max_target > 255) {
			cpi->max_target = 255;
		}

		/*
		 * VMware ESX reports > 16 devices and then dies when we probe.
		 */
		if (mpt->is_spi && cpi->max_target > 15) {
			cpi->max_target = 15;
		}
		cpi->max_lun = 7;
		cpi->initiator_id = mpt->mpt_ini_id;
		cpi->bus_id = cam_sim_bus(sim);

		/*
		 * The base speed is the speed of the underlying connection.
		 */
#ifdef	CAM_NEW_TRAN_CODE
		cpi->protocol = PROTO_SCSI;
		if (mpt->is_fc) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->transport = XPORT_FC;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC;
		} else if (mpt->is_sas) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 300000;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->transport = XPORT_SAS;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC2;
		} else {
			cpi->hba_misc = PIM_SEQSCAN;
			cpi->base_transfer_speed = 3300;
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
			cpi->transport = XPORT_SPI;
			cpi->transport_version = 2;
			cpi->protocol_version = SCSI_REV_2;
		}
#else
		if (mpt->is_fc) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
		} else if (mpt->is_sas) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 300000;
			cpi->hba_inquiry = PI_TAG_ABLE;
		} else {
			cpi->hba_misc = PIM_SEQSCAN;
			cpi->base_transfer_speed = 3300;
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		}
#endif

		/*
		 * We give our fake RAID passthru bus a width that is
		 * MaxPhysDisks wide and restrict it to one lun.
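		 *
		 * E.g. with an ioc_page2->MaxPhysDisks of 8, the
		 * passthru bus exposes targets 0..7, claims target 8
		 * as the initiator id, and offers only LUN 0.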
		 */
		if (raid_passthru) {
			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
			cpi->initiator_id = cpi->max_target + 1;
			cpi->max_lun = 0;
		}

		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
			cpi->hba_misc |= PIM_NOINITIATOR;
		}
		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
			cpi->target_sprt =
			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
	{
		int result;

		CAMLOCK_2_MPTLOCK(mpt);
		if (ccb->cel.enable)
			result = mpt_enable_lun(mpt,
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
		else
			result = mpt_disable_lun(mpt,
			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
		MPTLOCK_2_CAMLOCK(mpt);
		if (result == 0) {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
		} else {
			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		}
		break;
	}
	case XPT_NOTIFY_ACK:		/* recycle notify ack */
	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
	{
		tgt_resource_t *trtp;
		lun_id_t lun = ccb->ccb_h.target_lun;
		ccb->ccb_h.sim_priv.entries[0].field = 0;
		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
		ccb->ccb_h.flags = 0;

		if (lun == CAM_LUN_WILDCARD) {
			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
				break;
			}
			trtp = &mpt->trt_wildcard;
		} else if (lun >= MPT_MAX_LUNS) {
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		} else {
			trtp = &mpt->trt[lun];
		}
		CAMLOCK_2_MPTLOCK(mpt);
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
			mpt_lprt(mpt, MPT_PRT_DEBUG1,
			    "Put FREE ATIO %p lun %d\n", ccb, lun);
			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
			    sim_links.stqe);
		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
			mpt_lprt(mpt, MPT_PRT_DEBUG1,
			    "Put FREE INOT lun %d\n", lun);
			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
			    sim_links.stqe);
		} else {
			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
		}
		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}
	case XPT_CONT_TARGET_IO:
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_target_start_io(mpt, ccb);
		MPTLOCK_2_CAMLOCK(mpt);
		return;

	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

static int
mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
{
#ifdef	CAM_NEW_TRAN_CODE
	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
#endif
	target_id_t tgt;
	uint32_t dval, pval, oval;
	int rv;

	if (IS_CURRENT_SETTINGS(cts) == 0) {
		tgt = cts->ccb_h.target_id;
	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
			return (-1);
		}
	} else {
		tgt = cts->ccb_h.target_id;
	}

	/*
	 * We aren't looking at Port Page 2 BIOS settings here-
	 * sometimes these have been known to be bogus
	 * XXX.
	 *
	 * For user settings, we pick the max from port page 0
	 *
	 * For current settings we read the current settings out from
	 * device page 0 for that target.
	 */
	if (IS_CURRENT_SETTINGS(cts)) {
		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
		dval = 0;

		CAMLOCK_2_MPTLOCK(mpt);
		tmp = mpt->mpt_dev_page0[tgt];
		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
		    sizeof(tmp), FALSE, 5000);
		if (rv) {
			MPTLOCK_2_CAMLOCK(mpt);
			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
			return (rv);
		}
		MPTLOCK_2_CAMLOCK(mpt);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
		    tmp.NegotiatedParameters, tmp.Information);
		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
		    DP_WIDE : DP_NARROW;
		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
		    DP_DISC_ENABLE : DP_DISC_DISABL;
		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
		    DP_TQING_ENABLE : DP_TQING_DISABL;
		oval = tmp.NegotiatedParameters;
		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
		pval = tmp.NegotiatedParameters;
		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
		mpt->mpt_dev_page0[tgt] = tmp;
	} else {
		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
		oval = mpt->mpt_port_page0.Capabilities;
		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
		pval = mpt->mpt_port_page0.Capabilities;
		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
	}

#ifndef	CAM_NEW_TRAN_CODE
	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
	cts->valid = 0;
	cts->sync_period = pval;
	cts->sync_offset = oval;
	cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
	cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
	cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
	if (dval & DP_WIDE) {
		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
	} else {
		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
	}
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
		if (dval & DP_DISC_ENABLE) {
			cts->flags |= CCB_TRANS_DISC_ENB;
		}
		if (dval & DP_TQING_ENABLE) {
			cts->flags |= CCB_TRANS_TAG_ENB;
		}
	}
#else
	spi->valid = 0;
	scsi->valid = 0;
	spi->flags = 0;
	scsi->flags = 0;
	spi->sync_offset = oval;
	spi->sync_period = pval;
	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
	if (dval & DP_WIDE) {
		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
	} else {
		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
	}
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		if (dval & DP_TQING_ENABLE) {
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		}
		spi->valid |= CTS_SPI_VALID_DISC;
		if (dval & DP_DISC_ENABLE) {
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
		}
	}
#endif
	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
	    IS_CURRENT_SETTINGS(cts)?
"ACTIVE" : "NVRAM ", dval, pval, oval); 3855 return (0); 3856 } 3857 3858 static void 3859 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 3860 { 3861 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3862 3863 ptr = &mpt->mpt_dev_page1[tgt]; 3864 if (onoff) { 3865 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 3866 } else { 3867 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 3868 } 3869 } 3870 3871 static void 3872 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 3873 { 3874 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3875 3876 ptr = &mpt->mpt_dev_page1[tgt]; 3877 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3878 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3879 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 3880 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 3881 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 3882 if (period == 0) { 3883 return; 3884 } 3885 ptr->RequestedParameters |= 3886 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3887 ptr->RequestedParameters |= 3888 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3889 if (period < 0xa) { 3890 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 3891 } 3892 if (period < 0x9) { 3893 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 3894 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 3895 } 3896 } 3897 3898 static int 3899 mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 3900 { 3901 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 3902 int rv; 3903 3904 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3905 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 3906 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); 3907 tmp = mpt->mpt_dev_page1[tgt]; 3908 rv = mpt_write_cur_cfg_page(mpt, tgt, 3909 &tmp.Header, sizeof(tmp), FALSE, 5000); 3910 if (rv) { 3911 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 3912 return (-1); 3913 } 3914 return (0); 3915 } 3916 3917 static void 3918 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended) 3919 { 3920 #if __FreeBSD_version >= 500000 3921 cam_calc_geometry(ccg, extended); 3922 #else 3923 uint32_t size_mb; 3924 uint32_t secs_per_cylinder; 3925 3926 if (ccg->block_size == 0) { 3927 ccg->ccb_h.status = CAM_REQ_INVALID; 3928 return; 3929 } 3930 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); 3931 if (size_mb > 1024 && extended) { 3932 ccg->heads = 255; 3933 ccg->secs_per_track = 63; 3934 } else { 3935 ccg->heads = 64; 3936 ccg->secs_per_track = 32; 3937 } 3938 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 3939 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 3940 ccg->ccb_h.status = CAM_REQ_CMP; 3941 #endif 3942 } 3943 3944 /****************************** Timeout Recovery ******************************/ 3945 static int 3946 mpt_spawn_recovery_thread(struct mpt_softc *mpt) 3947 { 3948 int error; 3949 3950 error = mpt_kthread_create(mpt_recovery_thread, mpt, 3951 &mpt->recovery_thread, /*flags*/0, 3952 /*altstack*/0, "mpt_recovery%d", mpt->unit); 3953 return (error); 3954 } 3955 3956 static void 3957 mpt_terminate_recovery_thread(struct mpt_softc *mpt) 3958 { 3959 if (mpt->recovery_thread == NULL) { 3960 return; 3961 } 3962 mpt->shutdwn_recovery = 1; 3963 wakeup(mpt); 3964 /* 3965 * Sleep on a slightly different location 3966 * for this interlock just for added safety. 
3967 */ 3968 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); 3969 } 3970 3971 static void 3972 mpt_recovery_thread(void *arg) 3973 { 3974 struct mpt_softc *mpt; 3975 3976 mpt = (struct mpt_softc *)arg; 3977 MPT_LOCK(mpt); 3978 for (;;) { 3979 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3980 if (mpt->shutdwn_recovery == 0) { 3981 mpt_sleep(mpt, mpt, PUSER, "idle", 0); 3982 } 3983 } 3984 if (mpt->shutdwn_recovery != 0) { 3985 break; 3986 } 3987 mpt_recover_commands(mpt); 3988 } 3989 mpt->recovery_thread = NULL; 3990 wakeup(&mpt->recovery_thread); 3991 MPT_UNLOCK(mpt); 3992 mpt_kthread_exit(0); 3993 } 3994 3995 static int 3996 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, 3997 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) 3998 { 3999 MSG_SCSI_TASK_MGMT *tmf_req; 4000 int error; 4001 4002 /* 4003 * Wait for any current TMF request to complete. 4004 * We're only allowed to issue one TMF at a time. 4005 */ 4006 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, 4007 sleep_ok, MPT_TMF_MAX_TIMEOUT); 4008 if (error != 0) { 4009 mpt_reset(mpt, TRUE); 4010 return (ETIMEDOUT); 4011 } 4012 4013 mpt_assign_serno(mpt, mpt->tmf_req); 4014 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; 4015 4016 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; 4017 memset(tmf_req, 0, sizeof(*tmf_req)); 4018 tmf_req->TargetID = target; 4019 tmf_req->Bus = channel; 4020 tmf_req->ChainOffset = 0; 4021 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 4022 tmf_req->Reserved = 0; 4023 tmf_req->TaskType = type; 4024 tmf_req->Reserved1 = 0; 4025 tmf_req->MsgFlags = flags; 4026 tmf_req->MsgContext = 4027 htole32(mpt->tmf_req->index | scsi_tmf_handler_id); 4028 memset(&tmf_req->LUN, 0, 4029 sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2)); 4030 if (lun >= 256) { 4031 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4032 tmf_req->LUN[1] = lun & 0xff; 4033 } else { 4034 tmf_req->LUN[1] = lun; 4035 } 4036 tmf_req->TaskMsgContext = abort_ctx; 4037 4038 mpt_lprt(mpt, MPT_PRT_DEBUG, 4039 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, 4040 mpt->tmf_req->serno, tmf_req->MsgContext); 4041 if (mpt->verbose > MPT_PRT_DEBUG) { 4042 mpt_print_request(tmf_req); 4043 } 4044 4045 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, 4046 ("mpt_scsi_send_tmf: tmf_req already on pending list")); 4047 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); 4048 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); 4049 if (error != MPT_OK) { 4050 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); 4051 mpt->tmf_req->state = REQ_STATE_FREE; 4052 mpt_reset(mpt, TRUE); 4053 } 4054 return (error); 4055 } 4056 4057 /* 4058 * When a command times out, it is placed on the request_timeout_list 4059 * and we wake our recovery thread. The MPT-Fusion architecture supports 4060 * only a single TMF operation at a time, so we serially abort/bdr, etc., 4061 * the timed-out transactions. The next TMF is issued either by the 4062 * completion handler of the current TMF waking our recovery thread, 4063 * or the TMF timeout handler causing a hard reset sequence. 4064 */ 4065 static void 4066 mpt_recover_commands(struct mpt_softc *mpt) 4067 { 4068 request_t *req; 4069 union ccb *ccb; 4070 int error; 4071 4072 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4073 /* 4074 * No work to do- leave. 
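* (This can happen if the timed-out requests were pulled off the list by the reply handler before this thread ran; it is not an error.)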
4075 */ 4076 mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); 4077 return; 4078 } 4079 4080 /* 4081 * Flush any commands whose completion coincides with their timeout. 4082 */ 4083 mpt_intr(mpt); 4084 4085 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4086 /* 4087 * The timed-out commands have already 4088 * completed. This typically means 4089 * that either the timeout value was on 4090 * the hairy edge of what the device 4091 * requires or - more likely - interrupts 4092 * are not happening. 4093 */ 4094 mpt_prt(mpt, "Timed-out requests already complete. " 4095 "Interrupts may not be functioning.\n"); 4096 mpt_enable_ints(mpt); 4097 return; 4098 } 4099 4100 /* 4101 * We have no visibility into the current state of the 4102 * controller, so attempt to abort the commands in the 4103 * order they timed out. For initiator commands, we 4104 * depend on the reply handler pulling requests off 4105 * the timeout list. 4106 */ 4107 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { 4108 uint16_t status; 4109 uint8_t response; 4110 MSG_REQUEST_HEADER *hdrp = req->req_vbuf; 4111 4112 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", 4113 req, req->serno, hdrp->Function); 4114 ccb = req->ccb; 4115 if (ccb == NULL) { 4116 mpt_prt(mpt, "null ccb in timed out request. " 4117 "Resetting Controller.\n"); 4118 mpt_reset(mpt, TRUE); 4119 continue; 4120 } 4121 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); 4122 4123 /* 4124 * Check to see whether this is an initiator command; if 4125 * it isn't, we can't abort it with a TMF, so requeue it. 4126 */ 4127 switch (hdrp->Function) { 4128 case MPI_FUNCTION_SCSI_IO_REQUEST: 4129 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 4130 break; 4131 default: 4132 /* 4133 * XXX: FIX ME: need to abort target assists... 4134 */ 4135 mpt_prt(mpt, "just putting it back on the pend q\n"); 4136 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 4137 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, 4138 links); 4139 continue; 4140 } 4141 4142 error = mpt_scsi_send_tmf(mpt, 4143 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 4144 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 4145 htole32(req->index | scsi_io_handler_id), TRUE); 4146 4147 if (error != 0) { 4148 /* 4149 * mpt_scsi_send_tmf hard resets on failure, so no 4150 * need to do so here. Our queue should be emptied 4151 * by the hard reset. 4152 */ 4153 continue; 4154 } 4155 4156 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 4157 REQ_STATE_DONE, TRUE, 500); 4158 4159 status = mpt->tmf_req->IOCStatus; 4160 response = mpt->tmf_req->ResponseCode; 4161 mpt->tmf_req->state = REQ_STATE_FREE; 4162 4163 if (error != 0) { 4164 /* 4165 * If we've errored out, reset the controller. 4166 */ 4167 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " 4168 "Resetting controller\n"); 4169 mpt_reset(mpt, TRUE); 4170 continue; 4171 } 4172 4173 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 4174 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " 4175 "Resetting controller.\n", status); 4176 mpt_reset(mpt, TRUE); 4177 continue; 4178 } 4179 4180 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 4181 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 4182 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. 
" 4183 "Resetting controller.\n", response); 4184 mpt_reset(mpt, TRUE); 4185 continue; 4186 } 4187 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 4188 } 4189 } 4190 4191 /************************ Target Mode Support ****************************/ 4192 static void 4193 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 4194 { 4195 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 4196 PTR_SGE_TRANSACTION32 tep; 4197 PTR_SGE_SIMPLE32 se; 4198 bus_addr_t paddr; 4199 uint32_t fl; 4200 4201 paddr = req->req_pbuf; 4202 paddr += MPT_RQSL(mpt); 4203 4204 fc = req->req_vbuf; 4205 memset(fc, 0, MPT_REQUEST_AREA); 4206 fc->BufferCount = 1; 4207 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 4208 fc->MsgContext = htole32(req->index | fc_els_handler_id); 4209 4210 /* 4211 * Okay, set up ELS buffer pointers. ELS buffer pointers 4212 * consist of a TE SGL element (with details length of zero) 4213 * followe by a SIMPLE SGL element which holds the address 4214 * of the buffer. 4215 */ 4216 4217 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 4218 4219 tep->ContextSize = 4; 4220 tep->Flags = 0; 4221 tep->TransactionContext[0] = htole32(ioindex); 4222 4223 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 4224 fl = 4225 MPI_SGE_FLAGS_HOST_TO_IOC | 4226 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4227 MPI_SGE_FLAGS_LAST_ELEMENT | 4228 MPI_SGE_FLAGS_END_OF_LIST | 4229 MPI_SGE_FLAGS_END_OF_BUFFER; 4230 fl <<= MPI_SGE_FLAGS_SHIFT; 4231 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 4232 se->FlagsLength = htole32(fl); 4233 se->Address = htole32((uint32_t) paddr); 4234 mpt_lprt(mpt, MPT_PRT_DEBUG, 4235 "add ELS index %d ioindex %d for %p:%u\n", 4236 req->index, ioindex, req, req->serno); 4237 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 4238 ("mpt_fc_post_els: request not locked")); 4239 mpt_send_cmd(mpt, req); 4240 } 4241 4242 static void 4243 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 4244 { 4245 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 4246 PTR_CMD_BUFFER_DESCRIPTOR cb; 4247 bus_addr_t paddr; 4248 4249 paddr = req->req_pbuf; 4250 paddr += MPT_RQSL(mpt); 4251 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 4252 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 4253 4254 fc = req->req_vbuf; 4255 fc->BufferCount = 1; 4256 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 4257 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4258 4259 cb = &fc->Buffer[0]; 4260 cb->IoIndex = htole16(ioindex); 4261 cb->u.PhysicalAddress32 = htole32((U32) paddr); 4262 4263 mpt_check_doorbell(mpt); 4264 mpt_send_cmd(mpt, req); 4265 } 4266 4267 static int 4268 mpt_add_els_buffers(struct mpt_softc *mpt) 4269 { 4270 int i; 4271 4272 if (mpt->is_fc == 0) { 4273 return (TRUE); 4274 } 4275 4276 if (mpt->els_cmds_allocated) { 4277 return (TRUE); 4278 } 4279 4280 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), 4281 M_DEVBUF, M_NOWAIT | M_ZERO); 4282 4283 if (mpt->els_cmd_ptrs == NULL) { 4284 return (FALSE); 4285 } 4286 4287 /* 4288 * Feed the chip some ELS buffer resources 4289 */ 4290 for (i = 0; i < MPT_MAX_ELS; i++) { 4291 request_t *req = mpt_get_request(mpt, FALSE); 4292 if (req == NULL) { 4293 break; 4294 } 4295 req->state |= REQ_STATE_LOCKED; 4296 mpt->els_cmd_ptrs[i] = req; 4297 mpt_fc_post_els(mpt, req, i); 4298 } 4299 4300 if (i == 0) { 4301 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 4302 free(mpt->els_cmd_ptrs, M_DEVBUF); 4303 mpt->els_cmd_ptrs = NULL; 4304 return (FALSE); 4305 } 4306 if (i != MPT_MAX_ELS) { 4307 mpt_lprt(mpt, MPT_PRT_INFO, 4308 
"only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 4309 } 4310 mpt->els_cmds_allocated = i; 4311 return(TRUE); 4312 } 4313 4314 static int 4315 mpt_add_target_commands(struct mpt_softc *mpt) 4316 { 4317 int i, max; 4318 4319 if (mpt->tgt_cmd_ptrs) { 4320 return (TRUE); 4321 } 4322 4323 max = MPT_MAX_REQUESTS(mpt) >> 1; 4324 if (max > mpt->mpt_max_tgtcmds) { 4325 max = mpt->mpt_max_tgtcmds; 4326 } 4327 mpt->tgt_cmd_ptrs = 4328 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 4329 if (mpt->tgt_cmd_ptrs == NULL) { 4330 mpt_prt(mpt, 4331 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 4332 return (FALSE); 4333 } 4334 4335 for (i = 0; i < max; i++) { 4336 request_t *req; 4337 4338 req = mpt_get_request(mpt, FALSE); 4339 if (req == NULL) { 4340 break; 4341 } 4342 req->state |= REQ_STATE_LOCKED; 4343 mpt->tgt_cmd_ptrs[i] = req; 4344 mpt_post_target_command(mpt, req, i); 4345 } 4346 4347 4348 if (i == 0) { 4349 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 4350 free(mpt->tgt_cmd_ptrs, M_DEVBUF); 4351 mpt->tgt_cmd_ptrs = NULL; 4352 return (FALSE); 4353 } 4354 4355 mpt->tgt_cmds_allocated = i; 4356 4357 if (i < max) { 4358 mpt_lprt(mpt, MPT_PRT_INFO, 4359 "added %d of %d target bufs\n", i, max); 4360 } 4361 return (i); 4362 } 4363 4364 static int 4365 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4366 { 4367 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4368 mpt->twildcard = 1; 4369 } else if (lun >= MPT_MAX_LUNS) { 4370 return (EINVAL); 4371 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4372 return (EINVAL); 4373 } 4374 if (mpt->tenabled == 0) { 4375 if (mpt->is_fc) { 4376 (void) mpt_fc_reset_link(mpt, 0); 4377 } 4378 mpt->tenabled = 1; 4379 } 4380 if (lun == CAM_LUN_WILDCARD) { 4381 mpt->trt_wildcard.enabled = 1; 4382 } else { 4383 mpt->trt[lun].enabled = 1; 4384 } 4385 return (0); 4386 } 4387 4388 static int 4389 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4390 { 4391 int i; 4392 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4393 mpt->twildcard = 0; 4394 } else if (lun >= MPT_MAX_LUNS) { 4395 return (EINVAL); 4396 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4397 return (EINVAL); 4398 } 4399 if (lun == CAM_LUN_WILDCARD) { 4400 mpt->trt_wildcard.enabled = 0; 4401 } else { 4402 mpt->trt[lun].enabled = 0; 4403 } 4404 for (i = 0; i < MPT_MAX_LUNS; i++) { 4405 if (mpt->trt[lun].enabled) { 4406 break; 4407 } 4408 } 4409 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4410 if (mpt->is_fc) { 4411 (void) mpt_fc_reset_link(mpt, 0); 4412 } 4413 mpt->tenabled = 0; 4414 } 4415 return (0); 4416 } 4417 4418 /* 4419 * Called with MPT lock held 4420 */ 4421 static void 4422 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4423 { 4424 struct ccb_scsiio *csio = &ccb->csio; 4425 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4426 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4427 4428 switch (tgt->state) { 4429 case TGT_STATE_IN_CAM: 4430 break; 4431 case TGT_STATE_MOVING_DATA: 4432 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4433 xpt_freeze_simq(mpt->sim, 1); 4434 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4435 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4436 MPTLOCK_2_CAMLOCK(mpt); 4437 xpt_done(ccb); 4438 CAMLOCK_2_MPTLOCK(mpt); 4439 return; 4440 default: 4441 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4442 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4443 mpt_tgt_dump_req_state(mpt, cmd_req); 4444 mpt_set_ccb_status(ccb, 
CAM_REQ_CMP_ERR); 4445 MPTLOCK_2_CAMLOCK(mpt); 4446 xpt_done(ccb); 4447 CAMLOCK_2_MPTLOCK(mpt); 4448 return; 4449 } 4450 4451 if (csio->dxfer_len) { 4452 bus_dmamap_callback_t *cb; 4453 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4454 request_t *req; 4455 4456 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, 4457 ("dxfer_len %u but direction is NONE\n", csio->dxfer_len)); 4458 4459 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4460 if (mpt->outofbeer == 0) { 4461 mpt->outofbeer = 1; 4462 xpt_freeze_simq(mpt->sim, 1); 4463 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4464 } 4465 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4466 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4467 MPTLOCK_2_CAMLOCK(mpt); 4468 xpt_done(ccb); 4469 CAMLOCK_2_MPTLOCK(mpt); 4470 return; 4471 } 4472 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4473 if (sizeof (bus_addr_t) > 4) { 4474 cb = mpt_execute_req_a64; 4475 } else { 4476 cb = mpt_execute_req; 4477 } 4478 4479 req->ccb = ccb; 4480 ccb->ccb_h.ccb_req_ptr = req; 4481 4482 /* 4483 * Record the currently active ccb and the 4484 * request for it in our target state area. 4485 */ 4486 tgt->ccb = ccb; 4487 tgt->req = req; 4488 4489 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4490 ta = req->req_vbuf; 4491 4492 if (mpt->is_sas) { 4493 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4494 cmd_req->req_vbuf; 4495 ta->QueueTag = ssp->InitiatorTag; 4496 } else if (mpt->is_spi) { 4497 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4498 cmd_req->req_vbuf; 4499 ta->QueueTag = sp->Tag; 4500 } 4501 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4502 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4503 ta->ReplyWord = htole32(tgt->reply_desc); 4504 if (csio->ccb_h.target_lun >= 256) { 4505 ta->LUN[0] = 4506 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); 4507 ta->LUN[1] = csio->ccb_h.target_lun & 0xff; 4508 } else { 4509 ta->LUN[1] = csio->ccb_h.target_lun; 4510 } 4511 4512 ta->RelativeOffset = tgt->bytes_xfered; 4513 ta->DataLength = ccb->csio.dxfer_len; 4514 if (ta->DataLength > tgt->resid) { 4515 ta->DataLength = tgt->resid; 4516 } 4517 4518 /* 4519 * XXX Should be done after data transfer completes? 4520 */ 4521 tgt->resid -= csio->dxfer_len; 4522 tgt->bytes_xfered += csio->dxfer_len; 4523 4524 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 4525 ta->TargetAssistFlags |= 4526 TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4527 } 4528 4529 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4530 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 4531 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 4532 ta->TargetAssistFlags |= 4533 TARGET_ASSIST_FLAGS_AUTO_STATUS; 4534 } 4535 #endif 4536 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; 4537 4538 mpt_lprt(mpt, MPT_PRT_DEBUG, 4539 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " 4540 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, 4541 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); 4542 4543 MPTLOCK_2_CAMLOCK(mpt); 4544 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 4545 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 4546 int error; 4547 int s = splsoftvm(); 4548 error = bus_dmamap_load(mpt->buffer_dmat, 4549 req->dmap, csio->data_ptr, csio->dxfer_len, 4550 cb, req, 0); 4551 splx(s); 4552 if (error == EINPROGRESS) { 4553 xpt_freeze_simq(mpt->sim, 1); 4554 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4555 } 4556 } else { 4557 /* 4558 * We have been given a pointer to a single 4559 * physical buffer. 
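* (With CAM_DATA_PHYS, csio->data_ptr is already a physical address, so the code below synthesizes a single bus_dma segment and invokes the dma callback directly instead of going through bus_dmamap_load().)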
4560 */ 4561 struct bus_dma_segment seg; 4562 seg.ds_addr = (bus_addr_t) 4563 (vm_offset_t)csio->data_ptr; 4564 seg.ds_len = csio->dxfer_len; 4565 (*cb)(req, &seg, 1, 0); 4566 } 4567 } else { 4568 /* 4569 * We have been given a list of addresses. 4570 * This case could be easily supported but they are not 4571 * currently generated by the CAM subsystem so there 4572 * is no point in wasting the time right now. 4573 */ 4574 struct bus_dma_segment *sgs; 4575 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 4576 (*cb)(req, NULL, 0, EFAULT); 4577 } else { 4578 /* Just use the segments provided */ 4579 sgs = (struct bus_dma_segment *)csio->data_ptr; 4580 (*cb)(req, sgs, csio->sglist_cnt, 0); 4581 } 4582 } 4583 CAMLOCK_2_MPTLOCK(mpt); 4584 } else { 4585 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 4586 4587 /* 4588 * XXX: I don't know why this seems to happen, but 4589 * XXX: completing the CCB seems to make things happy. 4590 * XXX: This seems to happen if the initiator requests 4591 * XXX: enough data that we have to do multiple CTIOs. 4592 */ 4593 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4594 mpt_lprt(mpt, MPT_PRT_DEBUG, 4595 "Meaningless STATUS CCB (%p): flags %x status %x " 4596 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 4597 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 4598 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4599 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4600 MPTLOCK_2_CAMLOCK(mpt); 4601 xpt_done(ccb); 4602 CAMLOCK_2_MPTLOCK(mpt); 4603 return; 4604 } 4605 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 4606 sp = sense; 4607 memcpy(sp, &csio->sense_data, 4608 min(csio->sense_len, MPT_SENSE_SIZE)); 4609 } 4610 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); 4611 } 4612 } 4613 4614 static void 4615 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 4616 uint32_t lun, int send, uint8_t *data, size_t length) 4617 { 4618 mpt_tgt_state_t *tgt; 4619 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4620 SGE_SIMPLE32 *se; 4621 uint32_t flags; 4622 uint8_t *dptr; 4623 bus_addr_t pptr; 4624 request_t *req; 4625 4626 /* 4627 * We enter with resid set to the data load for the command. 
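* (The reply data is copied into the back half of a freshly allocated request buffer and described by one inline SGE, so this local response involves no CCB and no external DMA mapping.)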
4628 */ 4629 tgt = MPT_TGT_STATE(mpt, cmd_req); 4630 if (length == 0 || tgt->resid == 0) { 4631 tgt->resid = 0; 4632 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); 4633 return; 4634 } 4635 4636 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4637 mpt_prt(mpt, "out of resources- dropping local response\n"); 4638 return; 4639 } 4640 tgt->is_local = 1; 4641 4642 4643 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4644 ta = req->req_vbuf; 4645 4646 if (mpt->is_sas) { 4647 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 4648 ta->QueueTag = ssp->InitiatorTag; 4649 } else if (mpt->is_spi) { 4650 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 4651 ta->QueueTag = sp->Tag; 4652 } 4653 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4654 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4655 ta->ReplyWord = htole32(tgt->reply_desc); 4656 if (lun >= 256) { 4657 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4658 ta->LUN[1] = lun & 0xff; 4659 } else { 4660 ta->LUN[1] = lun; 4661 } 4662 ta->RelativeOffset = 0; 4663 ta->DataLength = length; 4664 4665 dptr = req->req_vbuf; 4666 dptr += MPT_RQSL(mpt); 4667 pptr = req->req_pbuf; 4668 pptr += MPT_RQSL(mpt); 4669 memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 4670 4671 se = (SGE_SIMPLE32 *) &ta->SGL[0]; 4672 memset(se, 0, sizeof (*se)); 4673 4674 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 4675 if (send) { 4676 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4677 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 4678 } 4679 se->Address = pptr; 4680 MPI_pSGE_SET_LENGTH(se, length); 4681 flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 4682 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 4683 MPI_pSGE_SET_FLAGS(se, flags); 4684 4685 tgt->ccb = NULL; 4686 tgt->req = req; 4687 tgt->resid -= length; 4688 tgt->bytes_xfered = length; 4689 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4690 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 4691 #else 4692 tgt->state = TGT_STATE_MOVING_DATA; 4693 #endif 4694 mpt_send_cmd(mpt, req); 4695 } 4696 4697 /* 4698 * Abort queued up CCBs 4699 */ 4700 static cam_status 4701 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 4702 { 4703 struct mpt_hdr_stailq *lp; 4704 struct ccb_hdr *srch; 4705 int found = 0; 4706 union ccb *accb = ccb->cab.abort_ccb; 4707 tgt_resource_t *trtp; 4708 4709 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 4710 4711 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4712 trtp = &mpt->trt_wildcard; 4713 } else { 4714 trtp = &mpt->trt[ccb->ccb_h.target_lun]; 4715 } 4716 4717 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4718 lp = &trtp->atios; 4719 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 4720 lp = &trtp->inots; 4721 } else { 4722 return (CAM_REQ_INVALID); 4723 } 4724 4725 STAILQ_FOREACH(srch, lp, sim_links.stqe) { 4726 if (srch == &accb->ccb_h) { 4727 found = 1; 4728 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 4729 break; 4730 } 4731 } 4732 if (found) { 4733 accb->ccb_h.status = CAM_REQ_ABORTED; 4734 xpt_done(accb); 4735 return (CAM_REQ_CMP); 4736 } 4737 mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", accb); 4738 return (CAM_PATH_INVALID); 4739 } 4740 4741 /* 4742 * Ask the MPT to abort the current target command 4743 */ 4744 static int 4745 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 4746 { 4747 int error; 4748 request_t *req; 4749 PTR_MSG_TARGET_MODE_ABORT abtp; 4750 4751 req = mpt_get_request(mpt, FALSE); 4752 if (req == NULL) { 4753 return (-1); 4754 } 4755 abtp = req->req_vbuf; 4756 memset(abtp, 0, sizeof (*abtp)); 4757 4758 
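/* Identify the I/O to kill: ReplyWord below carries the reply descriptor of the posted command buffer, and the EXACT_IO abort type limits the abort to that single I/O. */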
abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4759 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 4760 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 4761 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 4762 error = 0; 4763 if (mpt->is_fc || mpt->is_sas) { 4764 mpt_send_cmd(mpt, req); 4765 } else { 4766 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); 4767 } 4768 return (error); 4769 } 4770 4771 /* 4772 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 4773 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 4774 * FC929 to set bogus FC_RSP fields (nonzero residuals 4775 * but w/o RESID fields set). This causes QLogic initiators 4776 * to think maybe that a frame was lost. 4777 * 4778 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 4779 * we use allocated requests to do TARGET_ASSIST and we 4780 * need to know when to release them. 4781 */ 4782 4783 static void 4784 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 4785 uint8_t status, uint8_t const *sense_data) 4786 { 4787 uint8_t *cmd_vbuf; 4788 mpt_tgt_state_t *tgt; 4789 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 4790 request_t *req; 4791 bus_addr_t paddr; 4792 int resplen = 0; 4793 uint32_t fl; 4794 4795 cmd_vbuf = cmd_req->req_vbuf; 4796 cmd_vbuf += MPT_RQSL(mpt); 4797 tgt = MPT_TGT_STATE(mpt, cmd_req); 4798 4799 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4800 if (mpt->outofbeer == 0) { 4801 mpt->outofbeer = 1; 4802 xpt_freeze_simq(mpt->sim, 1); 4803 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4804 } 4805 if (ccb) { 4806 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4807 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4808 MPTLOCK_2_CAMLOCK(mpt); 4809 xpt_done(ccb); 4810 CAMLOCK_2_MPTLOCK(mpt); 4811 } else { 4812 mpt_prt(mpt, 4813 "could not allocate status request- dropping\n"); 4814 } 4815 return; 4816 } 4817 req->ccb = ccb; 4818 if (ccb) { 4819 ccb->ccb_h.ccb_mpt_ptr = mpt; 4820 ccb->ccb_h.ccb_req_ptr = req; 4821 } 4822 4823 /* 4824 * Record the currently active ccb, if any, and the 4825 * request for it in our target state area. 4826 */ 4827 tgt->ccb = ccb; 4828 tgt->req = req; 4829 tgt->state = TGT_STATE_SENDING_STATUS; 4830 4831 tp = req->req_vbuf; 4832 paddr = req->req_pbuf; 4833 paddr += MPT_RQSL(mpt); 4834 4835 memset(tp, 0, sizeof (*tp)); 4836 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 4837 if (mpt->is_fc) { 4838 PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 4839 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 4840 uint8_t *sts_vbuf; 4841 uint32_t *rsp; 4842 4843 sts_vbuf = req->req_vbuf; 4844 sts_vbuf += MPT_RQSL(mpt); 4845 rsp = (uint32_t *) sts_vbuf; 4846 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 4847 4848 /* 4849 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 4850 * It has to be big-endian in memory and is organized 4851 * in 32 bit words, which are much easier to deal with 4852 * as words which are swizzled as needed. 4853 * 4854 * All we're filling here is the FC_RSP payload. 4855 * We may just have the chip synthesize it if 4856 * we have no residual and an OK status. 4857 * 4858 */ 4859 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 4860 4861 rsp[2] = status; 4862 if (tgt->resid) { 4863 rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ 4864 rsp[3] = htobe32(tgt->resid); 4865 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4866 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4867 #endif 4868 } 4869 if (status == SCSI_STATUS_CHECK_COND) { 4870 int i; 4871 4872 rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! 
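* (0x200 here, like the 0x800 above, is an FCP_RSP flags bit - presumably FCP SNS_LEN_VALID and FCP RESID_UNDER respectively; a guess from the FCP_RSP layout, since this driver defines no mnemonics for them.)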
*/ 4873 rsp[4] = htobe32(MPT_SENSE_SIZE); 4874 if (sense_data) { 4875 memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE); 4876 } else { 4877 mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI" 4878 "TION but no sense data?\n"); 4879 memset(&rsp[8], 0, MPT_SENSE_SIZE); 4880 } 4881 for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) { 4882 rsp[i] = htobe32(rsp[i]); 4883 } 4884 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4885 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4886 #endif 4887 } 4888 #ifndef WE_TRUST_AUTO_GOOD_STATUS 4889 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4890 #endif 4891 rsp[2] = htobe32(rsp[2]); 4892 } else if (mpt->is_sas) { 4893 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4894 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; 4895 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); 4896 } else { 4897 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4898 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; 4899 tp->StatusCode = status; 4900 tp->QueueTag = htole16(sp->Tag); 4901 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); 4902 } 4903 4904 tp->ReplyWord = htole32(tgt->reply_desc); 4905 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4906 4907 #ifdef WE_CAN_USE_AUTO_REPOST 4908 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; 4909 #endif 4910 if (status == SCSI_STATUS_OK && resplen == 0) { 4911 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; 4912 } else { 4913 tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); 4914 fl = 4915 MPI_SGE_FLAGS_HOST_TO_IOC | 4916 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4917 MPI_SGE_FLAGS_LAST_ELEMENT | 4918 MPI_SGE_FLAGS_END_OF_LIST | 4919 MPI_SGE_FLAGS_END_OF_BUFFER; 4920 fl <<= MPI_SGE_FLAGS_SHIFT; 4921 fl |= resplen; 4922 tp->StatusDataSGE.FlagsLength = htole32(fl); 4923 } 4924 4925 mpt_lprt(mpt, MPT_PRT_DEBUG, 4926 "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n", 4927 ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req, 4928 req->serno, tgt->resid); 4929 if (ccb) { 4930 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4931 mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb); 4932 } 4933 mpt_send_cmd(mpt, req); 4934 } 4935 4936 static void 4937 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, 4938 tgt_resource_t *trtp, int init_id) 4939 { 4940 struct ccb_immed_notify *inot; 4941 mpt_tgt_state_t *tgt; 4942 4943 tgt = MPT_TGT_STATE(mpt, req); 4944 inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots); 4945 if (inot == NULL) { 4946 mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n"); 4947 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL); 4948 return; 4949 } 4950 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); 4951 mpt_lprt(mpt, MPT_PRT_DEBUG1, 4952 "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun); 4953 4954 memset(&inot->sense_data, 0, sizeof (inot->sense_data)); 4955 inot->sense_len = 0; 4956 memset(inot->message_args, 0, sizeof (inot->message_args)); 4957 inot->initiator_id = init_id; /* XXX */ 4958 4959 /* 4960 * This is a somewhat grotesque attempt to map from task management 4961 * to old style SCSI messages. God help us all. 
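* (Each mpt_task_mgmt_t code is mapped below to the nearest pre-SAM single-byte SCSI message so that whoever consumes the immediate notify CCB can act on it.)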
4962 */ 4963 switch (fc) { 4964 case MPT_ABORT_TASK_SET: 4965 inot->message_args[0] = MSG_ABORT_TAG; 4966 break; 4967 case MPT_CLEAR_TASK_SET: 4968 inot->message_args[0] = MSG_CLEAR_TASK_SET; 4969 break; 4970 case MPT_TARGET_RESET: 4971 inot->message_args[0] = MSG_TARGET_RESET; 4972 break; 4973 case MPT_CLEAR_ACA: 4974 inot->message_args[0] = MSG_CLEAR_ACA; 4975 break; 4976 case MPT_TERMINATE_TASK: 4977 inot->message_args[0] = MSG_ABORT_TAG; 4978 break; 4979 default: 4980 inot->message_args[0] = MSG_NOOP; 4981 break; 4982 } 4983 tgt->ccb = (union ccb *) inot; 4984 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 4985 MPTLOCK_2_CAMLOCK(mpt); 4986 xpt_done((union ccb *)inot); 4987 CAMLOCK_2_MPTLOCK(mpt); 4988 } 4989 4990 static void 4991 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 4992 { 4993 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 4994 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 4995 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 4996 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 4997 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 4998 '0', '0', '0', '1' 4999 }; 5000 struct ccb_accept_tio *atiop; 5001 lun_id_t lun; 5002 int tag_action = 0; 5003 mpt_tgt_state_t *tgt; 5004 tgt_resource_t *trtp = NULL; 5005 U8 *lunptr; 5006 U8 *vbuf; 5007 U16 itag; 5008 U16 ioindex; 5009 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 5010 uint8_t *cdbp; 5011 5012 /* 5013 * First, DMA sync the received command- 5014 * which is in the *request* * phys area. 5015 * 5016 * XXX: We could optimize this for a range 5017 */ 5018 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 5019 BUS_DMASYNC_POSTREAD); 5020 5021 /* 5022 * Stash info for the current command where we can get at it later. 5023 */ 5024 vbuf = req->req_vbuf; 5025 vbuf += MPT_RQSL(mpt); 5026 5027 /* 5028 * Get our state pointer set up. 5029 */ 5030 tgt = MPT_TGT_STATE(mpt, req); 5031 if (tgt->state != TGT_STATE_LOADED) { 5032 mpt_tgt_dump_req_state(mpt, req); 5033 panic("bad target state in mpt_scsi_tgt_atio"); 5034 } 5035 memset(tgt, 0, sizeof (mpt_tgt_state_t)); 5036 tgt->state = TGT_STATE_IN_CAM; 5037 tgt->reply_desc = reply_desc; 5038 ioindex = GET_IO_INDEX(reply_desc); 5039 if (mpt->verbose >= MPT_PRT_DEBUG) { 5040 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 5041 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 5042 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 5043 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 5044 } 5045 if (mpt->is_fc) { 5046 PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 5047 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 5048 if (fc->FcpCntl[2]) { 5049 /* 5050 * Task Management Request 5051 */ 5052 switch (fc->FcpCntl[2]) { 5053 case 0x2: 5054 fct = MPT_ABORT_TASK_SET; 5055 break; 5056 case 0x4: 5057 fct = MPT_CLEAR_TASK_SET; 5058 break; 5059 case 0x20: 5060 fct = MPT_TARGET_RESET; 5061 break; 5062 case 0x40: 5063 fct = MPT_CLEAR_ACA; 5064 break; 5065 case 0x80: 5066 fct = MPT_TERMINATE_TASK; 5067 break; 5068 default: 5069 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 5070 fc->FcpCntl[2]); 5071 mpt_scsi_tgt_status(mpt, 0, req, 5072 SCSI_STATUS_OK, 0); 5073 return; 5074 } 5075 } else { 5076 switch (fc->FcpCntl[1]) { 5077 case 0: 5078 tag_action = MSG_SIMPLE_Q_TAG; 5079 break; 5080 case 1: 5081 tag_action = MSG_HEAD_OF_Q_TAG; 5082 break; 5083 case 2: 5084 tag_action = MSG_ORDERED_Q_TAG; 5085 break; 5086 default: 5087 /* 5088 * Bah. 
 Ignore Untagged Queueing and ACA 5089 */ 5090 tag_action = MSG_SIMPLE_Q_TAG; 5091 break; 5092 } 5093 } 5094 tgt->resid = be32toh(fc->FcpDl); 5095 cdbp = fc->FcpCdb; 5096 lunptr = fc->FcpLun; 5097 itag = be16toh(fc->OptionalOxid); 5098 } else if (mpt->is_sas) { 5099 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; 5100 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; 5101 cdbp = ssp->CDB; 5102 lunptr = ssp->LogicalUnitNumber; 5103 itag = ssp->InitiatorTag; 5104 } else { 5105 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; 5106 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; 5107 cdbp = sp->CDB; 5108 lunptr = sp->LogicalUnitNumber; 5109 itag = sp->Tag; 5110 } 5111 5112 /* 5113 * Generate a simple lun 5114 */ 5115 switch (lunptr[0] & 0xc0) { 5116 case 0x40: 5117 lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; 5118 break; 5119 case 0: 5120 lun = lunptr[1]; 5121 break; 5122 default: 5123 mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type of lun\n"); 5124 lun = 0xffff; 5125 break; 5126 } 5127 5128 /* 5129 * Deal with non-enabled or bad luns here. 5130 */ 5131 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || 5132 mpt->trt[lun].enabled == 0) { 5133 if (mpt->twildcard) { 5134 trtp = &mpt->trt_wildcard; 5135 } else if (fct == MPT_NIL_TMT_VALUE) { 5136 /* 5137 * In this case, we haven't got an upstream listener 5138 * for either a specific lun or wildcard luns. We 5139 * have to make some sensible response. For regular 5140 * inquiry, just return some NOT HERE inquiry data. 5141 * For VPD inquiry, report illegal field in cdb. 5142 * For REQUEST SENSE, just return NO SENSE data. 5143 * REPORT LUNS gets illegal command. 5144 * All other commands get 'no such device'. 5145 */ 5146 uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; 5147 size_t len; 5148 5149 memset(buf, 0, MPT_SENSE_SIZE); 5150 cond = SCSI_STATUS_CHECK_COND; 5151 buf[0] = 0xf0; 5152 buf[2] = 0x5; 5153 buf[7] = 0x8; 5154 sp = buf; 5155 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5156 5157 switch (cdbp[0]) { 5158 case INQUIRY: 5159 { 5160 if (cdbp[1] != 0) { 5161 buf[12] = 0x26; 5162 buf[13] = 0x01; 5163 break; 5164 } 5165 len = min(tgt->resid, cdbp[4]); 5166 len = min(len, sizeof (null_iqd)); 5167 mpt_lprt(mpt, MPT_PRT_DEBUG, 5168 "local inquiry %ld bytes\n", (long) len); 5169 mpt_scsi_tgt_local(mpt, req, lun, 1, 5170 null_iqd, len); 5171 return; 5172 } 5173 case REQUEST_SENSE: 5174 { 5175 buf[2] = 0x0; 5176 len = min(tgt->resid, cdbp[4]); 5177 len = min(len, sizeof (buf)); 5178 mpt_lprt(mpt, MPT_PRT_DEBUG, 5179 "local reqsense %ld bytes\n", (long) len); 5180 mpt_scsi_tgt_local(mpt, req, lun, 1, 5181 buf, len); 5182 return; 5183 } 5184 case REPORT_LUNS: 5185 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); 5186 buf[12] = 0x26; 5187 break; 5188 default: 5189 mpt_lprt(mpt, MPT_PRT_DEBUG, 5190 "CMD 0x%x to unmanaged lun %u\n", 5191 cdbp[0], lun); 5192 buf[12] = 0x25; 5193 break; 5194 } 5195 mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); 5196 return; 5197 } 5198 /* otherwise, leave trtp NULL */ 5199 } else { 5200 trtp = &mpt->trt[lun]; 5201 } 5202 5203 /* 5204 * Deal with any task management 5205 */ 5206 if (fct != MPT_NIL_TMT_VALUE) { 5207 if (trtp == NULL) { 5208 mpt_prt(mpt, "task mgmt function %x but no listener\n", 5209 fct); 5210 mpt_scsi_tgt_status(mpt, 0, req, 5211 SCSI_STATUS_OK, 0); 5212 } else { 5213 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, 5214 GET_INITIATOR_INDEX(reply_desc)); 5215 } 5216 return; 5217 } 5218 5219 5220 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); 5221 if (atiop == NULL) { 5222 mpt_lprt(mpt, MPT_PRT_WARN, 5223 "no ATIOs for lun %u- 
sending back %s\n", lun, 5224 mpt->tenabled? "QUEUE FULL" : "BUSY"); 5225 mpt_scsi_tgt_status(mpt, NULL, req, 5226 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 5227 NULL); 5228 return; 5229 } 5230 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 5231 mpt_lprt(mpt, MPT_PRT_DEBUG1, 5232 "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); 5233 atiop->ccb_h.ccb_mpt_ptr = mpt; 5234 atiop->ccb_h.status = CAM_CDB_RECVD; 5235 atiop->ccb_h.target_lun = lun; 5236 atiop->sense_len = 0; 5237 atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 5238 atiop->cdb_len = mpt_cdblen(cdbp[0], 16); 5239 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 5240 5241 /* 5242 * The tag we construct here allows us to find the 5243 * original request that the command came in with. 5244 * 5245 * This way we don't have to depend on anything but the 5246 * tag to find things when CCBs show back up from CAM. 5247 */ 5248 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5249 tgt->tag_id = atiop->tag_id; 5250 if (tag_action) { 5251 atiop->tag_action = tag_action; 5252 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 5253 } 5254 if (mpt->verbose >= MPT_PRT_DEBUG) { 5255 int i; 5256 mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, 5257 atiop->ccb_h.target_lun); 5258 for (i = 0; i < atiop->cdb_len; i++) { 5259 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 5260 (i == (atiop->cdb_len - 1))? '>' : ' '); 5261 } 5262 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 5263 itag, atiop->tag_id, tgt->reply_desc, tgt->resid); 5264 } 5265 5266 MPTLOCK_2_CAMLOCK(mpt); 5267 xpt_done((union ccb *)atiop); 5268 CAMLOCK_2_MPTLOCK(mpt); 5269 } 5270 5271 static void 5272 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 5273 { 5274 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5275 5276 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 5277 "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, 5278 tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, 5279 tgt->tag_id, tgt->state); 5280 } 5281 5282 static void 5283 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 5284 { 5285 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 5286 req->index, req->index, req->state); 5287 mpt_tgt_dump_tgt_state(mpt, req); 5288 } 5289 5290 static int 5291 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 5292 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 5293 { 5294 int dbg; 5295 union ccb *ccb; 5296 U16 status; 5297 5298 if (reply_frame == NULL) { 5299 /* 5300 * Figure out what the state of the command is. 5301 */ 5302 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5303 5304 #ifdef INVARIANTS 5305 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 5306 if (tgt->req) { 5307 mpt_req_not_spcl(mpt, tgt->req, 5308 "turbo scsi_tgt_reply associated req", __LINE__); 5309 } 5310 #endif 5311 switch(tgt->state) { 5312 case TGT_STATE_LOADED: 5313 /* 5314 * This is a new command starting. 
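* (A turbo reply for a request still in TGT_STATE_LOADED means the IOC has deposited a new command into that request's buffer; mpt_scsi_tgt_atio() decodes it and queues an ATIO up to CAM.)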
5315 */ 5316 mpt_scsi_tgt_atio(mpt, req, reply_desc); 5317 break; 5318 case TGT_STATE_MOVING_DATA: 5319 { 5320 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 5321 5322 ccb = tgt->ccb; 5323 if (tgt->req == NULL) { 5324 panic("mpt: turbo target reply with null " 5325 "associated request moving data"); 5326 /* NOTREACHED */ 5327 } 5328 if (ccb == NULL) { 5329 if (tgt->is_local == 0) { 5330 panic("mpt: turbo target reply with " 5331 "null associated ccb moving data"); 5332 /* NOTREACHED */ 5333 } 5334 mpt_lprt(mpt, MPT_PRT_DEBUG, 5335 "TARGET_ASSIST local done\n"); 5336 TAILQ_REMOVE(&mpt->request_pending_list, 5337 tgt->req, links); 5338 mpt_free_request(mpt, tgt->req); 5339 tgt->req = NULL; 5340 mpt_scsi_tgt_status(mpt, NULL, req, 5341 0, NULL); 5342 return (TRUE); 5343 } 5344 tgt->ccb = NULL; 5345 tgt->nxfers++; 5346 mpt_req_untimeout(req, mpt_timeout, ccb); 5347 mpt_lprt(mpt, MPT_PRT_DEBUG, 5348 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 5349 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 5350 /* 5351 * Free the Target Assist Request 5352 */ 5353 KASSERT(tgt->req->ccb == ccb, 5354 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 5355 tgt->req->serno, tgt->req->ccb)); 5356 TAILQ_REMOVE(&mpt->request_pending_list, 5357 tgt->req, links); 5358 mpt_free_request(mpt, tgt->req); 5359 tgt->req = NULL; 5360 5361 /* 5362 * Do we need to send status now? That is, are 5363 * we done with all our data transfers? 5364 */ 5365 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 5366 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5367 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5368 KASSERT(ccb->ccb_h.status, 5369 ("zero ccb sts at %d\n", __LINE__)); 5370 tgt->state = TGT_STATE_IN_CAM; 5371 if (mpt->outofbeer) { 5372 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5373 mpt->outofbeer = 0; 5374 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5375 } 5376 MPTLOCK_2_CAMLOCK(mpt); 5377 xpt_done(ccb); 5378 CAMLOCK_2_MPTLOCK(mpt); 5379 break; 5380 } 5381 /* 5382 * Otherwise, send status (and sense) 5383 */ 5384 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5385 sp = sense; 5386 memcpy(sp, &ccb->csio.sense_data, 5387 min(ccb->csio.sense_len, MPT_SENSE_SIZE)); 5388 } 5389 mpt_scsi_tgt_status(mpt, ccb, req, 5390 ccb->csio.scsi_status, sp); 5391 break; 5392 } 5393 case TGT_STATE_SENDING_STATUS: 5394 case TGT_STATE_MOVING_DATA_AND_STATUS: 5395 { 5396 int ioindex; 5397 ccb = tgt->ccb; 5398 5399 if (tgt->req == NULL) { 5400 panic("mpt: turbo target reply with null " 5401 "associated request sending status"); 5402 /* NOTREACHED */ 5403 } 5404 5405 if (ccb) { 5406 tgt->ccb = NULL; 5407 if (tgt->state == 5408 TGT_STATE_MOVING_DATA_AND_STATUS) { 5409 tgt->nxfers++; 5410 } 5411 mpt_req_untimeout(req, mpt_timeout, ccb); 5412 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5413 ccb->ccb_h.status |= CAM_SENT_SENSE; 5414 } 5415 mpt_lprt(mpt, MPT_PRT_DEBUG, 5416 "TARGET_STATUS tag %x sts %x flgs %x req " 5417 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 5418 ccb->ccb_h.flags, tgt->req); 5419 /* 5420 * Free the Target Send Status Request 5421 */ 5422 KASSERT(tgt->req->ccb == ccb, 5423 ("tgt->req %p:%u tgt->req->ccb %p", 5424 tgt->req, tgt->req->serno, tgt->req->ccb)); 5425 /* 5426 * Notify CAM that we're done 5427 */ 5428 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5429 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5430 KASSERT(ccb->ccb_h.status, 5431 ("ZERO ccb sts at %d\n", __LINE__)); 5432 tgt->ccb = NULL; 5433 } else { 5434 mpt_lprt(mpt, MPT_PRT_DEBUG, 5435 "TARGET_STATUS non-CAM for req %p:%u\n", 5436 tgt->req, tgt->req->serno); 5437 } 5438 TAILQ_REMOVE(&mpt->request_pending_list, 
5439 tgt->req, links); 5440 mpt_free_request(mpt, tgt->req); 5441 tgt->req = NULL; 5442 5443 /* 5444 * And re-post the Command Buffer. 5445 * This will reset the state. 5446 */ 5447 ioindex = GET_IO_INDEX(reply_desc); 5448 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5449 tgt->is_local = 0; 5450 mpt_post_target_command(mpt, req, ioindex); 5451 5452 /* 5453 * And post a done for anyone who cares 5454 */ 5455 if (ccb) { 5456 if (mpt->outofbeer) { 5457 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5458 mpt->outofbeer = 0; 5459 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5460 } 5461 MPTLOCK_2_CAMLOCK(mpt); 5462 xpt_done(ccb); 5463 CAMLOCK_2_MPTLOCK(mpt); 5464 } 5465 break; 5466 } 5467 case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 5468 tgt->state = TGT_STATE_LOADED; 5469 break; 5470 default: 5471 mpt_prt(mpt, "Unknown Target State 0x%x in Context " 5472 "Reply Function\n", tgt->state); 5473 } 5474 return (TRUE); 5475 } 5476 5477 status = le16toh(reply_frame->IOCStatus); 5478 if (status != MPI_IOCSTATUS_SUCCESS) { 5479 dbg = MPT_PRT_ERROR; 5480 } else { 5481 dbg = MPT_PRT_DEBUG1; 5482 } 5483 5484 mpt_lprt(mpt, dbg, 5485 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 5486 req, req->serno, reply_frame, reply_frame->Function, status); 5487 5488 switch (reply_frame->Function) { 5489 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 5490 { 5491 mpt_tgt_state_t *tgt; 5492 #ifdef INVARIANTS 5493 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 5494 #endif 5495 if (status != MPI_IOCSTATUS_SUCCESS) { 5496 /* 5497 * XXX What to do? 5498 */ 5499 break; 5500 } 5501 tgt = MPT_TGT_STATE(mpt, req); 5502 KASSERT(tgt->state == TGT_STATE_LOADING, 5503 ("bad state 0x%x on reply to buffer post\n", tgt->state)); 5504 mpt_assign_serno(mpt, req); 5505 tgt->state = TGT_STATE_LOADED; 5506 break; 5507 } 5508 case MPI_FUNCTION_TARGET_ASSIST: 5509 #ifdef INVARIANTS 5510 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 5511 #endif 5512 mpt_prt(mpt, "target assist completion\n"); 5513 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5514 mpt_free_request(mpt, req); 5515 break; 5516 case MPI_FUNCTION_TARGET_STATUS_SEND: 5517 #ifdef INVARIANTS 5518 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 5519 #endif 5520 mpt_prt(mpt, "status send completion\n"); 5521 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5522 mpt_free_request(mpt, req); 5523 break; 5524 case MPI_FUNCTION_TARGET_MODE_ABORT: 5525 { 5526 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 5527 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 5528 PTR_MSG_TARGET_MODE_ABORT abtp = 5529 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 5530 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 5531 #ifdef INVARIANTS 5532 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 5533 #endif 5534 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 5535 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 5536 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5537 mpt_free_request(mpt, req); 5538 break; 5539 } 5540 default: 5541 mpt_prt(mpt, "Unknown Target Address Reply Function code: " 5542 "0x%x\n", reply_frame->Function); 5543 break; 5544 } 5545 return (TRUE); 5546 } 5547