/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h"	/* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#include "dev/mpt/mpilib/mpi_sas.h"
#if __FreeBSD_version >= 500000
#include <sys/sysctl.h>
#endif
#include <sys/callout.h>
#include <sys/kthread.h>

#if __FreeBSD_version >= 700025
#ifndef	CAM_NEW_TRAN_CODE
#define	CAM_NEW_TRAN_CODE	1
#endif
#endif

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name = "mpt_cam",
	.probe = mpt_cam_probe,
	.attach = mpt_cam_attach,
	.enable = mpt_cam_enable,
	.ready = mpt_cam_ready,
	.event = mpt_cam_event,
	.reset = mpt_cam_ioc_reset,
	.detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

/*
 * hw.mpt.enable_sata_wc: -1 (the default) leaves SATA drive write caches
 * alone; 0 disables them and any other value enables them when the SAS
 * initial configuration is set up (see mpt_set_initial_config_sas()).
 */
int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);

int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t handler;
	int maxq;
	int error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;
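
	/*
	 * At this point maxq reflects what can actually be queued: the
	 * smaller of the IOC's GlobalCredits and MPT_MAX_REQUESTS(mpt),
	 * less any ELS buffers reserved above and less this dedicated
	 * TMF request. This is the depth handed to cam_simq_alloc() below.
	 */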

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
386 */ 387 MPT_LOCK(mpt); 388 if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) != 389 CAM_SUCCESS) { 390 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); 391 error = ENOMEM; 392 MPT_UNLOCK(mpt); 393 goto cleanup; 394 } 395 396 if (xpt_create_path(&mpt->phydisk_path, NULL, 397 cam_sim_path(mpt->phydisk_sim), 398 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 399 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); 400 error = ENOMEM; 401 MPT_UNLOCK(mpt); 402 goto cleanup; 403 } 404 MPT_UNLOCK(mpt); 405 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); 406 return (0); 407 408 cleanup: 409 mpt_cam_detach(mpt); 410 return (error); 411 } 412 413 /* 414 * Read FC configuration information 415 */ 416 static int 417 mpt_read_config_info_fc(struct mpt_softc *mpt) 418 { 419 char *topology = NULL; 420 int rv; 421 422 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, 423 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); 424 if (rv) { 425 return (-1); 426 } 427 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", 428 mpt->mpt_fcport_page0.Header.PageVersion, 429 mpt->mpt_fcport_page0.Header.PageLength, 430 mpt->mpt_fcport_page0.Header.PageNumber, 431 mpt->mpt_fcport_page0.Header.PageType); 432 433 434 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, 435 sizeof(mpt->mpt_fcport_page0), FALSE, 5000); 436 if (rv) { 437 mpt_prt(mpt, "failed to read FC Port Page 0\n"); 438 return (-1); 439 } 440 mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0); 441 442 mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed; 443 444 switch (mpt->mpt_fcport_page0.Flags & 445 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { 446 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: 447 mpt->mpt_fcport_speed = 0; 448 topology = "<NO LOOP>"; 449 break; 450 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: 451 topology = "N-Port"; 452 break; 453 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: 454 topology = "NL-Port"; 455 break; 456 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: 457 topology = "F-Port"; 458 break; 459 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: 460 topology = "FL-Port"; 461 break; 462 default: 463 mpt->mpt_fcport_speed = 0; 464 topology = "?"; 465 break; 466 } 467 468 mpt_lprt(mpt, MPT_PRT_INFO, 469 "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x " 470 "Speed %u-Gbit\n", topology, 471 mpt->mpt_fcport_page0.WWNN.High, 472 mpt->mpt_fcport_page0.WWNN.Low, 473 mpt->mpt_fcport_page0.WWPN.High, 474 mpt->mpt_fcport_page0.WWPN.Low, 475 mpt->mpt_fcport_speed); 476 #if __FreeBSD_version >= 500000 477 MPT_UNLOCK(mpt); 478 { 479 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); 480 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); 481 482 snprintf(mpt->scinfo.fc.wwnn, 483 sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x", 484 mpt->mpt_fcport_page0.WWNN.High, 485 mpt->mpt_fcport_page0.WWNN.Low); 486 487 snprintf(mpt->scinfo.fc.wwpn, 488 sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x", 489 mpt->mpt_fcport_page0.WWPN.High, 490 mpt->mpt_fcport_page0.WWPN.Low); 491 492 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 493 "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0, 494 "World Wide Node Name"); 495 496 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 497 "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0, 498 "World Wide Port Name"); 499 500 } 501 MPT_LOCK(mpt); 502 #endif 503 return (0); 504 } 505 506 /* 507 * Set FC configuration information. 

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{

	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
	    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	len = hdr.ExtPageLength * 4;
	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    0, &hdr, buffer, len, 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		free(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	free(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
	    0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	free(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error = 0;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	free(buffer, M_DEVBUF);
out:
	return (error);
}
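
/*
 * The helpers above share the same extended config page access pattern:
 * read the extended page header first, fail if ExtPageLength is zero
 * (ExtPageLength is expressed in 32-bit words, which is why the IO unit
 * page buffer is sized as ExtPageLength * 4 bytes), and then read the
 * current page contents into a locally allocated buffer.
 */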

/*
 * Read SAS configuration information. Nothing to do yet.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		free(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
	int enabled)
{
	SataPassthroughRequest_t *pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
	/*
	 * Register - Host to Device FIS (type 0x27, command bit set)
	 * carrying an ATA SET FEATURES command (0xef); subcommand 0x02
	 * enables the drive write cache, 0x82 disables it.
	 */
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
	    10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		printf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		printf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

/*
 * Set SAS configuration information. Nothing to do yet.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			if (bootverbose)
				device_printf(mpt->dev,
				    "%sabling SATA WC on phy %d\n",
				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
"En" : "Dis", i); 872 mptsas_set_sata_wc(mpt, &phyinfo->attached, 873 mpt_enable_sata_wc); 874 } 875 } 876 877 return (0); 878 } 879 880 static int 881 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, 882 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 883 { 884 if (req != NULL) { 885 886 if (reply_frame != NULL) { 887 req->IOCStatus = le16toh(reply_frame->IOCStatus); 888 } 889 req->state &= ~REQ_STATE_QUEUED; 890 req->state |= REQ_STATE_DONE; 891 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 892 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 893 wakeup(req); 894 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { 895 /* 896 * Whew- we can free this request (late completion) 897 */ 898 mpt_free_request(mpt, req); 899 } 900 } 901 902 return (TRUE); 903 } 904 905 /* 906 * Read SCSI configuration information 907 */ 908 static int 909 mpt_read_config_info_spi(struct mpt_softc *mpt) 910 { 911 int rv, i; 912 913 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, 914 &mpt->mpt_port_page0.Header, FALSE, 5000); 915 if (rv) { 916 return (-1); 917 } 918 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", 919 mpt->mpt_port_page0.Header.PageVersion, 920 mpt->mpt_port_page0.Header.PageLength, 921 mpt->mpt_port_page0.Header.PageNumber, 922 mpt->mpt_port_page0.Header.PageType); 923 924 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, 925 &mpt->mpt_port_page1.Header, FALSE, 5000); 926 if (rv) { 927 return (-1); 928 } 929 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", 930 mpt->mpt_port_page1.Header.PageVersion, 931 mpt->mpt_port_page1.Header.PageLength, 932 mpt->mpt_port_page1.Header.PageNumber, 933 mpt->mpt_port_page1.Header.PageType); 934 935 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, 936 &mpt->mpt_port_page2.Header, FALSE, 5000); 937 if (rv) { 938 return (-1); 939 } 940 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", 941 mpt->mpt_port_page2.Header.PageVersion, 942 mpt->mpt_port_page2.Header.PageLength, 943 mpt->mpt_port_page2.Header.PageNumber, 944 mpt->mpt_port_page2.Header.PageType); 945 946 for (i = 0; i < 16; i++) { 947 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 948 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); 949 if (rv) { 950 return (-1); 951 } 952 mpt_lprt(mpt, MPT_PRT_DEBUG, 953 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, 954 mpt->mpt_dev_page0[i].Header.PageVersion, 955 mpt->mpt_dev_page0[i].Header.PageLength, 956 mpt->mpt_dev_page0[i].Header.PageNumber, 957 mpt->mpt_dev_page0[i].Header.PageType); 958 959 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 960 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); 961 if (rv) { 962 return (-1); 963 } 964 mpt_lprt(mpt, MPT_PRT_DEBUG, 965 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, 966 mpt->mpt_dev_page1[i].Header.PageVersion, 967 mpt->mpt_dev_page1[i].Header.PageLength, 968 mpt->mpt_dev_page1[i].Header.PageNumber, 969 mpt->mpt_dev_page1[i].Header.PageType); 970 } 971 972 /* 973 * At this point, we don't *have* to fail. As long as we have 974 * valid config header information, we can (barely) lurch 975 * along. 

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}
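
/*
 * SPI Port Page 1's Configuration field carries the port's SCSI ID in its
 * low byte and a bitmask of the IDs the port responds to in its upper 16
 * bits, which is what the pp1val computation below reconstructs; e.g. for
 * an initiator ID of 7 the expected value is ((1 << 7) << 16) | 7, i.e.
 * 0x00800007.
 */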

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
	int error;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	MPT_LOCK(mpt);

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	MPT_UNLOCK(mpt);
	return (error);
}

void
mpt_cam_ready(struct mpt_softc *mpt)
{
	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		MPT_LOCK(mpt);
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
		MPT_UNLOCK(mpt);
	}
	mpt->ready = 1;
}

void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	MPT_LOCK(mpt);
	mpt->ready = 0;
	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		free(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}
	MPT_UNLOCK(mpt);

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim, TRUE);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim, TRUE);
		mpt->phydisk_sim = NULL;
	}
}

/*
 * This routine is used after a system crash to dump core onto the swap
 * device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb *ccb;
	struct mpt_softc *mpt;
	request_t *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	MPT_LOCK(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
	/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
	MPT_UNLOCK(mpt);
}
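
/*
 * Two callbacks build the SGL for an I/O: mpt_execute_req_a64() below
 * emits SGE_SIMPLE64/SGE_CHAIN64 elements with 64-bit addressing, while
 * mpt_execute_req() further down emits the 32-bit SGE_SIMPLE32/SGE_CHAIN32
 * forms. Apart from the element width they follow the same structure:
 * inline simple elements at the end of the request frame, then chain
 * elements pointing at additional request frames as needed.
 */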

/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
 *
 * Takes a list of physical segments and builds the SGL for SCSI IO command
 * and forwards the command to the IOC after one last check that CAM has not
 * aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
		if (sizeof(bus_addr_t) > 4) {
			se->Address.High =
			    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
		}
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    htole32(((uint64_t)chain_list_addr) >> 32);
		}
		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of the
			 * number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			memset(se, 0, sizeof (*se));
			se->Address.Low = htole32(dm_segs->ds_addr &
			    0xffffffff);
			if (sizeof (bus_addr_t) > 4) {
				se->Address.High =
				    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
			}
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			CAMLOCK_2_MPTLOCK(mpt);
			nrq = mpt_get_request(mpt, FALSE);
			MPTLOCK_2_CAMLOCK(mpt);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last chance to check whether this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			printf("  Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	CAMLOCK_2_MPTLOCK(mpt);
	mpt_send_cmd(mpt, req);
	MPTLOCK_2_CAMLOCK(mpt);
}

static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE32 *se;
	SGE_CHAIN32 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE32 pointers and start doing CHAIN32 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE32 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		uint32_t tf;

		memset(se, 0, sizeof (*se));
		se->Address = htole32(dm_segs->ds_addr);
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		tf = flags;
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE32 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		int this_seg_lim;
		uint32_t tf, cur_off;
		bus_addr_t chain_list_addr;

		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN32 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;

		ce->Address = htole32(chain_list_addr);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of the
			 * number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN32);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE32);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag.
If we're the completely last element period, 1949 * set the end of list and end of buffer flags. 1950 */ 1951 while (seg < this_seg_lim) { 1952 memset(se, 0, sizeof (*se)); 1953 se->Address = htole32(dm_segs->ds_addr); 1954 1955 1956 1957 1958 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1959 tf = flags; 1960 if (seg == this_seg_lim - 1) { 1961 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1962 } 1963 if (seg == nseg - 1) { 1964 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1965 MPI_SGE_FLAGS_END_OF_BUFFER; 1966 } 1967 MPI_pSGE_SET_FLAGS(se, tf); 1968 se->FlagsLength = htole32(se->FlagsLength); 1969 se++; 1970 seg++; 1971 dm_segs++; 1972 } 1973 1974 next_chain: 1975 /* 1976 * If we have more segments to do and we've used up all of 1977 * the space in a request area, go allocate another one 1978 * and chain to that. 1979 */ 1980 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1981 request_t *nrq; 1982 1983 CAMLOCK_2_MPTLOCK(mpt); 1984 nrq = mpt_get_request(mpt, FALSE); 1985 MPTLOCK_2_CAMLOCK(mpt); 1986 1987 if (nrq == NULL) { 1988 error = ENOMEM; 1989 goto bad; 1990 } 1991 1992 /* 1993 * Append the new request area on the tail of our list. 1994 */ 1995 if ((trq = req->chain) == NULL) { 1996 req->chain = nrq; 1997 } else { 1998 while (trq->chain != NULL) { 1999 trq = trq->chain; 2000 } 2001 trq->chain = nrq; 2002 } 2003 trq = nrq; 2004 mpt_off = trq->req_vbuf; 2005 if (mpt->verbose >= MPT_PRT_DEBUG) { 2006 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 2007 } 2008 nxt_off = 0; 2009 } 2010 } 2011 out: 2012 2013 /* 2014 * Last time we need to check if this CCB needs to be aborted. 2015 */ 2016 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2017 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2018 request_t *cmd_req = 2019 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2020 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 2021 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 2022 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 2023 } 2024 mpt_prt(mpt, 2025 "mpt_execute_req: I/O cancelled (status 0x%x)\n", 2026 ccb->ccb_h.status & CAM_STATUS_MASK); 2027 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2028 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2029 } 2030 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2031 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 2032 xpt_done(ccb); 2033 CAMLOCK_2_MPTLOCK(mpt); 2034 mpt_free_request(mpt, req); 2035 MPTLOCK_2_CAMLOCK(mpt); 2036 return; 2037 } 2038 2039 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2040 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2041 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, 2042 mpt_timeout, ccb); 2043 } 2044 if (mpt->verbose > MPT_PRT_DEBUG) { 2045 int nc = 0; 2046 mpt_print_request(req->req_vbuf); 2047 for (trq = req->chain; trq; trq = trq->chain) { 2048 printf(" Additional Chain Area %d\n", nc++); 2049 mpt_dump_sgl(trq->req_vbuf, 0); 2050 } 2051 } 2052 2053 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2054 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2055 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 2056 #ifdef WE_TRUST_AUTO_GOOD_STATUS 2057 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 2058 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 2059 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 2060 } else { 2061 tgt->state = TGT_STATE_MOVING_DATA; 2062 } 2063 #else 2064 tgt->state = TGT_STATE_MOVING_DATA; 2065 #endif 2066 } 2067 CAMLOCK_2_MPTLOCK(mpt); 2068 mpt_send_cmd(mpt, req); 2069 MPTLOCK_2_CAMLOCK(mpt); 2070 } 2071 2072 static void 2073 mpt_start(struct cam_sim *sim, union ccb *ccb) 2074 { 2075 
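/*
 * Summary of the I/O start path implemented below:
 *
 * 1. Allocate a request frame.  If none are free, freeze the SIM
 *    queue ("outofbeer") and ask CAM to requeue the CCB.
 * 2. Link the request and the CCB and build a MSG_SCSI_IO_REQUEST
 *    (or the RAID passthrough variant) in the request frame.  The
 *    MsgContext (request index OR'd with scsi_io_handler_id) is how
 *    the reply handler finds this request again at completion time.
 * 3. Map the data buffer with bus_dma; the callback (mpt_execute_req
 *    or mpt_execute_req_a64) builds the SGL and queues the command
 *    to the IOC.
 *
 * LUNs at or above MPT_MAX_LUNS are encoded in the single level
 * (flat) format, e.g. lun 0x123 becomes LUN[0] = 0x41 and
 * LUN[1] = 0x23; smaller LUNs are placed directly in LUN[1].
 */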
request_t *req; 2076 struct mpt_softc *mpt; 2077 MSG_SCSI_IO_REQUEST *mpt_req; 2078 struct ccb_scsiio *csio = &ccb->csio; 2079 struct ccb_hdr *ccbh = &ccb->ccb_h; 2080 bus_dmamap_callback_t *cb; 2081 target_id_t tgt; 2082 int raid_passthru; 2083 2084 /* Get the pointer for the physical adapter */ 2085 mpt = ccb->ccb_h.ccb_mpt_ptr; 2086 raid_passthru = (sim == mpt->phydisk_sim); 2087 2088 CAMLOCK_2_MPTLOCK(mpt); 2089 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 2090 if (mpt->outofbeer == 0) { 2091 mpt->outofbeer = 1; 2092 xpt_freeze_simq(mpt->sim, 1); 2093 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 2094 } 2095 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2096 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 2097 MPTLOCK_2_CAMLOCK(mpt); 2098 xpt_done(ccb); 2099 return; 2100 } 2101 #ifdef INVARIANTS 2102 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); 2103 #endif 2104 MPTLOCK_2_CAMLOCK(mpt); 2105 2106 if (sizeof (bus_addr_t) > 4) { 2107 cb = mpt_execute_req_a64; 2108 } else { 2109 cb = mpt_execute_req; 2110 } 2111 2112 /* 2113 * Link the ccb and the request structure so we can find 2114 * the other knowing either the request or the ccb 2115 */ 2116 req->ccb = ccb; 2117 ccb->ccb_h.ccb_req_ptr = req; 2118 2119 /* Now we build the command for the IOC */ 2120 mpt_req = req->req_vbuf; 2121 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); 2122 2123 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; 2124 if (raid_passthru) { 2125 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 2126 CAMLOCK_2_MPTLOCK(mpt); 2127 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 2128 MPTLOCK_2_CAMLOCK(mpt); 2129 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2130 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 2131 xpt_done(ccb); 2132 return; 2133 } 2134 MPTLOCK_2_CAMLOCK(mpt); 2135 mpt_req->Bus = 0; /* we never set bus here */ 2136 } else { 2137 tgt = ccb->ccb_h.target_id; 2138 mpt_req->Bus = 0; /* XXX */ 2139 2140 } 2141 mpt_req->SenseBufferLength = 2142 (csio->sense_len < MPT_SENSE_SIZE) ? 2143 csio->sense_len : MPT_SENSE_SIZE; 2144 2145 /* 2146 * We use the message context to find the request structure when we 2147 * get the command completion interrupt from the IOC.
2148 */ 2149 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); 2150 2151 /* Which physical device to do the I/O on */ 2152 mpt_req->TargetID = tgt; 2153 2154 /* We assume a single level LUN type */ 2155 if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) { 2156 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f); 2157 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff; 2158 } else { 2159 mpt_req->LUN[1] = ccb->ccb_h.target_lun; 2160 } 2161 2162 /* Set the direction of the transfer */ 2163 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2164 mpt_req->Control = MPI_SCSIIO_CONTROL_READ; 2165 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 2166 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; 2167 } else { 2168 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; 2169 } 2170 2171 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 2172 switch(ccb->csio.tag_action) { 2173 case MSG_HEAD_OF_Q_TAG: 2174 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; 2175 break; 2176 case MSG_ACA_TASK: 2177 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; 2178 break; 2179 case MSG_ORDERED_Q_TAG: 2180 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; 2181 break; 2182 case MSG_SIMPLE_Q_TAG: 2183 default: 2184 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2185 break; 2186 } 2187 } else { 2188 if (mpt->is_fc || mpt->is_sas) { 2189 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2190 } else { 2191 /* XXX No such thing for a target doing packetized. */ 2192 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; 2193 } 2194 } 2195 2196 if (mpt->is_spi) { 2197 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 2198 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; 2199 } 2200 } 2201 mpt_req->Control = htole32(mpt_req->Control); 2202 2203 /* Copy the scsi command block into place */ 2204 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2205 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); 2206 } else { 2207 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); 2208 } 2209 2210 mpt_req->CDBLength = csio->cdb_len; 2211 mpt_req->DataLength = htole32(csio->dxfer_len); 2212 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); 2213 2214 /* 2215 * Do a *short* print here if we're set to MPT_PRT_DEBUG 2216 */ 2217 if (mpt->verbose == MPT_PRT_DEBUG) { 2218 U32 df; 2219 mpt_prt(mpt, "mpt_start: %s op 0x%x ", 2220 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? 2221 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); 2222 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; 2223 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { 2224 mpt_prtc(mpt, "(%s %u byte%s ", 2225 (df == MPI_SCSIIO_CONTROL_READ)? 2226 "read" : "write", csio->dxfer_len, 2227 (csio->dxfer_len == 1)? ")" : "s)"); 2228 } 2229 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt, 2230 ccb->ccb_h.target_lun, req, req->serno); 2231 } 2232 2233 /* 2234 * If we have any data to send with this command map it into bus space. 2235 */ 2236 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2237 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 2238 /* 2239 * We've been given a pointer to a single buffer. 2240 */ 2241 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 2242 /* 2243 * Virtual address that needs to translated into 2244 * one or more physical address ranges. 
2245 */ 2246 int error; 2247 int s = splsoftvm(); 2248 error = bus_dmamap_load(mpt->buffer_dmat, 2249 req->dmap, csio->data_ptr, csio->dxfer_len, 2250 cb, req, 0); 2251 splx(s); 2252 if (error == EINPROGRESS) { 2253 /* 2254 * So as to maintain ordering, 2255 * freeze the controller queue 2256 * until our mapping is 2257 * returned. 2258 */ 2259 xpt_freeze_simq(mpt->sim, 1); 2260 ccbh->status |= CAM_RELEASE_SIMQ; 2261 } 2262 } else { 2263 /* 2264 * We have been given a pointer to single 2265 * physical buffer. 2266 */ 2267 struct bus_dma_segment seg; 2268 seg.ds_addr = 2269 (bus_addr_t)(vm_offset_t)csio->data_ptr; 2270 seg.ds_len = csio->dxfer_len; 2271 (*cb)(req, &seg, 1, 0); 2272 } 2273 } else { 2274 /* 2275 * We have been given a list of addresses. 2276 * This case could be easily supported but they are not 2277 * currently generated by the CAM subsystem so there 2278 * is no point in wasting the time right now. 2279 */ 2280 struct bus_dma_segment *segs; 2281 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { 2282 (*cb)(req, NULL, 0, EFAULT); 2283 } else { 2284 /* Just use the segments provided */ 2285 segs = (struct bus_dma_segment *)csio->data_ptr; 2286 (*cb)(req, segs, csio->sglist_cnt, 0); 2287 } 2288 } 2289 } else { 2290 (*cb)(req, NULL, 0, 0); 2291 } 2292 } 2293 2294 static int 2295 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, 2296 int sleep_ok) 2297 { 2298 int error; 2299 uint16_t status; 2300 uint8_t response; 2301 2302 error = mpt_scsi_send_tmf(mpt, 2303 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? 2304 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : 2305 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 2306 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 2307 0, /* XXX How do I get the channel ID? */ 2308 tgt != CAM_TARGET_WILDCARD ? tgt : 0, 2309 lun != CAM_LUN_WILDCARD ? lun : 0, 2310 0, sleep_ok); 2311 2312 if (error != 0) { 2313 /* 2314 * mpt_scsi_send_tmf hard resets on failure, so no 2315 * need to do so here. 2316 */ 2317 mpt_prt(mpt, 2318 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); 2319 return (EIO); 2320 } 2321 2322 /* Wait for bus reset to be processed by the IOC. */ 2323 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 2324 REQ_STATE_DONE, sleep_ok, 5000); 2325 2326 status = le16toh(mpt->tmf_req->IOCStatus); 2327 response = mpt->tmf_req->ResponseCode; 2328 mpt->tmf_req->state = REQ_STATE_FREE; 2329 2330 if (error) { 2331 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " 2332 "Resetting controller.\n"); 2333 mpt_reset(mpt, TRUE); 2334 return (ETIMEDOUT); 2335 } 2336 2337 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 2338 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " 2339 "Resetting controller.\n", status); 2340 mpt_reset(mpt, TRUE); 2341 return (EIO); 2342 } 2343 2344 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 2345 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 2346 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. 
" 2347 "Resetting controller.\n", response); 2348 mpt_reset(mpt, TRUE); 2349 return (EIO); 2350 } 2351 return (0); 2352 } 2353 2354 static int 2355 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) 2356 { 2357 int r = 0; 2358 request_t *req; 2359 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; 2360 2361 req = mpt_get_request(mpt, FALSE); 2362 if (req == NULL) { 2363 return (ENOMEM); 2364 } 2365 fc = req->req_vbuf; 2366 memset(fc, 0, sizeof(*fc)); 2367 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; 2368 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; 2369 fc->MsgContext = htole32(req->index | fc_els_handler_id); 2370 mpt_send_cmd(mpt, req); 2371 if (dowait) { 2372 r = mpt_wait_req(mpt, req, REQ_STATE_DONE, 2373 REQ_STATE_DONE, FALSE, 60 * 1000); 2374 if (r == 0) { 2375 mpt_free_request(mpt, req); 2376 } 2377 } 2378 return (r); 2379 } 2380 2381 static int 2382 mpt_cam_event(struct mpt_softc *mpt, request_t *req, 2383 MSG_EVENT_NOTIFY_REPLY *msg) 2384 { 2385 uint32_t data0, data1; 2386 2387 data0 = le32toh(msg->Data[0]); 2388 data1 = le32toh(msg->Data[1]); 2389 switch(msg->Event & 0xFF) { 2390 case MPI_EVENT_UNIT_ATTENTION: 2391 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", 2392 (data0 >> 8) & 0xff, data0 & 0xff); 2393 break; 2394 2395 case MPI_EVENT_IOC_BUS_RESET: 2396 /* We generated a bus reset */ 2397 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", 2398 (data0 >> 8) & 0xff); 2399 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2400 break; 2401 2402 case MPI_EVENT_EXT_BUS_RESET: 2403 /* Someone else generated a bus reset */ 2404 mpt_prt(mpt, "External Bus Reset Detected\n"); 2405 /* 2406 * These replies don't return EventData like the MPI 2407 * spec says they do 2408 */ 2409 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2410 break; 2411 2412 case MPI_EVENT_RESCAN: 2413 #if __FreeBSD_version >= 600000 2414 { 2415 union ccb *ccb; 2416 uint32_t pathid; 2417 /* 2418 * In general this means a device has been added to the loop. 2419 */ 2420 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2421 if (mpt->ready == 0) { 2422 break; 2423 } 2424 if (mpt->phydisk_sim) { 2425 pathid = cam_sim_path(mpt->phydisk_sim); 2426 } else { 2427 pathid = cam_sim_path(mpt->sim); 2428 } 2429 MPTLOCK_2_CAMLOCK(mpt); 2430 /* 2431 * Allocate a CCB, create a wildcard path for this bus, 2432 * and schedule a rescan. 2433 */ 2434 ccb = xpt_alloc_ccb_nowait(); 2435 if (ccb == NULL) { 2436 mpt_prt(mpt, "unable to alloc CCB for rescan\n"); 2437 CAMLOCK_2_MPTLOCK(mpt); 2438 break; 2439 } 2440 2441 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, 2442 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2443 CAMLOCK_2_MPTLOCK(mpt); 2444 mpt_prt(mpt, "unable to create path for rescan\n"); 2445 xpt_free_ccb(ccb); 2446 break; 2447 } 2448 xpt_rescan(ccb); 2449 CAMLOCK_2_MPTLOCK(mpt); 2450 break; 2451 } 2452 #else 2453 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2454 break; 2455 #endif 2456 case MPI_EVENT_LINK_STATUS_CHANGE: 2457 mpt_prt(mpt, "Port %d: LinkState: %s\n", 2458 (data1 >> 8) & 0xff, 2459 ((data0 & 0xff) == 0)? 
"Failed" : "Active"); 2460 break; 2461 2462 case MPI_EVENT_LOOP_STATE_CHANGE: 2463 switch ((data0 >> 16) & 0xff) { 2464 case 0x01: 2465 mpt_prt(mpt, 2466 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " 2467 "(Loop Initialization)\n", 2468 (data1 >> 8) & 0xff, 2469 (data0 >> 8) & 0xff, 2470 (data0 ) & 0xff); 2471 switch ((data0 >> 8) & 0xff) { 2472 case 0xF7: 2473 if ((data0 & 0xff) == 0xF7) { 2474 mpt_prt(mpt, "Device needs AL_PA\n"); 2475 } else { 2476 mpt_prt(mpt, "Device %02x doesn't like " 2477 "FC performance\n", 2478 data0 & 0xFF); 2479 } 2480 break; 2481 case 0xF8: 2482 if ((data0 & 0xff) == 0xF7) { 2483 mpt_prt(mpt, "Device had loop failure " 2484 "at its receiver prior to acquiring" 2485 " AL_PA\n"); 2486 } else { 2487 mpt_prt(mpt, "Device %02x detected loop" 2488 " failure at its receiver\n", 2489 data0 & 0xFF); 2490 } 2491 break; 2492 default: 2493 mpt_prt(mpt, "Device %02x requests that device " 2494 "%02x reset itself\n", 2495 data0 & 0xFF, 2496 (data0 >> 8) & 0xFF); 2497 break; 2498 } 2499 break; 2500 case 0x02: 2501 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2502 "LPE(%02x,%02x) (Loop Port Enable)\n", 2503 (data1 >> 8) & 0xff, /* Port */ 2504 (data0 >> 8) & 0xff, /* Character 3 */ 2505 (data0 ) & 0xff /* Character 4 */); 2506 break; 2507 case 0x03: 2508 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2509 "LPB(%02x,%02x) (Loop Port Bypass)\n", 2510 (data1 >> 8) & 0xff, /* Port */ 2511 (data0 >> 8) & 0xff, /* Character 3 */ 2512 (data0 ) & 0xff /* Character 4 */); 2513 break; 2514 default: 2515 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " 2516 "FC event (%02x %02x %02x)\n", 2517 (data1 >> 8) & 0xff, /* Port */ 2518 (data0 >> 16) & 0xff, /* Event */ 2519 (data0 >> 8) & 0xff, /* Character 3 */ 2520 (data0 ) & 0xff /* Character 4 */); 2521 } 2522 break; 2523 2524 case MPI_EVENT_LOGOUT: 2525 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", 2526 (data1 >> 8) & 0xff, data0); 2527 break; 2528 case MPI_EVENT_QUEUE_FULL: 2529 { 2530 struct cam_sim *sim; 2531 struct cam_path *tmppath; 2532 struct ccb_relsim crs; 2533 PTR_EVENT_DATA_QUEUE_FULL pqf; 2534 lun_id_t lun_id; 2535 2536 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; 2537 pqf->CurrentDepth = le16toh(pqf->CurrentDepth); 2538 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " 2539 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); 2540 if (mpt->phydisk_sim) { 2541 sim = mpt->phydisk_sim; 2542 } else { 2543 sim = mpt->sim; 2544 } 2545 MPTLOCK_2_CAMLOCK(mpt); 2546 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { 2547 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2548 pqf->TargetID, lun_id) != CAM_REQ_CMP) { 2549 mpt_prt(mpt, "unable to create a path to send " 2550 "XPT_REL_SIMQ"); 2551 CAMLOCK_2_MPTLOCK(mpt); 2552 break; 2553 } 2554 xpt_setup_ccb(&crs.ccb_h, tmppath, 5); 2555 crs.ccb_h.func_code = XPT_REL_SIMQ; 2556 crs.release_flags = RELSIM_ADJUST_OPENINGS; 2557 crs.openings = pqf->CurrentDepth - 1; 2558 xpt_action((union ccb *)&crs); 2559 if (crs.ccb_h.status != CAM_REQ_CMP) { 2560 mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); 2561 } 2562 xpt_free_path(tmppath); 2563 } 2564 CAMLOCK_2_MPTLOCK(mpt); 2565 break; 2566 } 2567 case MPI_EVENT_EVENT_CHANGE: 2568 case MPI_EVENT_INTEGRATED_RAID: 2569 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2570 case MPI_EVENT_SAS_SES: 2571 break; 2572 default: 2573 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", 2574 msg->Event & 0xFF); 2575 return (0); 2576 } 2577 return (1); 2578 } 2579 2580 /* 2581 * Reply path for all SCSI I/O requests, called from our 2582 * interrupt handler by 
extracting our handler index from 2583 * the MsgContext field of the reply from the IOC. 2584 * 2585 * This routine is optimized for the common case of a 2586 * completion without error. All exception handling is 2587 * offloaded to non-inlined helper routines to minimize 2588 * cache footprint. 2589 */ 2590 static int 2591 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, 2592 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2593 { 2594 MSG_SCSI_IO_REQUEST *scsi_req; 2595 union ccb *ccb; 2596 2597 if (req->state == REQ_STATE_FREE) { 2598 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); 2599 return (TRUE); 2600 } 2601 2602 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; 2603 ccb = req->ccb; 2604 if (ccb == NULL) { 2605 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", 2606 req, req->serno); 2607 return (TRUE); 2608 } 2609 2610 mpt_req_untimeout(req, mpt_timeout, ccb); 2611 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2612 2613 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2614 bus_dmasync_op_t op; 2615 2616 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 2617 op = BUS_DMASYNC_POSTREAD; 2618 else 2619 op = BUS_DMASYNC_POSTWRITE; 2620 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 2621 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2622 } 2623 2624 if (reply_frame == NULL) { 2625 /* 2626 * Context only reply, completion without error status. 2627 */ 2628 ccb->csio.resid = 0; 2629 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2630 ccb->csio.scsi_status = SCSI_STATUS_OK; 2631 } else { 2632 mpt_scsi_reply_frame_handler(mpt, req, reply_frame); 2633 } 2634 2635 if (mpt->outofbeer) { 2636 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2637 mpt->outofbeer = 0; 2638 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 2639 } 2640 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { 2641 struct scsi_inquiry_data *iq = 2642 (struct scsi_inquiry_data *)ccb->csio.data_ptr; 2643 if (scsi_req->Function == 2644 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 2645 /* 2646 * Fake out the device type so that only the 2647 * pass-thru device will attach. 2648 */ 2649 iq->device &= ~0x1F; 2650 iq->device |= T_NODEVICE; 2651 } 2652 } 2653 if (mpt->verbose == MPT_PRT_DEBUG) { 2654 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", 2655 req, req->serno); 2656 } 2657 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 2658 MPTLOCK_2_CAMLOCK(mpt); 2659 xpt_done(ccb); 2660 CAMLOCK_2_MPTLOCK(mpt); 2661 if ((req->state & REQ_STATE_TIMEDOUT) == 0) { 2662 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2663 } else { 2664 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", 2665 req, req->serno); 2666 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 2667 } 2668 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, 2669 ("CCB req needed wakeup")); 2670 #ifdef INVARIANTS 2671 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); 2672 #endif 2673 mpt_free_request(mpt, req); 2674 return (TRUE); 2675 } 2676 2677 static int 2678 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, 2679 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2680 { 2681 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; 2682 2683 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); 2684 #ifdef INVARIANTS 2685 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); 2686 #endif 2687 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; 2688 /* Record IOC Status and Response Code of TMF for any waiters. 
*/ 2689 req->IOCStatus = le16toh(tmf_reply->IOCStatus); 2690 req->ResponseCode = tmf_reply->ResponseCode; 2691 2692 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", 2693 req, req->serno, le16toh(tmf_reply->IOCStatus)); 2694 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2695 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 2696 req->state |= REQ_STATE_DONE; 2697 wakeup(req); 2698 } else { 2699 mpt->tmf_req->state = REQ_STATE_FREE; 2700 } 2701 return (TRUE); 2702 } 2703 2704 /* 2705 * XXX: Move to definitions file 2706 */ 2707 #define ELS 0x22 2708 #define FC4LS 0x32 2709 #define ABTS 0x81 2710 #define BA_ACC 0x84 2711 2712 #define LS_RJT 0x01 2713 #define LS_ACC 0x02 2714 #define PLOGI 0x03 2715 #define LOGO 0x05 2716 #define SRR 0x14 2717 #define PRLI 0x20 2718 #define PRLO 0x21 2719 #define ADISC 0x52 2720 #define RSCN 0x61 2721 2722 static void 2723 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, 2724 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) 2725 { 2726 uint32_t fl; 2727 MSG_LINK_SERVICE_RSP_REQUEST tmp; 2728 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; 2729 2730 /* 2731 * We are going to reuse the ELS request to send this response back. 2732 */ 2733 rsp = &tmp; 2734 memset(rsp, 0, sizeof(*rsp)); 2735 2736 #ifdef USE_IMMEDIATE_LINK_DATA 2737 /* 2738 * Apparently the IMMEDIATE stuff doesn't seem to work. 2739 */ 2740 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; 2741 #endif 2742 rsp->RspLength = length; 2743 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; 2744 rsp->MsgContext = htole32(req->index | fc_els_handler_id); 2745 2746 /* 2747 * Copy over information from the original reply frame to 2748 * it's correct place in the response. 2749 */ 2750 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); 2751 2752 /* 2753 * And now copy back the temporary area to the original frame. 2754 */ 2755 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); 2756 rsp = req->req_vbuf; 2757 2758 #ifdef USE_IMMEDIATE_LINK_DATA 2759 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); 2760 #else 2761 { 2762 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; 2763 bus_addr_t paddr = req->req_pbuf; 2764 paddr += MPT_RQSL(mpt); 2765 2766 fl = 2767 MPI_SGE_FLAGS_HOST_TO_IOC | 2768 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2769 MPI_SGE_FLAGS_LAST_ELEMENT | 2770 MPI_SGE_FLAGS_END_OF_LIST | 2771 MPI_SGE_FLAGS_END_OF_BUFFER; 2772 fl <<= MPI_SGE_FLAGS_SHIFT; 2773 fl |= (length); 2774 se->FlagsLength = htole32(fl); 2775 se->Address = htole32((uint32_t) paddr); 2776 } 2777 #endif 2778 2779 /* 2780 * Send it on... 
2781 */ 2782 mpt_send_cmd(mpt, req); 2783 } 2784 2785 static int 2786 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, 2787 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2788 { 2789 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = 2790 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; 2791 U8 rctl; 2792 U8 type; 2793 U8 cmd; 2794 U16 status = le16toh(reply_frame->IOCStatus); 2795 U32 *elsbuf; 2796 int ioindex; 2797 int do_refresh = TRUE; 2798 2799 #ifdef INVARIANTS 2800 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 2801 ("fc_els_reply_handler: req %p:%u for function %x on freelist!", 2802 req, req->serno, rp->Function)); 2803 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2804 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2805 } else { 2806 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2807 } 2808 #endif 2809 mpt_lprt(mpt, MPT_PRT_DEBUG, 2810 "FC_ELS Complete: req %p:%u, reply %p function %x\n", 2811 req, req->serno, reply_frame, reply_frame->Function); 2812 2813 if (status != MPI_IOCSTATUS_SUCCESS) { 2814 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", 2815 status, reply_frame->Function); 2816 if (status == MPI_IOCSTATUS_INVALID_STATE) { 2817 /* 2818 * XXX: to get around shutdown issue 2819 */ 2820 mpt->disabled = 1; 2821 return (TRUE); 2822 } 2823 return (TRUE); 2824 } 2825 2826 /* 2827 * If the function is that of a link service response, we recycle the 2828 * request to serve as a refresh for a new link service buffer post. 2829 * 2830 * The request pointer is bogus in this case and we have to fetch 2831 * it based upon the TransactionContext. 2832 */ 2833 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { 2834 /* Freddie Uncle Charlie Katie */ 2835 /* We don't get the IOINDEX as part of the Link Svc Rsp */ 2836 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) 2837 if (mpt->els_cmd_ptrs[ioindex] == req) { 2838 break; 2839 } 2840 2841 KASSERT(ioindex < mpt->els_cmds_allocated, 2842 ("can't find my mommie!")); 2843 2844 /* remove from active list as we're going to re-post it */ 2845 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2846 req->state &= ~REQ_STATE_QUEUED; 2847 req->state |= REQ_STATE_DONE; 2848 mpt_fc_post_els(mpt, req, ioindex); 2849 return (TRUE); 2850 } 2851 2852 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2853 /* remove from active list as we're done */ 2854 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2855 req->state &= ~REQ_STATE_QUEUED; 2856 req->state |= REQ_STATE_DONE; 2857 if (req->state & REQ_STATE_TIMEDOUT) { 2858 mpt_lprt(mpt, MPT_PRT_DEBUG, 2859 "Sync Primitive Send Completed After Timeout\n"); 2860 mpt_free_request(mpt, req); 2861 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { 2862 mpt_lprt(mpt, MPT_PRT_DEBUG, 2863 "Async Primitive Send Complete\n"); 2864 mpt_free_request(mpt, req); 2865 } else { 2866 mpt_lprt(mpt, MPT_PRT_DEBUG, 2867 "Sync Primitive Send Complete- Waking Waiter\n"); 2868 wakeup(req); 2869 } 2870 return (TRUE); 2871 } 2872 2873 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { 2874 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " 2875 "Length %d Message Flags %x\n", rp->Function, rp->Flags, 2876 rp->MsgLength, rp->MsgFlags); 2877 return (TRUE); 2878 } 2879 2880 if (rp->MsgLength <= 5) { 2881 /* 2882 * This is just an ack of an original ELS buffer post 2883 */ 2884 mpt_lprt(mpt, MPT_PRT_DEBUG, 2885 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); 2886 return (TRUE); 2887 } 2888 2889 2890 rctl = (le32toh(rp->Rctl_Did) &
MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; 2891 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; 2892 2893 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; 2894 cmd = be32toh(elsbuf[0]) >> 24; 2895 2896 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { 2897 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); 2898 return (TRUE); 2899 } 2900 2901 ioindex = le32toh(rp->TransactionContext); 2902 req = mpt->els_cmd_ptrs[ioindex]; 2903 2904 if (rctl == ELS && type == 1) { 2905 switch (cmd) { 2906 case PRLI: 2907 /* 2908 * Send back a PRLI ACC 2909 */ 2910 mpt_prt(mpt, "PRLI from 0x%08x%08x\n", 2911 le32toh(rp->Wwn.PortNameHigh), 2912 le32toh(rp->Wwn.PortNameLow)); 2913 elsbuf[0] = htobe32(0x02100014); 2914 elsbuf[1] |= htobe32(0x00000100); 2915 elsbuf[4] = htobe32(0x00000002); 2916 if (mpt->role & MPT_ROLE_TARGET) 2917 elsbuf[4] |= htobe32(0x00000010); 2918 if (mpt->role & MPT_ROLE_INITIATOR) 2919 elsbuf[4] |= htobe32(0x00000020); 2920 /* remove from active list as we're done */ 2921 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2922 req->state &= ~REQ_STATE_QUEUED; 2923 req->state |= REQ_STATE_DONE; 2924 mpt_fc_els_send_response(mpt, req, rp, 20); 2925 do_refresh = FALSE; 2926 break; 2927 case PRLO: 2928 memset(elsbuf, 0, 5 * (sizeof (U32))); 2929 elsbuf[0] = htobe32(0x02100014); 2930 elsbuf[1] = htobe32(0x08000100); 2931 mpt_prt(mpt, "PRLO from 0x%08x%08x\n", 2932 le32toh(rp->Wwn.PortNameHigh), 2933 le32toh(rp->Wwn.PortNameLow)); 2934 /* remove from active list as we're done */ 2935 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2936 req->state &= ~REQ_STATE_QUEUED; 2937 req->state |= REQ_STATE_DONE; 2938 mpt_fc_els_send_response(mpt, req, rp, 20); 2939 do_refresh = FALSE; 2940 break; 2941 default: 2942 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); 2943 break; 2944 } 2945 } else if (rctl == ABTS && type == 0) { 2946 uint16_t rx_id = le16toh(rp->Rxid); 2947 uint16_t ox_id = le16toh(rp->Oxid); 2948 request_t *tgt_req = NULL; 2949 2950 mpt_prt(mpt, 2951 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", 2952 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), 2953 le32toh(rp->Wwn.PortNameLow)); 2954 if (rx_id >= mpt->mpt_max_tgtcmds) { 2955 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); 2956 } else if (mpt->tgt_cmd_ptrs == NULL) { 2957 mpt_prt(mpt, "No TGT CMD PTRS\n"); 2958 } else { 2959 tgt_req = mpt->tgt_cmd_ptrs[rx_id]; 2960 } 2961 if (tgt_req) { 2962 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req); 2963 union ccb *ccb = tgt->ccb; 2964 uint32_t ct_id; 2965 2966 /* 2967 * Check to make sure we have the correct command. 2968 * The reply descriptor in the target state should 2969 * contain an IoIndex that should match the 2970 * RX_ID. 2971 * 2972 * It'd be nice to have OX_ID to crosscheck with 2973 * as well.
2974 */ 2975 ct_id = GET_IO_INDEX(tgt->reply_desc); 2976 2977 if (ct_id != rx_id) { 2978 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " 2979 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n", 2980 rx_id, ct_id); 2981 goto skip; 2982 } 2983 2984 ccb = tgt->ccb; 2985 if (ccb) { 2986 mpt_prt(mpt, 2987 "CCB (%p): lun %u flags %x status %x\n", 2988 ccb, ccb->ccb_h.target_lun, 2989 ccb->ccb_h.flags, ccb->ccb_h.status); 2990 } 2991 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " 2992 "%x nxfers %x\n", tgt->state, 2993 tgt->resid, tgt->bytes_xfered, tgt->reply_desc, 2994 tgt->nxfers); 2995 skip: 2996 if (mpt_abort_target_cmd(mpt, tgt_req)) { 2997 mpt_prt(mpt, "unable to start TargetAbort\n"); 2998 } 2999 } else { 3000 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); 3001 } 3002 memset(elsbuf, 0, 5 * (sizeof (U32))); 3003 elsbuf[0] = htobe32(0); 3004 elsbuf[1] = htobe32((ox_id << 16) | rx_id); 3005 elsbuf[2] = htobe32(0x000ffff); 3006 /* 3007 * Dork with the reply frame so that the response to it 3008 * will be correct. 3009 */ 3010 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); 3011 /* remove from active list as we're done */ 3012 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3013 req->state &= ~REQ_STATE_QUEUED; 3014 req->state |= REQ_STATE_DONE; 3015 mpt_fc_els_send_response(mpt, req, rp, 12); 3016 do_refresh = FALSE; 3017 } else { 3018 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); 3019 } 3020 if (do_refresh == TRUE) { 3021 /* remove from active list as we're done */ 3022 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3023 req->state &= ~REQ_STATE_QUEUED; 3024 req->state |= REQ_STATE_DONE; 3025 mpt_fc_post_els(mpt, req, ioindex); 3026 } 3027 return (TRUE); 3028 } 3029 3030 /* 3031 * Clean up all SCSI Initiator personality state in response 3032 * to a controller reset. 3033 */ 3034 static void 3035 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) 3036 { 3037 /* 3038 * The pending list is already run down by 3039 * the generic handler. Perform the same 3040 * operation on the timed out request list. 3041 */ 3042 mpt_complete_request_chain(mpt, &mpt->request_timeout_list, 3043 MPI_IOCSTATUS_INVALID_STATE); 3044 3045 /* 3046 * XXX: We need to repost ELS and Target Command Buffers? 3047 */ 3048 3049 /* 3050 * Inform the XPT that a bus reset has occurred. 3051 */ 3052 xpt_async(AC_BUS_RESET, mpt->path, NULL); 3053 } 3054 3055 /* 3056 * Parse additional completion information in the reply 3057 * frame for SCSI I/O requests.
3058 */ 3059 static int 3060 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, 3061 MSG_DEFAULT_REPLY *reply_frame) 3062 { 3063 union ccb *ccb; 3064 MSG_SCSI_IO_REPLY *scsi_io_reply; 3065 u_int ioc_status; 3066 u_int sstate; 3067 3068 MPT_DUMP_REPLY_FRAME(mpt, reply_frame); 3069 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST 3070 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, 3071 ("MPT SCSI I/O Handler called with incorrect reply type")); 3072 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, 3073 ("MPT SCSI I/O Handler called with continuation reply")); 3074 3075 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; 3076 ioc_status = le16toh(scsi_io_reply->IOCStatus); 3077 ioc_status &= MPI_IOCSTATUS_MASK; 3078 sstate = scsi_io_reply->SCSIState; 3079 3080 ccb = req->ccb; 3081 ccb->csio.resid = 3082 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); 3083 3084 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 3085 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { 3086 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 3087 ccb->csio.sense_resid = 3088 ccb->csio.sense_len - le32toh(scsi_io_reply->SenseCount); 3089 bcopy(req->sense_vbuf, &ccb->csio.sense_data, 3090 min(ccb->csio.sense_len, 3091 le32toh(scsi_io_reply->SenseCount))); 3092 } 3093 3094 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { 3095 /* 3096 * Tag messages rejected, but non-tagged retry 3097 * was successful. 3098 XXXX 3099 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); 3100 */ 3101 } 3102 3103 switch(ioc_status) { 3104 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3105 /* 3106 * XXX 3107 * Linux driver indicates that a zero 3108 * transfer length with this error code 3109 * indicates a CRC error. 3110 * 3111 * No need to swap the bytes for checking 3112 * against zero. 3113 */ 3114 if (scsi_io_reply->TransferCount == 0) { 3115 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3116 break; 3117 } 3118 /* FALLTHROUGH */ 3119 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 3120 case MPI_IOCSTATUS_SUCCESS: 3121 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 3122 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { 3123 /* 3124 * Status was never returned for this transaction. 3125 */ 3126 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); 3127 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { 3128 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; 3129 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); 3130 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) 3131 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); 3132 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { 3133 3134 /* XXX Handle SPI-Packet and FCP-2 reponse info. */ 3135 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3136 } else 3137 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3138 break; 3139 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: 3140 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); 3141 break; 3142 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: 3143 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3144 break; 3145 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3146 /* 3147 * Since selection timeouts and "device really not 3148 * there" are grouped into this error code, report 3149 * selection timeout. Selection timeouts are 3150 * typically retried before giving up on the device 3151 * whereas "device not there" errors are considered 3152 * unretryable. 
3153 */ 3154 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3155 break; 3156 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3157 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); 3158 break; 3159 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 3160 mpt_set_ccb_status(ccb, CAM_PATH_INVALID); 3161 break; 3162 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 3163 mpt_set_ccb_status(ccb, CAM_TID_INVALID); 3164 break; 3165 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3166 ccb->ccb_h.status = CAM_UA_TERMIO; 3167 break; 3168 case MPI_IOCSTATUS_INVALID_STATE: 3169 /* 3170 * The IOC has been reset. Emulate a bus reset. 3171 */ 3172 /* FALLTHROUGH */ 3173 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: 3174 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 3175 break; 3176 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: 3177 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: 3178 /* 3179 * Don't clobber any timeout status that has 3180 * already been set for this transaction. We 3181 * want the SCSI layer to be able to differentiate 3182 * between the command we aborted due to timeout 3183 * and any innocent bystanders. 3184 */ 3185 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) 3186 break; 3187 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); 3188 break; 3189 3190 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 3191 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); 3192 break; 3193 case MPI_IOCSTATUS_BUSY: 3194 mpt_set_ccb_status(ccb, CAM_BUSY); 3195 break; 3196 case MPI_IOCSTATUS_INVALID_FUNCTION: 3197 case MPI_IOCSTATUS_INVALID_SGL: 3198 case MPI_IOCSTATUS_INTERNAL_ERROR: 3199 case MPI_IOCSTATUS_INVALID_FIELD: 3200 default: 3201 /* XXX 3202 * Some of the above may need to kick 3203 * of a recovery action!!!! 3204 */ 3205 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; 3206 break; 3207 } 3208 3209 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3210 mpt_freeze_ccb(ccb); 3211 } 3212 3213 return (TRUE); 3214 } 3215 3216 static void 3217 mpt_action(struct cam_sim *sim, union ccb *ccb) 3218 { 3219 struct mpt_softc *mpt; 3220 struct ccb_trans_settings *cts; 3221 target_id_t tgt; 3222 lun_id_t lun; 3223 int raid_passthru; 3224 3225 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); 3226 3227 mpt = (struct mpt_softc *)cam_sim_softc(sim); 3228 raid_passthru = (sim == mpt->phydisk_sim); 3229 MPT_LOCK_ASSERT(mpt); 3230 3231 tgt = ccb->ccb_h.target_id; 3232 lun = ccb->ccb_h.target_lun; 3233 if (raid_passthru && 3234 ccb->ccb_h.func_code != XPT_PATH_INQ && 3235 ccb->ccb_h.func_code != XPT_RESET_BUS && 3236 ccb->ccb_h.func_code != XPT_RESET_DEV) { 3237 CAMLOCK_2_MPTLOCK(mpt); 3238 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 3239 MPTLOCK_2_CAMLOCK(mpt); 3240 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3241 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 3242 xpt_done(ccb); 3243 return; 3244 } 3245 MPTLOCK_2_CAMLOCK(mpt); 3246 } 3247 ccb->ccb_h.ccb_mpt_ptr = mpt; 3248 3249 switch (ccb->ccb_h.func_code) { 3250 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 3251 /* 3252 * Do a couple of preliminary checks... 
3253 */ 3254 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 3255 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 3256 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3257 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3258 break; 3259 } 3260 } 3261 /* Max supported CDB length is 16 bytes */ 3262 /* XXX Unless we implement the new 32byte message type */ 3263 if (ccb->csio.cdb_len > 3264 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { 3265 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3266 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3267 break; 3268 } 3269 #ifdef MPT_TEST_MULTIPATH 3270 if (mpt->failure_id == ccb->ccb_h.target_id) { 3271 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3272 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3273 break; 3274 } 3275 #endif 3276 ccb->csio.scsi_status = SCSI_STATUS_OK; 3277 mpt_start(sim, ccb); 3278 return; 3279 3280 case XPT_RESET_BUS: 3281 if (raid_passthru) { 3282 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3283 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3284 break; 3285 } 3286 case XPT_RESET_DEV: 3287 if (ccb->ccb_h.func_code == XPT_RESET_BUS) { 3288 if (bootverbose) { 3289 xpt_print(ccb->ccb_h.path, "reset bus\n"); 3290 } 3291 } else { 3292 xpt_print(ccb->ccb_h.path, "reset device\n"); 3293 } 3294 CAMLOCK_2_MPTLOCK(mpt); 3295 (void) mpt_bus_reset(mpt, tgt, lun, FALSE); 3296 MPTLOCK_2_CAMLOCK(mpt); 3297 3298 /* 3299 * mpt_bus_reset is always successful in that it 3300 * will fall back to a hard reset should a bus 3301 * reset attempt fail. 3302 */ 3303 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3304 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3305 break; 3306 3307 case XPT_ABORT: 3308 { 3309 union ccb *accb = ccb->cab.abort_ccb; 3310 CAMLOCK_2_MPTLOCK(mpt); 3311 switch (accb->ccb_h.func_code) { 3312 case XPT_ACCEPT_TARGET_IO: 3313 case XPT_IMMED_NOTIFY: 3314 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); 3315 break; 3316 case XPT_CONT_TARGET_IO: 3317 mpt_prt(mpt, "cannot abort active CTIOs yet\n"); 3318 ccb->ccb_h.status = CAM_UA_ABORT; 3319 break; 3320 case XPT_SCSI_IO: 3321 ccb->ccb_h.status = CAM_UA_ABORT; 3322 break; 3323 default: 3324 ccb->ccb_h.status = CAM_REQ_INVALID; 3325 break; 3326 } 3327 MPTLOCK_2_CAMLOCK(mpt); 3328 break; 3329 } 3330 3331 #ifdef CAM_NEW_TRAN_CODE 3332 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) 3333 #else 3334 #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS) 3335 #endif 3336 #define DP_DISC_ENABLE 0x1 3337 #define DP_DISC_DISABL 0x2 3338 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) 3339 3340 #define DP_TQING_ENABLE 0x4 3341 #define DP_TQING_DISABL 0x8 3342 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) 3343 3344 #define DP_WIDE 0x10 3345 #define DP_NARROW 0x20 3346 #define DP_WIDTH (DP_WIDE|DP_NARROW) 3347 3348 #define DP_SYNC 0x40 3349 3350 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 3351 { 3352 #ifdef CAM_NEW_TRAN_CODE 3353 struct ccb_trans_settings_scsi *scsi; 3354 struct ccb_trans_settings_spi *spi; 3355 #endif 3356 uint8_t dval; 3357 u_int period; 3358 u_int offset; 3359 int i, j; 3360 3361 cts = &ccb->cts; 3362 3363 if (mpt->is_fc || mpt->is_sas) { 3364 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3365 break; 3366 } 3367 3368 #ifdef CAM_NEW_TRAN_CODE 3369 scsi = &cts->proto_specific.scsi; 3370 spi = &cts->xport_specific.spi; 3371 3372 /* 3373 * We can be called just to valid transport and proto versions 3374 */ 3375 if (scsi->valid == 0 && spi->valid == 0) { 3376 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3377 break; 3378 } 3379 #endif 3380 3381 /* 3382 * Skip attempting settings on RAID volume disks. 
3383 * Other devices on the bus get the normal treatment. 3384 */ 3385 if (mpt->phydisk_sim && raid_passthru == 0 && 3386 mpt_is_raid_volume(mpt, tgt) != 0) { 3387 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3388 "no transfer settings for RAID vols\n"); 3389 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3390 break; 3391 } 3392 3393 i = mpt->mpt_port_page2.PortSettings & 3394 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 3395 j = mpt->mpt_port_page2.PortFlags & 3396 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 3397 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && 3398 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { 3399 mpt_lprt(mpt, MPT_PRT_ALWAYS, 3400 "honoring BIOS transfer negotiations\n"); 3401 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3402 break; 3403 } 3404 3405 dval = 0; 3406 period = 0; 3407 offset = 0; 3408 3409 #ifndef CAM_NEW_TRAN_CODE 3410 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 3411 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ? 3412 DP_DISC_ENABLE : DP_DISC_DISABL; 3413 } 3414 3415 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 3416 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ? 3417 DP_TQING_ENABLE : DP_TQING_DISABL; 3418 } 3419 3420 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 3421 dval |= cts->bus_width ? DP_WIDE : DP_NARROW; 3422 } 3423 3424 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 3425 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) { 3426 dval |= DP_SYNC; 3427 period = cts->sync_period; 3428 offset = cts->sync_offset; 3429 } 3430 #else 3431 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 3432 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? 3433 DP_DISC_ENABLE : DP_DISC_DISABL; 3434 } 3435 3436 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 3437 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 3438 DP_TQING_ENABLE : DP_TQING_DISABL; 3439 } 3440 3441 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 3442 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? 
3443 DP_WIDE : DP_NARROW; 3444 } 3445 3446 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { 3447 dval |= DP_SYNC; 3448 offset = spi->sync_offset; 3449 } else { 3450 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3451 &mpt->mpt_dev_page1[tgt]; 3452 offset = ptr->RequestedParameters; 3453 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3454 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3455 } 3456 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { 3457 dval |= DP_SYNC; 3458 period = spi->sync_period; 3459 } else { 3460 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3461 &mpt->mpt_dev_page1[tgt]; 3462 period = ptr->RequestedParameters; 3463 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3464 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3465 } 3466 #endif 3467 CAMLOCK_2_MPTLOCK(mpt); 3468 if (dval & DP_DISC_ENABLE) { 3469 mpt->mpt_disc_enable |= (1 << tgt); 3470 } else if (dval & DP_DISC_DISABL) { 3471 mpt->mpt_disc_enable &= ~(1 << tgt); 3472 } 3473 if (dval & DP_TQING_ENABLE) { 3474 mpt->mpt_tag_enable |= (1 << tgt); 3475 } else if (dval & DP_TQING_DISABL) { 3476 mpt->mpt_tag_enable &= ~(1 << tgt); 3477 } 3478 if (dval & DP_WIDTH) { 3479 mpt_setwidth(mpt, tgt, 1); 3480 } 3481 if (dval & DP_SYNC) { 3482 mpt_setsync(mpt, tgt, period, offset); 3483 } 3484 if (dval == 0) { 3485 MPTLOCK_2_CAMLOCK(mpt); 3486 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3487 break; 3488 } 3489 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3490 "set [%d]: 0x%x period 0x%x offset %d\n", 3491 tgt, dval, period, offset); 3492 if (mpt_update_spi_config(mpt, tgt)) { 3493 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3494 } else { 3495 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3496 } 3497 MPTLOCK_2_CAMLOCK(mpt); 3498 break; 3499 } 3500 case XPT_GET_TRAN_SETTINGS: 3501 { 3502 #ifdef CAM_NEW_TRAN_CODE 3503 struct ccb_trans_settings_scsi *scsi; 3504 cts = &ccb->cts; 3505 cts->protocol = PROTO_SCSI; 3506 if (mpt->is_fc) { 3507 struct ccb_trans_settings_fc *fc = 3508 &cts->xport_specific.fc; 3509 cts->protocol_version = SCSI_REV_SPC; 3510 cts->transport = XPORT_FC; 3511 cts->transport_version = 0; 3512 fc->valid = CTS_FC_VALID_SPEED; 3513 fc->bitrate = 100000; 3514 } else if (mpt->is_sas) { 3515 struct ccb_trans_settings_sas *sas = 3516 &cts->xport_specific.sas; 3517 cts->protocol_version = SCSI_REV_SPC2; 3518 cts->transport = XPORT_SAS; 3519 cts->transport_version = 0; 3520 sas->valid = CTS_SAS_VALID_SPEED; 3521 sas->bitrate = 300000; 3522 } else { 3523 cts->protocol_version = SCSI_REV_2; 3524 cts->transport = XPORT_SPI; 3525 cts->transport_version = 2; 3526 if (mpt_get_spi_settings(mpt, cts) != 0) { 3527 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3528 break; 3529 } 3530 } 3531 scsi = &cts->proto_specific.scsi; 3532 scsi->valid = CTS_SCSI_VALID_TQ; 3533 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 3534 #else 3535 cts = &ccb->cts; 3536 if (mpt->is_fc) { 3537 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3538 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3539 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3540 } else if (mpt->is_sas) { 3541 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3542 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3543 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3544 } else if (mpt_get_spi_settings(mpt, cts) != 0) { 3545 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3546 break; 3547 } 3548 #endif 3549 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3550 break; 3551 } 3552 case XPT_CALC_GEOMETRY: 3553 { 3554 struct ccb_calc_geometry *ccg; 3555 3556 ccg = &ccb->ccg; 3557 if (ccg->block_size == 0) { 3558 ccb->ccb_h.status &= 
~CAM_SIM_QUEUED; 3559 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3560 break; 3561 } 3562 mpt_calc_geometry(ccg, /*extended*/1); 3563 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 3564 break; 3565 } 3566 case XPT_PATH_INQ: /* Path routing inquiry */ 3567 { 3568 struct ccb_pathinq *cpi = &ccb->cpi; 3569 3570 cpi->version_num = 1; 3571 cpi->target_sprt = 0; 3572 cpi->hba_eng_cnt = 0; 3573 cpi->max_target = mpt->port_facts[0].MaxDevices - 1; 3574 /* 3575 * FC cards report MAX_DEVICES of 512, but 3576 * the MSG_SCSI_IO_REQUEST target id field 3577 * is only 8 bits. Until we fix the driver 3578 * to support 'channels' for bus overflow, 3579 * just limit it. 3580 */ 3581 if (cpi->max_target > 255) { 3582 cpi->max_target = 255; 3583 } 3584 3585 /* 3586 * VMware ESX reports > 16 devices and then dies when we probe. 3587 */ 3588 if (mpt->is_spi && cpi->max_target > 15) { 3589 cpi->max_target = 15; 3590 } 3591 if (mpt->is_spi) 3592 cpi->max_lun = 7; 3593 else 3594 cpi->max_lun = MPT_MAX_LUNS; 3595 cpi->initiator_id = mpt->mpt_ini_id; 3596 cpi->bus_id = cam_sim_bus(sim); 3597 3598 /* 3599 * The base speed is the speed of the underlying connection. 3600 */ 3601 #ifdef CAM_NEW_TRAN_CODE 3602 cpi->protocol = PROTO_SCSI; 3603 if (mpt->is_fc) { 3604 cpi->hba_misc = PIM_NOBUSRESET; 3605 cpi->base_transfer_speed = 100000; 3606 cpi->hba_inquiry = PI_TAG_ABLE; 3607 cpi->transport = XPORT_FC; 3608 cpi->transport_version = 0; 3609 cpi->protocol_version = SCSI_REV_SPC; 3610 } else if (mpt->is_sas) { 3611 cpi->hba_misc = PIM_NOBUSRESET; 3612 cpi->base_transfer_speed = 300000; 3613 cpi->hba_inquiry = PI_TAG_ABLE; 3614 cpi->transport = XPORT_SAS; 3615 cpi->transport_version = 0; 3616 cpi->protocol_version = SCSI_REV_SPC2; 3617 } else { 3618 cpi->hba_misc = PIM_SEQSCAN; 3619 cpi->base_transfer_speed = 3300; 3620 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3621 cpi->transport = XPORT_SPI; 3622 cpi->transport_version = 2; 3623 cpi->protocol_version = SCSI_REV_2; 3624 } 3625 #else 3626 if (mpt->is_fc) { 3627 cpi->hba_misc = PIM_NOBUSRESET; 3628 cpi->base_transfer_speed = 100000; 3629 cpi->hba_inquiry = PI_TAG_ABLE; 3630 } else if (mpt->is_sas) { 3631 cpi->hba_misc = PIM_NOBUSRESET; 3632 cpi->base_transfer_speed = 300000; 3633 cpi->hba_inquiry = PI_TAG_ABLE; 3634 } else { 3635 cpi->hba_misc = PIM_SEQSCAN; 3636 cpi->base_transfer_speed = 3300; 3637 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3638 } 3639 #endif 3640 3641 /* 3642 * We give our fake RAID passhtru bus a width that is MaxVolumes 3643 * wide and restrict it to one lun. 
3644 */ 3645 if (raid_passthru) { 3646 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; 3647 cpi->initiator_id = cpi->max_target + 1; 3648 cpi->max_lun = 0; 3649 } 3650 3651 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { 3652 cpi->hba_misc |= PIM_NOINITIATOR; 3653 } 3654 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 3655 cpi->target_sprt = 3656 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3657 } else { 3658 cpi->target_sprt = 0; 3659 } 3660 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3661 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); 3662 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3663 cpi->unit_number = cam_sim_unit(sim); 3664 cpi->ccb_h.status = CAM_REQ_CMP; 3665 break; 3666 } 3667 case XPT_EN_LUN: /* Enable LUN as a target */ 3668 { 3669 int result; 3670 3671 CAMLOCK_2_MPTLOCK(mpt); 3672 if (ccb->cel.enable) 3673 result = mpt_enable_lun(mpt, 3674 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3675 else 3676 result = mpt_disable_lun(mpt, 3677 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3678 MPTLOCK_2_CAMLOCK(mpt); 3679 if (result == 0) { 3680 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3681 } else { 3682 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3683 } 3684 break; 3685 } 3686 case XPT_NOTIFY_ACK: /* recycle notify ack */ 3687 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 3688 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 3689 { 3690 tgt_resource_t *trtp; 3691 lun_id_t lun = ccb->ccb_h.target_lun; 3692 ccb->ccb_h.sim_priv.entries[0].field = 0; 3693 ccb->ccb_h.sim_priv.entries[1].ptr = mpt; 3694 ccb->ccb_h.flags = 0; 3695 3696 if (lun == CAM_LUN_WILDCARD) { 3697 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 3698 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3699 break; 3700 } 3701 trtp = &mpt->trt_wildcard; 3702 } else if (lun >= MPT_MAX_LUNS) { 3703 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3704 break; 3705 } else { 3706 trtp = &mpt->trt[lun]; 3707 } 3708 CAMLOCK_2_MPTLOCK(mpt); 3709 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 3710 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3711 "Put FREE ATIO %p lun %d\n", ccb, lun); 3712 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, 3713 sim_links.stqe); 3714 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 3715 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3716 "Put FREE INOT lun %d\n", lun); 3717 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, 3718 sim_links.stqe); 3719 } else { 3720 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); 3721 } 3722 mpt_set_ccb_status(ccb, CAM_REQ_INPROG); 3723 MPTLOCK_2_CAMLOCK(mpt); 3724 return; 3725 } 3726 case XPT_CONT_TARGET_IO: 3727 CAMLOCK_2_MPTLOCK(mpt); 3728 mpt_target_start_io(mpt, ccb); 3729 MPTLOCK_2_CAMLOCK(mpt); 3730 return; 3731 3732 default: 3733 ccb->ccb_h.status = CAM_REQ_INVALID; 3734 break; 3735 } 3736 xpt_done(ccb); 3737 } 3738 3739 static int 3740 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) 3741 { 3742 #ifdef CAM_NEW_TRAN_CODE 3743 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 3744 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 3745 #endif 3746 target_id_t tgt; 3747 uint32_t dval, pval, oval; 3748 int rv; 3749 3750 if (IS_CURRENT_SETTINGS(cts) == 0) { 3751 tgt = cts->ccb_h.target_id; 3752 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { 3753 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { 3754 return (-1); 3755 } 3756 } else { 3757 tgt = cts->ccb_h.target_id; 3758 } 3759 3760 /* 3761 * We aren't looking at Port Page 2 BIOS settings here- 3762 * sometimes these have been known to be bogus 
XXX. 3763 * 3764 * For user settings, we pick the max from port page 0 3765 * 3766 * For current settings we read the current settings out from 3767 * device page 0 for that target. 3768 */ 3769 if (IS_CURRENT_SETTINGS(cts)) { 3770 CONFIG_PAGE_SCSI_DEVICE_0 tmp; 3771 dval = 0; 3772 3773 CAMLOCK_2_MPTLOCK(mpt); 3774 tmp = mpt->mpt_dev_page0[tgt]; 3775 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, 3776 sizeof(tmp), FALSE, 5000); 3777 if (rv) { 3778 MPTLOCK_2_CAMLOCK(mpt); 3779 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); 3780 return (rv); 3781 } 3782 mpt2host_config_page_scsi_device_0(&tmp); 3783 3784 MPTLOCK_2_CAMLOCK(mpt); 3785 mpt_lprt(mpt, MPT_PRT_DEBUG, 3786 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, 3787 tmp.NegotiatedParameters, tmp.Information); 3788 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? 3789 DP_WIDE : DP_NARROW; 3790 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? 3791 DP_DISC_ENABLE : DP_DISC_DISABL; 3792 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? 3793 DP_TQING_ENABLE : DP_TQING_DISABL; 3794 oval = tmp.NegotiatedParameters; 3795 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; 3796 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; 3797 pval = tmp.NegotiatedParameters; 3798 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; 3799 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; 3800 mpt->mpt_dev_page0[tgt] = tmp; 3801 } else { 3802 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; 3803 oval = mpt->mpt_port_page0.Capabilities; 3804 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); 3805 pval = mpt->mpt_port_page0.Capabilities; 3806 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); 3807 } 3808 3809 #ifndef CAM_NEW_TRAN_CODE 3810 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 3811 cts->valid = 0; 3812 cts->sync_period = pval; 3813 cts->sync_offset = oval; 3814 cts->valid |= CCB_TRANS_SYNC_RATE_VALID; 3815 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID; 3816 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID; 3817 if (dval & DP_WIDE) { 3818 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3819 } else { 3820 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3821 } 3822 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3823 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3824 if (dval & DP_DISC_ENABLE) { 3825 cts->flags |= CCB_TRANS_DISC_ENB; 3826 } 3827 if (dval & DP_TQING_ENABLE) { 3828 cts->flags |= CCB_TRANS_TAG_ENB; 3829 } 3830 } 3831 #else 3832 spi->valid = 0; 3833 scsi->valid = 0; 3834 spi->flags = 0; 3835 scsi->flags = 0; 3836 spi->sync_offset = oval; 3837 spi->sync_period = pval; 3838 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3839 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3840 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3841 if (dval & DP_WIDE) { 3842 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3843 } else { 3844 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3845 } 3846 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3847 scsi->valid = CTS_SCSI_VALID_TQ; 3848 if (dval & DP_TQING_ENABLE) { 3849 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3850 } 3851 spi->valid |= CTS_SPI_VALID_DISC; 3852 if (dval & DP_DISC_ENABLE) { 3853 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3854 } 3855 } 3856 #endif 3857 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3858 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, 3859 IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM ", dval, pval, oval); 3860 return (0); 3861 } 3862 3863 static void 3864 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 3865 { 3866 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3867 3868 ptr = &mpt->mpt_dev_page1[tgt]; 3869 if (onoff) { 3870 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 3871 } else { 3872 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 3873 } 3874 } 3875 3876 static void 3877 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 3878 { 3879 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3880 3881 ptr = &mpt->mpt_dev_page1[tgt]; 3882 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3883 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3884 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 3885 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 3886 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 3887 if (period == 0) { 3888 return; 3889 } 3890 ptr->RequestedParameters |= 3891 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3892 ptr->RequestedParameters |= 3893 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3894 if (period < 0xa) { 3895 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 3896 } 3897 if (period < 0x9) { 3898 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 3899 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 3900 } 3901 } 3902 3903 static int 3904 mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 3905 { 3906 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 3907 int rv; 3908 3909 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3910 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 3911 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); 3912 tmp = mpt->mpt_dev_page1[tgt]; 3913 host2mpt_config_page_scsi_device_1(&tmp); 3914 rv = mpt_write_cur_cfg_page(mpt, tgt, 3915 &tmp.Header, sizeof(tmp), FALSE, 5000); 3916 if (rv) { 3917 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 3918 return (-1); 3919 } 3920 return (0); 3921 } 3922 3923 static void 3924 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended) 3925 { 3926 #if __FreeBSD_version >= 500000 3927 cam_calc_geometry(ccg, extended); 3928 #else 3929 uint32_t size_mb; 3930 uint32_t secs_per_cylinder; 3931 3932 if (ccg->block_size == 0) { 3933 ccg->ccb_h.status = CAM_REQ_INVALID; 3934 return; 3935 } 3936 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); 3937 if (size_mb > 1024 && extended) { 3938 ccg->heads = 255; 3939 ccg->secs_per_track = 63; 3940 } else { 3941 ccg->heads = 64; 3942 ccg->secs_per_track = 32; 3943 } 3944 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 3945 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 3946 ccg->ccb_h.status = CAM_REQ_CMP; 3947 #endif 3948 } 3949 3950 /****************************** Timeout Recovery ******************************/ 3951 static int 3952 mpt_spawn_recovery_thread(struct mpt_softc *mpt) 3953 { 3954 int error; 3955 3956 error = mpt_kthread_create(mpt_recovery_thread, mpt, 3957 &mpt->recovery_thread, /*flags*/0, 3958 /*altstack*/0, "mpt_recovery%d", mpt->unit); 3959 return (error); 3960 } 3961 3962 static void 3963 mpt_terminate_recovery_thread(struct mpt_softc *mpt) 3964 { 3965 if (mpt->recovery_thread == NULL) { 3966 return; 3967 } 3968 mpt->shutdwn_recovery = 1; 3969 wakeup(mpt); 3970 /* 3971 * Sleep on a slightly different location 3972 * for this interlock just for added safety. 
3973 */ 3974 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); 3975 } 3976 3977 static void 3978 mpt_recovery_thread(void *arg) 3979 { 3980 struct mpt_softc *mpt; 3981 3982 mpt = (struct mpt_softc *)arg; 3983 MPT_LOCK(mpt); 3984 for (;;) { 3985 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3986 if (mpt->shutdwn_recovery == 0) { 3987 mpt_sleep(mpt, mpt, PUSER, "idle", 0); 3988 } 3989 } 3990 if (mpt->shutdwn_recovery != 0) { 3991 break; 3992 } 3993 mpt_recover_commands(mpt); 3994 } 3995 mpt->recovery_thread = NULL; 3996 wakeup(&mpt->recovery_thread); 3997 MPT_UNLOCK(mpt); 3998 mpt_kthread_exit(0); 3999 } 4000 4001 static int 4002 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, 4003 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) 4004 { 4005 MSG_SCSI_TASK_MGMT *tmf_req; 4006 int error; 4007 4008 /* 4009 * Wait for any current TMF request to complete. 4010 * We're only allowed to issue one TMF at a time. 4011 */ 4012 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, 4013 sleep_ok, MPT_TMF_MAX_TIMEOUT); 4014 if (error != 0) { 4015 mpt_reset(mpt, TRUE); 4016 return (ETIMEDOUT); 4017 } 4018 4019 mpt_assign_serno(mpt, mpt->tmf_req); 4020 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; 4021 4022 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; 4023 memset(tmf_req, 0, sizeof(*tmf_req)); 4024 tmf_req->TargetID = target; 4025 tmf_req->Bus = channel; 4026 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 4027 tmf_req->TaskType = type; 4028 tmf_req->MsgFlags = flags; 4029 tmf_req->MsgContext = 4030 htole32(mpt->tmf_req->index | scsi_tmf_handler_id); 4031 if (lun > MPT_MAX_LUNS) { 4032 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4033 tmf_req->LUN[1] = lun & 0xff; 4034 } else { 4035 tmf_req->LUN[1] = lun; 4036 } 4037 tmf_req->TaskMsgContext = abort_ctx; 4038 4039 mpt_lprt(mpt, MPT_PRT_DEBUG, 4040 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, 4041 mpt->tmf_req->serno, tmf_req->MsgContext); 4042 if (mpt->verbose > MPT_PRT_DEBUG) { 4043 mpt_print_request(tmf_req); 4044 } 4045 4046 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, 4047 ("mpt_scsi_send_tmf: tmf_req already on pending list")); 4048 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); 4049 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); 4050 if (error != MPT_OK) { 4051 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); 4052 mpt->tmf_req->state = REQ_STATE_FREE; 4053 mpt_reset(mpt, TRUE); 4054 } 4055 return (error); 4056 } 4057 4058 /* 4059 * When a command times out, it is placed on the request_timeout_list 4060 * and we wake our recovery thread. The MPT-Fusion architecture supports 4061 * only a single TMF operation at a time, so we serially abort/bdr, etc., 4062 * the timed-out transactions. The next TMF is issued either by the 4063 * completion handler of the current TMF waking our recovery thread, 4064 * or the TMF timeout handler causing a hard reset sequence. 4065 */ 4066 static void 4067 mpt_recover_commands(struct mpt_softc *mpt) 4068 { 4069 request_t *req; 4070 union ccb *ccb; 4071 int error; 4072 4073 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4074 /* 4075 * No work to do- leave. 4076 */ 4077 mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); 4078 return; 4079 } 4080 4081 /* 4082 * Flush any commands whose completion coincides with their timeout.
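 * Calling mpt_intr() by hand polls the reply queue so that any completion
 * which raced with the timeout is processed now; the reply handler pulls
 * such requests off request_timeout_list before we go to the trouble of
 * aborting them.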
4083 */ 4084 mpt_intr(mpt); 4085 4086 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4087 /* 4088 * The timedout commands have already 4089 * completed. This typically means 4090 * that either the timeout value was on 4091 * the hairy edge of what the device 4092 * requires or - more likely - interrupts 4093 * are not happening. 4094 */ 4095 mpt_prt(mpt, "Timedout requests already complete. " 4096 "Interrupts may not be functioning.\n"); 4097 mpt_enable_ints(mpt); 4098 return; 4099 } 4100 4101 /* 4102 * We have no visibility into the current state of the 4103 * controller, so attempt to abort the commands in the 4104 * order they timed-out. For initiator commands, we 4105 * depend on the reply handler pulling requests off 4106 * the timeout list. 4107 */ 4108 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { 4109 uint16_t status; 4110 uint8_t response; 4111 MSG_REQUEST_HEADER *hdrp = req->req_vbuf; 4112 4113 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", 4114 req, req->serno, hdrp->Function); 4115 ccb = req->ccb; 4116 if (ccb == NULL) { 4117 mpt_prt(mpt, "null ccb in timed out request. " 4118 "Resetting Controller.\n"); 4119 mpt_reset(mpt, TRUE); 4120 continue; 4121 } 4122 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); 4123 4124 /* 4125 * Check to see if this is not an initiator command and 4126 * deal with it differently if it is. 4127 */ 4128 switch (hdrp->Function) { 4129 case MPI_FUNCTION_SCSI_IO_REQUEST: 4130 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 4131 break; 4132 default: 4133 /* 4134 * XXX: FIX ME: need to abort target assists... 4135 */ 4136 mpt_prt(mpt, "just putting it back on the pend q\n"); 4137 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 4138 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, 4139 links); 4140 continue; 4141 } 4142 4143 error = mpt_scsi_send_tmf(mpt, 4144 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 4145 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 4146 htole32(req->index | scsi_io_handler_id), TRUE); 4147 4148 if (error != 0) { 4149 /* 4150 * mpt_scsi_send_tmf hard resets on failure, so no 4151 * need to do so here. Our queue should be emptied 4152 * by the hard reset. 4153 */ 4154 continue; 4155 } 4156 4157 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 4158 REQ_STATE_DONE, TRUE, 500); 4159 4160 status = le16toh(mpt->tmf_req->IOCStatus); 4161 response = mpt->tmf_req->ResponseCode; 4162 mpt->tmf_req->state = REQ_STATE_FREE; 4163 4164 if (error != 0) { 4165 /* 4166 * If we've errored out,, reset the controller. 4167 */ 4168 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " 4169 "Resetting controller\n"); 4170 mpt_reset(mpt, TRUE); 4171 continue; 4172 } 4173 4174 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 4175 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " 4176 "Resetting controller.\n", status); 4177 mpt_reset(mpt, TRUE); 4178 continue; 4179 } 4180 4181 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 4182 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 4183 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. 
" 4184 "Resetting controller.\n", response); 4185 mpt_reset(mpt, TRUE); 4186 continue; 4187 } 4188 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 4189 } 4190 } 4191 4192 /************************ Target Mode Support ****************************/ 4193 static void 4194 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 4195 { 4196 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 4197 PTR_SGE_TRANSACTION32 tep; 4198 PTR_SGE_SIMPLE32 se; 4199 bus_addr_t paddr; 4200 uint32_t fl; 4201 4202 paddr = req->req_pbuf; 4203 paddr += MPT_RQSL(mpt); 4204 4205 fc = req->req_vbuf; 4206 memset(fc, 0, MPT_REQUEST_AREA); 4207 fc->BufferCount = 1; 4208 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 4209 fc->MsgContext = htole32(req->index | fc_els_handler_id); 4210 4211 /* 4212 * Okay, set up ELS buffer pointers. ELS buffer pointers 4213 * consist of a TE SGL element (with details length of zero) 4214 * followe by a SIMPLE SGL element which holds the address 4215 * of the buffer. 4216 */ 4217 4218 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 4219 4220 tep->ContextSize = 4; 4221 tep->Flags = 0; 4222 tep->TransactionContext[0] = htole32(ioindex); 4223 4224 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 4225 fl = 4226 MPI_SGE_FLAGS_HOST_TO_IOC | 4227 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4228 MPI_SGE_FLAGS_LAST_ELEMENT | 4229 MPI_SGE_FLAGS_END_OF_LIST | 4230 MPI_SGE_FLAGS_END_OF_BUFFER; 4231 fl <<= MPI_SGE_FLAGS_SHIFT; 4232 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 4233 se->FlagsLength = htole32(fl); 4234 se->Address = htole32((uint32_t) paddr); 4235 mpt_lprt(mpt, MPT_PRT_DEBUG, 4236 "add ELS index %d ioindex %d for %p:%u\n", 4237 req->index, ioindex, req, req->serno); 4238 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 4239 ("mpt_fc_post_els: request not locked")); 4240 mpt_send_cmd(mpt, req); 4241 } 4242 4243 static void 4244 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 4245 { 4246 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 4247 PTR_CMD_BUFFER_DESCRIPTOR cb; 4248 bus_addr_t paddr; 4249 4250 paddr = req->req_pbuf; 4251 paddr += MPT_RQSL(mpt); 4252 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 4253 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 4254 4255 fc = req->req_vbuf; 4256 fc->BufferCount = 1; 4257 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 4258 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4259 4260 cb = &fc->Buffer[0]; 4261 cb->IoIndex = htole16(ioindex); 4262 cb->u.PhysicalAddress32 = htole32((U32) paddr); 4263 4264 mpt_check_doorbell(mpt); 4265 mpt_send_cmd(mpt, req); 4266 } 4267 4268 static int 4269 mpt_add_els_buffers(struct mpt_softc *mpt) 4270 { 4271 int i; 4272 4273 if (mpt->is_fc == 0) { 4274 return (TRUE); 4275 } 4276 4277 if (mpt->els_cmds_allocated) { 4278 return (TRUE); 4279 } 4280 4281 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), 4282 M_DEVBUF, M_NOWAIT | M_ZERO); 4283 4284 if (mpt->els_cmd_ptrs == NULL) { 4285 return (FALSE); 4286 } 4287 4288 /* 4289 * Feed the chip some ELS buffer resources 4290 */ 4291 for (i = 0; i < MPT_MAX_ELS; i++) { 4292 request_t *req = mpt_get_request(mpt, FALSE); 4293 if (req == NULL) { 4294 break; 4295 } 4296 req->state |= REQ_STATE_LOCKED; 4297 mpt->els_cmd_ptrs[i] = req; 4298 mpt_fc_post_els(mpt, req, i); 4299 } 4300 4301 if (i == 0) { 4302 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 4303 free(mpt->els_cmd_ptrs, M_DEVBUF); 4304 mpt->els_cmd_ptrs = NULL; 4305 return (FALSE); 4306 } 4307 if (i != MPT_MAX_ELS) { 4308 mpt_lprt(mpt, MPT_PRT_INFO, 4309 
"only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 4310 } 4311 mpt->els_cmds_allocated = i; 4312 return(TRUE); 4313 } 4314 4315 static int 4316 mpt_add_target_commands(struct mpt_softc *mpt) 4317 { 4318 int i, max; 4319 4320 if (mpt->tgt_cmd_ptrs) { 4321 return (TRUE); 4322 } 4323 4324 max = MPT_MAX_REQUESTS(mpt) >> 1; 4325 if (max > mpt->mpt_max_tgtcmds) { 4326 max = mpt->mpt_max_tgtcmds; 4327 } 4328 mpt->tgt_cmd_ptrs = 4329 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 4330 if (mpt->tgt_cmd_ptrs == NULL) { 4331 mpt_prt(mpt, 4332 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 4333 return (FALSE); 4334 } 4335 4336 for (i = 0; i < max; i++) { 4337 request_t *req; 4338 4339 req = mpt_get_request(mpt, FALSE); 4340 if (req == NULL) { 4341 break; 4342 } 4343 req->state |= REQ_STATE_LOCKED; 4344 mpt->tgt_cmd_ptrs[i] = req; 4345 mpt_post_target_command(mpt, req, i); 4346 } 4347 4348 4349 if (i == 0) { 4350 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 4351 free(mpt->tgt_cmd_ptrs, M_DEVBUF); 4352 mpt->tgt_cmd_ptrs = NULL; 4353 return (FALSE); 4354 } 4355 4356 mpt->tgt_cmds_allocated = i; 4357 4358 if (i < max) { 4359 mpt_lprt(mpt, MPT_PRT_INFO, 4360 "added %d of %d target bufs\n", i, max); 4361 } 4362 return (i); 4363 } 4364 4365 static int 4366 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4367 { 4368 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4369 mpt->twildcard = 1; 4370 } else if (lun >= MPT_MAX_LUNS) { 4371 return (EINVAL); 4372 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4373 return (EINVAL); 4374 } 4375 if (mpt->tenabled == 0) { 4376 if (mpt->is_fc) { 4377 (void) mpt_fc_reset_link(mpt, 0); 4378 } 4379 mpt->tenabled = 1; 4380 } 4381 if (lun == CAM_LUN_WILDCARD) { 4382 mpt->trt_wildcard.enabled = 1; 4383 } else { 4384 mpt->trt[lun].enabled = 1; 4385 } 4386 return (0); 4387 } 4388 4389 static int 4390 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4391 { 4392 int i; 4393 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4394 mpt->twildcard = 0; 4395 } else if (lun >= MPT_MAX_LUNS) { 4396 return (EINVAL); 4397 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4398 return (EINVAL); 4399 } 4400 if (lun == CAM_LUN_WILDCARD) { 4401 mpt->trt_wildcard.enabled = 0; 4402 } else { 4403 mpt->trt[lun].enabled = 0; 4404 } 4405 for (i = 0; i < MPT_MAX_LUNS; i++) { 4406 if (mpt->trt[lun].enabled) { 4407 break; 4408 } 4409 } 4410 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4411 if (mpt->is_fc) { 4412 (void) mpt_fc_reset_link(mpt, 0); 4413 } 4414 mpt->tenabled = 0; 4415 } 4416 return (0); 4417 } 4418 4419 /* 4420 * Called with MPT lock held 4421 */ 4422 static void 4423 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4424 { 4425 struct ccb_scsiio *csio = &ccb->csio; 4426 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4427 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4428 4429 switch (tgt->state) { 4430 case TGT_STATE_IN_CAM: 4431 break; 4432 case TGT_STATE_MOVING_DATA: 4433 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4434 xpt_freeze_simq(mpt->sim, 1); 4435 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4436 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4437 MPTLOCK_2_CAMLOCK(mpt); 4438 xpt_done(ccb); 4439 CAMLOCK_2_MPTLOCK(mpt); 4440 return; 4441 default: 4442 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4443 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4444 mpt_tgt_dump_req_state(mpt, cmd_req); 4445 mpt_set_ccb_status(ccb, 
CAM_REQ_CMP_ERR); 4446 MPTLOCK_2_CAMLOCK(mpt); 4447 xpt_done(ccb); 4448 CAMLOCK_2_MPTLOCK(mpt); 4449 return; 4450 } 4451 4452 if (csio->dxfer_len) { 4453 bus_dmamap_callback_t *cb; 4454 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4455 request_t *req; 4456 4457 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, 4458 ("dxfer_len %u but direction is NONE\n", csio->dxfer_len)); 4459 4460 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4461 if (mpt->outofbeer == 0) { 4462 mpt->outofbeer = 1; 4463 xpt_freeze_simq(mpt->sim, 1); 4464 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4465 } 4466 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4467 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4468 MPTLOCK_2_CAMLOCK(mpt); 4469 xpt_done(ccb); 4470 CAMLOCK_2_MPTLOCK(mpt); 4471 return; 4472 } 4473 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4474 if (sizeof (bus_addr_t) > 4) { 4475 cb = mpt_execute_req_a64; 4476 } else { 4477 cb = mpt_execute_req; 4478 } 4479 4480 req->ccb = ccb; 4481 ccb->ccb_h.ccb_req_ptr = req; 4482 4483 /* 4484 * Record the currently active ccb and the 4485 * request for it in our target state area. 4486 */ 4487 tgt->ccb = ccb; 4488 tgt->req = req; 4489 4490 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4491 ta = req->req_vbuf; 4492 4493 if (mpt->is_sas) { 4494 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4495 cmd_req->req_vbuf; 4496 ta->QueueTag = ssp->InitiatorTag; 4497 } else if (mpt->is_spi) { 4498 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4499 cmd_req->req_vbuf; 4500 ta->QueueTag = sp->Tag; 4501 } 4502 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4503 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4504 ta->ReplyWord = htole32(tgt->reply_desc); 4505 if (csio->ccb_h.target_lun > MPT_MAX_LUNS) { 4506 ta->LUN[0] = 4507 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); 4508 ta->LUN[1] = csio->ccb_h.target_lun & 0xff; 4509 } else { 4510 ta->LUN[1] = csio->ccb_h.target_lun; 4511 } 4512 4513 ta->RelativeOffset = tgt->bytes_xfered; 4514 ta->DataLength = ccb->csio.dxfer_len; 4515 if (ta->DataLength > tgt->resid) { 4516 ta->DataLength = tgt->resid; 4517 } 4518 4519 /* 4520 * XXX Should be done after data transfer completes? 4521 */ 4522 tgt->resid -= csio->dxfer_len; 4523 tgt->bytes_xfered += csio->dxfer_len; 4524 4525 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 4526 ta->TargetAssistFlags |= 4527 TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4528 } 4529 4530 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4531 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 4532 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 4533 ta->TargetAssistFlags |= 4534 TARGET_ASSIST_FLAGS_AUTO_STATUS; 4535 } 4536 #endif 4537 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; 4538 4539 mpt_lprt(mpt, MPT_PRT_DEBUG, 4540 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " 4541 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, 4542 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); 4543 4544 MPTLOCK_2_CAMLOCK(mpt); 4545 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 4546 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 4547 int error; 4548 int s = splsoftvm(); 4549 error = bus_dmamap_load(mpt->buffer_dmat, 4550 req->dmap, csio->data_ptr, csio->dxfer_len, 4551 cb, req, 0); 4552 splx(s); 4553 if (error == EINPROGRESS) { 4554 xpt_freeze_simq(mpt->sim, 1); 4555 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4556 } 4557 } else { 4558 /* 4559 * We have been given a pointer to single 4560 * physical buffer. 
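 * (CAM_DATA_PHYS case: data_ptr is already a physical address, so we
 * synthesize a one-entry S/G list and invoke the dma callback directly
 * rather than calling bus_dmamap_load().)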
4561 */ 4562 struct bus_dma_segment seg; 4563 seg.ds_addr = (bus_addr_t) 4564 (vm_offset_t)csio->data_ptr; 4565 seg.ds_len = csio->dxfer_len; 4566 (*cb)(req, &seg, 1, 0); 4567 } 4568 } else { 4569 /* 4570 * We have been given a list of addresses. 4571 * This case could be easily supported but they are not 4572 * currently generated by the CAM subsystem so there 4573 * is no point in wasting the time right now. 4574 */ 4575 struct bus_dma_segment *sgs; 4576 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 4577 (*cb)(req, NULL, 0, EFAULT); 4578 } else { 4579 /* Just use the segments provided */ 4580 sgs = (struct bus_dma_segment *)csio->data_ptr; 4581 (*cb)(req, sgs, csio->sglist_cnt, 0); 4582 } 4583 } 4584 CAMLOCK_2_MPTLOCK(mpt); 4585 } else { 4586 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 4587 4588 /* 4589 * XXX: I don't know why this seems to happen, but 4590 * XXX: completing the CCB seems to make things happy. 4591 * XXX: This seems to happen if the initiator requests 4592 * XXX: enough data that we have to do multiple CTIOs. 4593 */ 4594 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4595 mpt_lprt(mpt, MPT_PRT_DEBUG, 4596 "Meaningless STATUS CCB (%p): flags %x status %x " 4597 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 4598 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 4599 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4600 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4601 MPTLOCK_2_CAMLOCK(mpt); 4602 xpt_done(ccb); 4603 CAMLOCK_2_MPTLOCK(mpt); 4604 return; 4605 } 4606 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 4607 sp = sense; 4608 memcpy(sp, &csio->sense_data, 4609 min(csio->sense_len, MPT_SENSE_SIZE)); 4610 } 4611 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); 4612 } 4613 } 4614 4615 static void 4616 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 4617 uint32_t lun, int send, uint8_t *data, size_t length) 4618 { 4619 mpt_tgt_state_t *tgt; 4620 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4621 SGE_SIMPLE32 *se; 4622 uint32_t flags; 4623 uint8_t *dptr; 4624 bus_addr_t pptr; 4625 request_t *req; 4626 4627 /* 4628 * We enter with resid set to the data load for the command. 
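 * (For FC commands resid was loaded from the FCP DL field in
 * mpt_scsi_tgt_atio; a zero length or an already exhausted resid means
 * there is nothing to move, so we just send status.)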
4629 */ 4630 tgt = MPT_TGT_STATE(mpt, cmd_req); 4631 if (length == 0 || tgt->resid == 0) { 4632 tgt->resid = 0; 4633 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); 4634 return; 4635 } 4636 4637 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4638 mpt_prt(mpt, "out of resources- dropping local response\n"); 4639 return; 4640 } 4641 tgt->is_local = 1; 4642 4643 4644 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4645 ta = req->req_vbuf; 4646 4647 if (mpt->is_sas) { 4648 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 4649 ta->QueueTag = ssp->InitiatorTag; 4650 } else if (mpt->is_spi) { 4651 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 4652 ta->QueueTag = sp->Tag; 4653 } 4654 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4655 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4656 ta->ReplyWord = htole32(tgt->reply_desc); 4657 if (lun > MPT_MAX_LUNS) { 4658 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4659 ta->LUN[1] = lun & 0xff; 4660 } else { 4661 ta->LUN[1] = lun; 4662 } 4663 ta->RelativeOffset = 0; 4664 ta->DataLength = length; 4665 4666 dptr = req->req_vbuf; 4667 dptr += MPT_RQSL(mpt); 4668 pptr = req->req_pbuf; 4669 pptr += MPT_RQSL(mpt); 4670 memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 4671 4672 se = (SGE_SIMPLE32 *) &ta->SGL[0]; 4673 memset(se, 0,sizeof (*se)); 4674 4675 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 4676 if (send) { 4677 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4678 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 4679 } 4680 se->Address = pptr; 4681 MPI_pSGE_SET_LENGTH(se, length); 4682 flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 4683 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 4684 MPI_pSGE_SET_FLAGS(se, flags); 4685 4686 tgt->ccb = NULL; 4687 tgt->req = req; 4688 tgt->resid -= length; 4689 tgt->bytes_xfered = length; 4690 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4691 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 4692 #else 4693 tgt->state = TGT_STATE_MOVING_DATA; 4694 #endif 4695 mpt_send_cmd(mpt, req); 4696 } 4697 4698 /* 4699 * Abort queued up CCBs 4700 */ 4701 static cam_status 4702 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 4703 { 4704 struct mpt_hdr_stailq *lp; 4705 struct ccb_hdr *srch; 4706 int found = 0; 4707 union ccb *accb = ccb->cab.abort_ccb; 4708 tgt_resource_t *trtp; 4709 4710 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 4711 4712 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4713 trtp = &mpt->trt_wildcard; 4714 } else { 4715 trtp = &mpt->trt[ccb->ccb_h.target_lun]; 4716 } 4717 4718 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4719 lp = &trtp->atios; 4720 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 4721 lp = &trtp->inots; 4722 } else { 4723 return (CAM_REQ_INVALID); 4724 } 4725 4726 STAILQ_FOREACH(srch, lp, sim_links.stqe) { 4727 if (srch == &accb->ccb_h) { 4728 found = 1; 4729 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 4730 break; 4731 } 4732 } 4733 if (found) { 4734 accb->ccb_h.status = CAM_REQ_ABORTED; 4735 xpt_done(accb); 4736 return (CAM_REQ_CMP); 4737 } 4738 mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); 4739 return (CAM_PATH_INVALID); 4740 } 4741 4742 /* 4743 * Ask the MPT to abort the current target command 4744 */ 4745 static int 4746 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 4747 { 4748 int error; 4749 request_t *req; 4750 PTR_MSG_TARGET_MODE_ABORT abtp; 4751 4752 req = mpt_get_request(mpt, FALSE); 4753 if (req == NULL) { 4754 return (-1); 4755 } 4756 abtp = req->req_vbuf; 4757 memset(abtp, 0, sizeof (*abtp)); 
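	/*
	 * The abort is keyed by the ReplyWord of the original command buffer
	 * so that the IOC can match the exact outstanding I/O
	 * (TARGET_MODE_ABORT_TYPE_EXACT_IO).
	 */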
4758 4759 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4760 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 4761 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 4762 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 4763 error = 0; 4764 if (mpt->is_fc || mpt->is_sas) { 4765 mpt_send_cmd(mpt, req); 4766 } else { 4767 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); 4768 } 4769 return (error); 4770 } 4771 4772 /* 4773 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 4774 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 4775 * FC929 to set bogus FC_RSP fields (nonzero residuals 4776 * but w/o RESID fields set). This causes QLogic initiators 4777 * to think maybe that a frame was lost. 4778 * 4779 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 4780 * we use allocated requests to do TARGET_ASSIST and we 4781 * need to know when to release them. 4782 */ 4783 4784 static void 4785 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 4786 uint8_t status, uint8_t const *sense_data) 4787 { 4788 uint8_t *cmd_vbuf; 4789 mpt_tgt_state_t *tgt; 4790 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 4791 request_t *req; 4792 bus_addr_t paddr; 4793 int resplen = 0; 4794 uint32_t fl; 4795 4796 cmd_vbuf = cmd_req->req_vbuf; 4797 cmd_vbuf += MPT_RQSL(mpt); 4798 tgt = MPT_TGT_STATE(mpt, cmd_req); 4799 4800 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4801 if (mpt->outofbeer == 0) { 4802 mpt->outofbeer = 1; 4803 xpt_freeze_simq(mpt->sim, 1); 4804 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4805 } 4806 if (ccb) { 4807 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4808 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4809 MPTLOCK_2_CAMLOCK(mpt); 4810 xpt_done(ccb); 4811 CAMLOCK_2_MPTLOCK(mpt); 4812 } else { 4813 mpt_prt(mpt, 4814 "could not allocate status request- dropping\n"); 4815 } 4816 return; 4817 } 4818 req->ccb = ccb; 4819 if (ccb) { 4820 ccb->ccb_h.ccb_mpt_ptr = mpt; 4821 ccb->ccb_h.ccb_req_ptr = req; 4822 } 4823 4824 /* 4825 * Record the currently active ccb, if any, and the 4826 * request for it in our target state area. 4827 */ 4828 tgt->ccb = ccb; 4829 tgt->req = req; 4830 tgt->state = TGT_STATE_SENDING_STATUS; 4831 4832 tp = req->req_vbuf; 4833 paddr = req->req_pbuf; 4834 paddr += MPT_RQSL(mpt); 4835 4836 memset(tp, 0, sizeof (*tp)); 4837 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 4838 if (mpt->is_fc) { 4839 PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 4840 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 4841 uint8_t *sts_vbuf; 4842 uint32_t *rsp; 4843 4844 sts_vbuf = req->req_vbuf; 4845 sts_vbuf += MPT_RQSL(mpt); 4846 rsp = (uint32_t *) sts_vbuf; 4847 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 4848 4849 /* 4850 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 4851 * It has to be big-endian in memory and is organized 4852 * in 32 bit words, which are much easier to deal with 4853 * as words which are swizzled as needed. 4854 * 4855 * All we're filling here is the FC_RSP payload. 4856 * We may just have the chip synthesize it if 4857 * we have no residual and an OK status. 4858 * 4859 */ 4860 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 4861 4862 rsp[2] = status; 4863 if (tgt->resid) { 4864 rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ 4865 rsp[3] = htobe32(tgt->resid); 4866 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4867 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4868 #endif 4869 } 4870 if (status == SCSI_STATUS_CHECK_COND) { 4871 int i; 4872 4873 rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! 
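 * (after the byte swap of rsp[2] below, this lands in the FCP_RSP flags
 * byte; 0x200 appears to be the sense-length-valid bit, just as the 0x800
 * above appears to be the residual-underrun bit)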
*/ 4874 rsp[4] = htobe32(MPT_SENSE_SIZE); 4875 if (sense_data) { 4876 memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE); 4877 } else { 4878 mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI" 4879 "TION but no sense data?\n"); 4880 memset(&rsp[8], 0, MPT_SENSE_SIZE); 4881 } 4882 for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) { 4883 rsp[i] = htobe32(rsp[i]); 4884 } 4885 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4886 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4887 #endif 4888 } 4889 #ifndef WE_TRUST_AUTO_GOOD_STATUS 4890 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4891 #endif 4892 rsp[2] = htobe32(rsp[2]); 4893 } else if (mpt->is_sas) { 4894 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4895 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; 4896 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); 4897 } else { 4898 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4899 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; 4900 tp->StatusCode = status; 4901 tp->QueueTag = htole16(sp->Tag); 4902 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); 4903 } 4904 4905 tp->ReplyWord = htole32(tgt->reply_desc); 4906 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4907 4908 #ifdef WE_CAN_USE_AUTO_REPOST 4909 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; 4910 #endif 4911 if (status == SCSI_STATUS_OK && resplen == 0) { 4912 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; 4913 } else { 4914 tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); 4915 fl = 4916 MPI_SGE_FLAGS_HOST_TO_IOC | 4917 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4918 MPI_SGE_FLAGS_LAST_ELEMENT | 4919 MPI_SGE_FLAGS_END_OF_LIST | 4920 MPI_SGE_FLAGS_END_OF_BUFFER; 4921 fl <<= MPI_SGE_FLAGS_SHIFT; 4922 fl |= resplen; 4923 tp->StatusDataSGE.FlagsLength = htole32(fl); 4924 } 4925 4926 mpt_lprt(mpt, MPT_PRT_DEBUG, 4927 "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n", 4928 ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req, 4929 req->serno, tgt->resid); 4930 if (ccb) { 4931 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4932 mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb); 4933 } 4934 mpt_send_cmd(mpt, req); 4935 } 4936 4937 static void 4938 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, 4939 tgt_resource_t *trtp, int init_id) 4940 { 4941 struct ccb_immed_notify *inot; 4942 mpt_tgt_state_t *tgt; 4943 4944 tgt = MPT_TGT_STATE(mpt, req); 4945 inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots); 4946 if (inot == NULL) { 4947 mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n"); 4948 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL); 4949 return; 4950 } 4951 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); 4952 mpt_lprt(mpt, MPT_PRT_DEBUG1, 4953 "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun); 4954 4955 memset(&inot->sense_data, 0, sizeof (inot->sense_data)); 4956 inot->sense_len = 0; 4957 memset(inot->message_args, 0, sizeof (inot->message_args)); 4958 inot->initiator_id = init_id; /* XXX */ 4959 4960 /* 4961 * This is a somewhat grotesque attempt to map from task management 4962 * to old style SCSI messages. God help us all.
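 * Each task management function is folded onto the nearest legacy SCSI
 * message byte, which is handed up to CAM in message_args[0] of the
 * immediate notify CCB.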
4963 */ 4964 switch (fc) { 4965 case MPT_ABORT_TASK_SET: 4966 inot->message_args[0] = MSG_ABORT_TAG; 4967 break; 4968 case MPT_CLEAR_TASK_SET: 4969 inot->message_args[0] = MSG_CLEAR_TASK_SET; 4970 break; 4971 case MPT_TARGET_RESET: 4972 inot->message_args[0] = MSG_TARGET_RESET; 4973 break; 4974 case MPT_CLEAR_ACA: 4975 inot->message_args[0] = MSG_CLEAR_ACA; 4976 break; 4977 case MPT_TERMINATE_TASK: 4978 inot->message_args[0] = MSG_ABORT_TAG; 4979 break; 4980 default: 4981 inot->message_args[0] = MSG_NOOP; 4982 break; 4983 } 4984 tgt->ccb = (union ccb *) inot; 4985 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 4986 MPTLOCK_2_CAMLOCK(mpt); 4987 xpt_done((union ccb *)inot); 4988 CAMLOCK_2_MPTLOCK(mpt); 4989 } 4990 4991 static void 4992 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 4993 { 4994 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 4995 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 4996 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 4997 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 4998 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 4999 '0', '0', '0', '1' 5000 }; 5001 struct ccb_accept_tio *atiop; 5002 lun_id_t lun; 5003 int tag_action = 0; 5004 mpt_tgt_state_t *tgt; 5005 tgt_resource_t *trtp = NULL; 5006 U8 *lunptr; 5007 U8 *vbuf; 5008 U16 itag; 5009 U16 ioindex; 5010 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 5011 uint8_t *cdbp; 5012 5013 /* 5014 * First, DMA sync the received command- 5015 * which is in the *request* * phys area. 5016 * 5017 * XXX: We could optimize this for a range 5018 */ 5019 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 5020 BUS_DMASYNC_POSTREAD); 5021 5022 /* 5023 * Stash info for the current command where we can get at it later. 5024 */ 5025 vbuf = req->req_vbuf; 5026 vbuf += MPT_RQSL(mpt); 5027 5028 /* 5029 * Get our state pointer set up. 5030 */ 5031 tgt = MPT_TGT_STATE(mpt, req); 5032 if (tgt->state != TGT_STATE_LOADED) { 5033 mpt_tgt_dump_req_state(mpt, req); 5034 panic("bad target state in mpt_scsi_tgt_atio"); 5035 } 5036 memset(tgt, 0, sizeof (mpt_tgt_state_t)); 5037 tgt->state = TGT_STATE_IN_CAM; 5038 tgt->reply_desc = reply_desc; 5039 ioindex = GET_IO_INDEX(reply_desc); 5040 if (mpt->verbose >= MPT_PRT_DEBUG) { 5041 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 5042 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 5043 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 5044 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 5045 } 5046 if (mpt->is_fc) { 5047 PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 5048 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 5049 if (fc->FcpCntl[2]) { 5050 /* 5051 * Task Management Request 5052 */ 5053 switch (fc->FcpCntl[2]) { 5054 case 0x2: 5055 fct = MPT_ABORT_TASK_SET; 5056 break; 5057 case 0x4: 5058 fct = MPT_CLEAR_TASK_SET; 5059 break; 5060 case 0x20: 5061 fct = MPT_TARGET_RESET; 5062 break; 5063 case 0x40: 5064 fct = MPT_CLEAR_ACA; 5065 break; 5066 case 0x80: 5067 fct = MPT_TERMINATE_TASK; 5068 break; 5069 default: 5070 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 5071 fc->FcpCntl[2]); 5072 mpt_scsi_tgt_status(mpt, 0, req, 5073 SCSI_STATUS_OK, 0); 5074 return; 5075 } 5076 } else { 5077 switch (fc->FcpCntl[1]) { 5078 case 0: 5079 tag_action = MSG_SIMPLE_Q_TAG; 5080 break; 5081 case 1: 5082 tag_action = MSG_HEAD_OF_Q_TAG; 5083 break; 5084 case 2: 5085 tag_action = MSG_ORDERED_Q_TAG; 5086 break; 5087 default: 5088 /* 5089 * Bah. 
Ignore Untagged Queing and ACA 5090 */ 5091 tag_action = MSG_SIMPLE_Q_TAG; 5092 break; 5093 } 5094 } 5095 tgt->resid = be32toh(fc->FcpDl); 5096 cdbp = fc->FcpCdb; 5097 lunptr = fc->FcpLun; 5098 itag = be16toh(fc->OptionalOxid); 5099 } else if (mpt->is_sas) { 5100 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; 5101 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; 5102 cdbp = ssp->CDB; 5103 lunptr = ssp->LogicalUnitNumber; 5104 itag = ssp->InitiatorTag; 5105 } else { 5106 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; 5107 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; 5108 cdbp = sp->CDB; 5109 lunptr = sp->LogicalUnitNumber; 5110 itag = sp->Tag; 5111 } 5112 5113 /* 5114 * Generate a simple lun 5115 */ 5116 switch (lunptr[0] & 0xc0) { 5117 case 0x40: 5118 lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; 5119 break; 5120 case 0: 5121 lun = lunptr[1]; 5122 break; 5123 default: 5124 mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n"); 5125 lun = 0xffff; 5126 break; 5127 } 5128 5129 /* 5130 * Deal with non-enabled or bad luns here. 5131 */ 5132 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || 5133 mpt->trt[lun].enabled == 0) { 5134 if (mpt->twildcard) { 5135 trtp = &mpt->trt_wildcard; 5136 } else if (fct == MPT_NIL_TMT_VALUE) { 5137 /* 5138 * In this case, we haven't got an upstream listener 5139 * for either a specific lun or wildcard luns. We 5140 * have to make some sensible response. For regular 5141 * inquiry, just return some NOT HERE inquiry data. 5142 * For VPD inquiry, report illegal field in cdb. 5143 * For REQUEST SENSE, just return NO SENSE data. 5144 * REPORT LUNS gets illegal command. 5145 * All other commands get 'no such device'. 5146 */ 5147 uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; 5148 size_t len; 5149 5150 memset(buf, 0, MPT_SENSE_SIZE); 5151 cond = SCSI_STATUS_CHECK_COND; 5152 buf[0] = 0xf0; 5153 buf[2] = 0x5; 5154 buf[7] = 0x8; 5155 sp = buf; 5156 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5157 5158 switch (cdbp[0]) { 5159 case INQUIRY: 5160 { 5161 if (cdbp[1] != 0) { 5162 buf[12] = 0x26; 5163 buf[13] = 0x01; 5164 break; 5165 } 5166 len = min(tgt->resid, cdbp[4]); 5167 len = min(len, sizeof (null_iqd)); 5168 mpt_lprt(mpt, MPT_PRT_DEBUG, 5169 "local inquiry %ld bytes\n", (long) len); 5170 mpt_scsi_tgt_local(mpt, req, lun, 1, 5171 null_iqd, len); 5172 return; 5173 } 5174 case REQUEST_SENSE: 5175 { 5176 buf[2] = 0x0; 5177 len = min(tgt->resid, cdbp[4]); 5178 len = min(len, sizeof (buf)); 5179 mpt_lprt(mpt, MPT_PRT_DEBUG, 5180 "local reqsense %ld bytes\n", (long) len); 5181 mpt_scsi_tgt_local(mpt, req, lun, 1, 5182 buf, len); 5183 return; 5184 } 5185 case REPORT_LUNS: 5186 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); 5187 buf[12] = 0x26; 5188 return; 5189 default: 5190 mpt_lprt(mpt, MPT_PRT_DEBUG, 5191 "CMD 0x%x to unmanaged lun %u\n", 5192 cdbp[0], lun); 5193 buf[12] = 0x25; 5194 break; 5195 } 5196 mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); 5197 return; 5198 } 5199 /* otherwise, leave trtp NULL */ 5200 } else { 5201 trtp = &mpt->trt[lun]; 5202 } 5203 5204 /* 5205 * Deal with any task management 5206 */ 5207 if (fct != MPT_NIL_TMT_VALUE) { 5208 if (trtp == NULL) { 5209 mpt_prt(mpt, "task mgmt function %x but no listener\n", 5210 fct); 5211 mpt_scsi_tgt_status(mpt, 0, req, 5212 SCSI_STATUS_OK, 0); 5213 } else { 5214 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, 5215 GET_INITIATOR_INDEX(reply_desc)); 5216 } 5217 return; 5218 } 5219 5220 5221 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); 5222 if (atiop == NULL) { 5223 mpt_lprt(mpt, MPT_PRT_WARN, 5224 "no ATIOs for lun %u- 
sending back %s\n", lun, 5225 mpt->tenabled? "QUEUE FULL" : "BUSY"); 5226 mpt_scsi_tgt_status(mpt, NULL, req, 5227 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 5228 NULL); 5229 return; 5230 } 5231 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 5232 mpt_lprt(mpt, MPT_PRT_DEBUG1, 5233 "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); 5234 atiop->ccb_h.ccb_mpt_ptr = mpt; 5235 atiop->ccb_h.status = CAM_CDB_RECVD; 5236 atiop->ccb_h.target_lun = lun; 5237 atiop->sense_len = 0; 5238 atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 5239 atiop->cdb_len = mpt_cdblen(cdbp[0], 16); 5240 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 5241 5242 /* 5243 * The tag we construct here allows us to find the 5244 * original request that the command came in with. 5245 * 5246 * This way we don't have to depend on anything but the 5247 * tag to find things when CCBs show back up from CAM. 5248 */ 5249 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5250 tgt->tag_id = atiop->tag_id; 5251 if (tag_action) { 5252 atiop->tag_action = tag_action; 5253 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 5254 } 5255 if (mpt->verbose >= MPT_PRT_DEBUG) { 5256 int i; 5257 mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, 5258 atiop->ccb_h.target_lun); 5259 for (i = 0; i < atiop->cdb_len; i++) { 5260 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 5261 (i == (atiop->cdb_len - 1))? '>' : ' '); 5262 } 5263 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 5264 itag, atiop->tag_id, tgt->reply_desc, tgt->resid); 5265 } 5266 5267 MPTLOCK_2_CAMLOCK(mpt); 5268 xpt_done((union ccb *)atiop); 5269 CAMLOCK_2_MPTLOCK(mpt); 5270 } 5271 5272 static void 5273 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 5274 { 5275 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5276 5277 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 5278 "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, 5279 tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, 5280 tgt->tag_id, tgt->state); 5281 } 5282 5283 static void 5284 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 5285 { 5286 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 5287 req->index, req->index, req->state); 5288 mpt_tgt_dump_tgt_state(mpt, req); 5289 } 5290 5291 static int 5292 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 5293 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 5294 { 5295 int dbg; 5296 union ccb *ccb; 5297 U16 status; 5298 5299 if (reply_frame == NULL) { 5300 /* 5301 * Figure out what the state of the command is. 5302 */ 5303 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5304 5305 #ifdef INVARIANTS 5306 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 5307 if (tgt->req) { 5308 mpt_req_not_spcl(mpt, tgt->req, 5309 "turbo scsi_tgt_reply associated req", __LINE__); 5310 } 5311 #endif 5312 switch(tgt->state) { 5313 case TGT_STATE_LOADED: 5314 /* 5315 * This is a new command starting. 
5316 */ 5317 mpt_scsi_tgt_atio(mpt, req, reply_desc); 5318 break; 5319 case TGT_STATE_MOVING_DATA: 5320 { 5321 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 5322 5323 ccb = tgt->ccb; 5324 if (tgt->req == NULL) { 5325 panic("mpt: turbo target reply with null " 5326 "associated request moving data"); 5327 /* NOTREACHED */ 5328 } 5329 if (ccb == NULL) { 5330 if (tgt->is_local == 0) { 5331 panic("mpt: turbo target reply with " 5332 "null associated ccb moving data"); 5333 /* NOTREACHED */ 5334 } 5335 mpt_lprt(mpt, MPT_PRT_DEBUG, 5336 "TARGET_ASSIST local done\n"); 5337 TAILQ_REMOVE(&mpt->request_pending_list, 5338 tgt->req, links); 5339 mpt_free_request(mpt, tgt->req); 5340 tgt->req = NULL; 5341 mpt_scsi_tgt_status(mpt, NULL, req, 5342 0, NULL); 5343 return (TRUE); 5344 } 5345 tgt->ccb = NULL; 5346 tgt->nxfers++; 5347 mpt_req_untimeout(req, mpt_timeout, ccb); 5348 mpt_lprt(mpt, MPT_PRT_DEBUG, 5349 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 5350 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 5351 /* 5352 * Free the Target Assist Request 5353 */ 5354 KASSERT(tgt->req->ccb == ccb, 5355 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 5356 tgt->req->serno, tgt->req->ccb)); 5357 TAILQ_REMOVE(&mpt->request_pending_list, 5358 tgt->req, links); 5359 mpt_free_request(mpt, tgt->req); 5360 tgt->req = NULL; 5361 5362 /* 5363 * Do we need to send status now? That is, are 5364 * we done with all our data transfers? 5365 */ 5366 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 5367 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5368 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5369 KASSERT(ccb->ccb_h.status, 5370 ("zero ccb sts at %d\n", __LINE__)); 5371 tgt->state = TGT_STATE_IN_CAM; 5372 if (mpt->outofbeer) { 5373 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5374 mpt->outofbeer = 0; 5375 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5376 } 5377 MPTLOCK_2_CAMLOCK(mpt); 5378 xpt_done(ccb); 5379 CAMLOCK_2_MPTLOCK(mpt); 5380 break; 5381 } 5382 /* 5383 * Otherwise, send status (and sense) 5384 */ 5385 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5386 sp = sense; 5387 memcpy(sp, &ccb->csio.sense_data, 5388 min(ccb->csio.sense_len, MPT_SENSE_SIZE)); 5389 } 5390 mpt_scsi_tgt_status(mpt, ccb, req, 5391 ccb->csio.scsi_status, sp); 5392 break; 5393 } 5394 case TGT_STATE_SENDING_STATUS: 5395 case TGT_STATE_MOVING_DATA_AND_STATUS: 5396 { 5397 int ioindex; 5398 ccb = tgt->ccb; 5399 5400 if (tgt->req == NULL) { 5401 panic("mpt: turbo target reply with null " 5402 "associated request sending status"); 5403 /* NOTREACHED */ 5404 } 5405 5406 if (ccb) { 5407 tgt->ccb = NULL; 5408 if (tgt->state == 5409 TGT_STATE_MOVING_DATA_AND_STATUS) { 5410 tgt->nxfers++; 5411 } 5412 mpt_req_untimeout(req, mpt_timeout, ccb); 5413 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5414 ccb->ccb_h.status |= CAM_SENT_SENSE; 5415 } 5416 mpt_lprt(mpt, MPT_PRT_DEBUG, 5417 "TARGET_STATUS tag %x sts %x flgs %x req " 5418 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 5419 ccb->ccb_h.flags, tgt->req); 5420 /* 5421 * Free the Target Send Status Request 5422 */ 5423 KASSERT(tgt->req->ccb == ccb, 5424 ("tgt->req %p:%u tgt->req->ccb %p", 5425 tgt->req, tgt->req->serno, tgt->req->ccb)); 5426 /* 5427 * Notify CAM that we're done 5428 */ 5429 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5430 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5431 KASSERT(ccb->ccb_h.status, 5432 ("ZERO ccb sts at %d\n", __LINE__)); 5433 tgt->ccb = NULL; 5434 } else { 5435 mpt_lprt(mpt, MPT_PRT_DEBUG, 5436 "TARGET_STATUS non-CAM for req %p:%u\n", 5437 tgt->req, tgt->req->serno); 5438 } 5439 TAILQ_REMOVE(&mpt->request_pending_list, 
5440 tgt->req, links); 5441 mpt_free_request(mpt, tgt->req); 5442 tgt->req = NULL; 5443 5444 /* 5445 * And re-post the Command Buffer. 5446 * This will reset the state. 5447 */ 5448 ioindex = GET_IO_INDEX(reply_desc); 5449 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5450 tgt->is_local = 0; 5451 mpt_post_target_command(mpt, req, ioindex); 5452 5453 /* 5454 * And post a done for anyone who cares 5455 */ 5456 if (ccb) { 5457 if (mpt->outofbeer) { 5458 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5459 mpt->outofbeer = 0; 5460 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5461 } 5462 MPTLOCK_2_CAMLOCK(mpt); 5463 xpt_done(ccb); 5464 CAMLOCK_2_MPTLOCK(mpt); 5465 } 5466 break; 5467 } 5468 case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 5469 tgt->state = TGT_STATE_LOADED; 5470 break; 5471 default: 5472 mpt_prt(mpt, "Unknown Target State 0x%x in Context " 5473 "Reply Function\n", tgt->state); 5474 } 5475 return (TRUE); 5476 } 5477 5478 status = le16toh(reply_frame->IOCStatus); 5479 if (status != MPI_IOCSTATUS_SUCCESS) { 5480 dbg = MPT_PRT_ERROR; 5481 } else { 5482 dbg = MPT_PRT_DEBUG1; 5483 } 5484 5485 mpt_lprt(mpt, dbg, 5486 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 5487 req, req->serno, reply_frame, reply_frame->Function, status); 5488 5489 switch (reply_frame->Function) { 5490 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 5491 { 5492 mpt_tgt_state_t *tgt; 5493 #ifdef INVARIANTS 5494 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 5495 #endif 5496 if (status != MPI_IOCSTATUS_SUCCESS) { 5497 /* 5498 * XXX What to do? 5499 */ 5500 break; 5501 } 5502 tgt = MPT_TGT_STATE(mpt, req); 5503 KASSERT(tgt->state == TGT_STATE_LOADING, 5504 ("bad state 0x%x on reply to buffer post\n", tgt->state)); 5505 mpt_assign_serno(mpt, req); 5506 tgt->state = TGT_STATE_LOADED; 5507 break; 5508 } 5509 case MPI_FUNCTION_TARGET_ASSIST: 5510 #ifdef INVARIANTS 5511 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 5512 #endif 5513 mpt_prt(mpt, "target assist completion\n"); 5514 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5515 mpt_free_request(mpt, req); 5516 break; 5517 case MPI_FUNCTION_TARGET_STATUS_SEND: 5518 #ifdef INVARIANTS 5519 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 5520 #endif 5521 mpt_prt(mpt, "status send completion\n"); 5522 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5523 mpt_free_request(mpt, req); 5524 break; 5525 case MPI_FUNCTION_TARGET_MODE_ABORT: 5526 { 5527 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 5528 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 5529 PTR_MSG_TARGET_MODE_ABORT abtp = 5530 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 5531 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 5532 #ifdef INVARIANTS 5533 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 5534 #endif 5535 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 5536 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 5537 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5538 mpt_free_request(mpt, req); 5539 break; 5540 } 5541 default: 5542 mpt_prt(mpt, "Unknown Target Address Reply Function code: " 5543 "0x%x\n", reply_frame->Function); 5544 break; 5545 } 5546 return (TRUE); 5547 } 5548