/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#if __FreeBSD_version >= 500000
#include <sys/sysctl.h>
#endif
#include <sys/callout.h>
#include <sys/kthread.h>

#if __FreeBSD_version >= 700025
#ifndef CAM_NEW_TRAN_CODE
#define CAM_NEW_TRAN_CODE 1
#endif
#endif

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
    .name = "mpt_cam",
    .probe = mpt_cam_probe,
    .attach = mpt_cam_attach,
    .enable = mpt_cam_enable,
    .ready = mpt_cam_ready,
    .event = mpt_cam_event,
    .reset = mpt_cam_ioc_reset,
    .detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
int
mpt_cam_probe(struct mpt_softc *mpt)
{
    int role;

    /*
     * Only attach to nodes that support the initiator or target role
     * (or want to) or have RAID physical devices that need CAM pass-thru
     * support.
     */
    if (mpt->do_cfg_role) {
        role = mpt->cfg_role;
    } else {
        role = mpt->role;
    }
    if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
        (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
        return (0);
    }
    return (ENODEV);
}

int
mpt_cam_attach(struct mpt_softc *mpt)
{
    struct cam_devq *devq;
    mpt_handler_t handler;
    int maxq;
    int error;

    TAILQ_INIT(&mpt->request_timeout_list);
    maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt)) ?
        mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

    handler.reply_handler = mpt_scsi_reply_handler;
    error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
        &scsi_io_handler_id);
    if (error != 0) {
        goto cleanup0;
    }

    handler.reply_handler = mpt_scsi_tmf_reply_handler;
    error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
        &scsi_tmf_handler_id);
    if (error != 0) {
        goto cleanup0;
    }

    /*
     * If we're fibre channel and could support target mode, we register
     * an ELS reply handler and give it resources.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
        handler.reply_handler = mpt_fc_els_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &fc_els_handler_id);
        if (error != 0) {
            goto cleanup0;
        }
        if (mpt_add_els_buffers(mpt) == FALSE) {
            error = ENOMEM;
            goto cleanup0;
        }
        maxq -= mpt->els_cmds_allocated;
    }

    /*
     * If we support target mode, we register a reply handler for it,
     * but don't add command resources until we actually enable target
     * mode.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
        handler.reply_handler = mpt_scsi_tgt_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &mpt->scsi_tgt_handler_id);
        if (error != 0) {
            goto cleanup0;
        }
    }

    /*
     * We keep one request reserved for timeout TMF requests.
     */
    mpt->tmf_req = mpt_get_request(mpt, FALSE);
    if (mpt->tmf_req == NULL) {
        mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
        error = ENOMEM;
        goto cleanup0;
    }

    /*
     * Mark the request as free even though not on the free list.
     * There is only one TMF request allowed to be outstanding at
     * a time and the TMF routines perform their own allocation
     * tracking using the standard state flags.
     */
    mpt->tmf_req->state = REQ_STATE_FREE;
    maxq--;

    if (mpt_spawn_recovery_thread(mpt) != 0) {
        mpt_prt(mpt, "Unable to spawn recovery thread!\n");
        error = ENOMEM;
        goto cleanup0;
    }

    /*
     * The rest of this is CAM foo, for which we need to drop our lock.
     */
    MPTLOCK_2_CAMLOCK(mpt);

    /*
     * Create the device queue for our SIM(s).
     */
    devq = cam_simq_alloc(maxq);
    if (devq == NULL) {
        mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
        error = ENOMEM;
        goto cleanup;
    }
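    /*
     * Note that maxq at this point is the IOC's advertised GlobalCredits
     * clamped to MPT_MAX_REQUESTS(mpt), less the requests reserved above
     * for ELS buffers and the dedicated TMF request, so the SIMQ depth
     * should never let CAM queue more commands than the IOC can accept.
     */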
    /*
     * Construct our SIM entry.
     */
    mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
        mpt->unit, &Giant, 1, maxq, devq);
    if (mpt->sim == NULL) {
        mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
        cam_simq_free(devq);
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Register exactly this bus.
     */
    if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
        mpt_prt(mpt, "Bus registration Failed!\n");
        error = ENOMEM;
        goto cleanup;
    }

    if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        mpt_prt(mpt, "Unable to allocate Path!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Only register a second bus for RAID physical
     * devices if the controller supports RAID.
     */
    if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
        CAMLOCK_2_MPTLOCK(mpt);
        return (0);
    }

    /*
     * Create a "bus" to export all hidden disks to CAM.
     */
    mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
        mpt->unit, &Giant, 1, maxq, devq);
    if (mpt->phydisk_sim == NULL) {
        mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Register this bus.
     */
    if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) {
        mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
        error = ENOMEM;
        goto cleanup;
    }

    if (xpt_create_path(&mpt->phydisk_path, NULL,
        cam_sim_path(mpt->phydisk_sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
        error = ENOMEM;
        goto cleanup;
    }
    CAMLOCK_2_MPTLOCK(mpt);
    mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
    return (0);

cleanup:
    CAMLOCK_2_MPTLOCK(mpt);
cleanup0:
    mpt_cam_detach(mpt);
    return (error);
}

/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
    char *topology = NULL;
    int rv;

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
        0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
        mpt->mpt_fcport_page0.Header.PageVersion,
        mpt->mpt_fcport_page0.Header.PageLength,
        mpt->mpt_fcport_page0.Header.PageNumber,
        mpt->mpt_fcport_page0.Header.PageType);

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
        sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read FC Port Page 0\n");
        return (-1);
    }

    mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

    switch (mpt->mpt_fcport_page0.Flags &
        MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
        mpt->mpt_fcport_speed = 0;
        topology = "<NO LOOP>";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
        topology = "N-Port";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
        topology = "NL-Port";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
        topology = "F-Port";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
        topology = "FL-Port";
        break;
    default:
        mpt->mpt_fcport_speed = 0;
        topology = "?";
        break;
    }

    mpt_lprt(mpt, MPT_PRT_INFO,
        "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
        "Speed %u-Gbit\n", topology,
        mpt->mpt_fcport_page0.WWNN.High,
        mpt->mpt_fcport_page0.WWNN.Low,
        mpt->mpt_fcport_page0.WWPN.High,
        mpt->mpt_fcport_page0.WWPN.Low,
        mpt->mpt_fcport_speed);
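    /*
     * The IOC hands the 64-bit world wide names back as two 32-bit
     * halves (High/Low), which is why they are printed, and exported
     * via sysctl below, as a pair of %08x fields.
     */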
#if __FreeBSD_version >= 500000
    {
        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
        struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

        snprintf(mpt->scinfo.fc.wwnn,
            sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
            mpt->mpt_fcport_page0.WWNN.High,
            mpt->mpt_fcport_page0.WWNN.Low);

        snprintf(mpt->scinfo.fc.wwpn,
            sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
            mpt->mpt_fcport_page0.WWPN.High,
            mpt->mpt_fcport_page0.WWPN.Low);

        SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
            "World Wide Node Name");

        SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
            "World Wide Port Name");
    }
#endif
    return (0);
}

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
    CONFIG_PAGE_FC_PORT_1 fc;
    U32 fl;
    int r, doit = 0;
    int role;

    r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
        &fc.Header, FALSE, 5000);
    if (r) {
        mpt_prt(mpt, "failed to read FC page 1 header\n");
        return (mpt_fc_reset_link(mpt, 1));
    }

    r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
        &fc.Header, sizeof (fc), FALSE, 5000);
    if (r) {
        mpt_prt(mpt, "failed to read FC page 1\n");
        return (mpt_fc_reset_link(mpt, 1));
    }

    /*
     * Check our flags to make sure we support the role we want.
     */
    doit = 0;
    role = 0;
    fl = le32toh(fc.Flags);

    if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
        role |= MPT_ROLE_INITIATOR;
    }
    if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
        role |= MPT_ROLE_TARGET;
    }

    fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

    if (mpt->do_cfg_role == 0) {
        role = mpt->cfg_role;
    } else {
        mpt->do_cfg_role = 0;
    }

    if (role != mpt->cfg_role) {
        if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
            if ((role & MPT_ROLE_INITIATOR) == 0) {
                mpt_prt(mpt, "adding initiator role\n");
                fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
                doit++;
            } else {
                mpt_prt(mpt, "keeping initiator role\n");
            }
        } else if (role & MPT_ROLE_INITIATOR) {
            mpt_prt(mpt, "removing initiator role\n");
            doit++;
        }
        if (mpt->cfg_role & MPT_ROLE_TARGET) {
            if ((role & MPT_ROLE_TARGET) == 0) {
                mpt_prt(mpt, "adding target role\n");
                fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
                doit++;
            } else {
                mpt_prt(mpt, "keeping target role\n");
            }
        } else if (role & MPT_ROLE_TARGET) {
            mpt_prt(mpt, "removing target role\n");
            doit++;
        }
        mpt->role = mpt->cfg_role;
    }

    if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
        if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
            mpt_prt(mpt, "adding OXID option\n");
            fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
            doit++;
        }
    }

    if (doit) {
        fc.Flags = htole32(fl);
        r = mpt_write_cfg_page(mpt,
            MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
            sizeof(fc), FALSE, 5000);
        if (r != 0) {
            mpt_prt(mpt, "failed to update NVRAM with changes\n");
            return (0);
        }
        mpt_prt(mpt, "NOTE: NVRAM changes will not take "
            "effect until next reboot or IOC reset\n");
    }
    return (0);
}
/*
 * Read SAS configuration information. Nothing to do yet.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
    return (0);
}

/*
 * Set SAS configuration information. Nothing to do yet.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
    return (0);
}

/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
    int rv, i;

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
        &mpt->mpt_port_page0.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
        mpt->mpt_port_page0.Header.PageVersion,
        mpt->mpt_port_page0.Header.PageLength,
        mpt->mpt_port_page0.Header.PageNumber,
        mpt->mpt_port_page0.Header.PageType);

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
        &mpt->mpt_port_page1.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
        mpt->mpt_port_page1.Header.PageVersion,
        mpt->mpt_port_page1.Header.PageLength,
        mpt->mpt_port_page1.Header.PageNumber,
        mpt->mpt_port_page1.Header.PageType);

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
        &mpt->mpt_port_page2.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
        mpt->mpt_port_page2.Header.PageVersion,
        mpt->mpt_port_page2.Header.PageLength,
        mpt->mpt_port_page2.Header.PageNumber,
        mpt->mpt_port_page2.Header.PageType);

    for (i = 0; i < 16; i++) {
        rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
            0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
        if (rv) {
            return (-1);
        }
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
            mpt->mpt_dev_page0[i].Header.PageVersion,
            mpt->mpt_dev_page0[i].Header.PageLength,
            mpt->mpt_dev_page0[i].Header.PageNumber,
            mpt->mpt_dev_page0[i].Header.PageType);

        rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
            1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
        if (rv) {
            return (-1);
        }
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
            mpt->mpt_dev_page1[i].Header.PageVersion,
            mpt->mpt_dev_page1[i].Header.PageLength,
            mpt->mpt_dev_page1[i].Header.PageNumber,
            mpt->mpt_dev_page1[i].Header.PageType);
    }

    /*
     * At this point, we don't *have* to fail. As long as we have
     * valid config header information, we can (barely) lurch
     * along.
     */
    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
        sizeof(mpt->mpt_port_page0), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read SPI Port Page 0\n");
    } else {
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
            mpt->mpt_port_page0.Capabilities,
            mpt->mpt_port_page0.PhysicalInterface);
    }

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
        sizeof(mpt->mpt_port_page1), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read SPI Port Page 1\n");
    } else {
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
            mpt->mpt_port_page1.Configuration,
            mpt->mpt_port_page1.OnBusTimerValue);
    }

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
        sizeof(mpt->mpt_port_page2), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read SPI Port Page 2\n");
    } else {
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "Port Page 2: Flags %x Settings %x\n",
            mpt->mpt_port_page2.PortFlags,
            mpt->mpt_port_page2.PortSettings);
        for (i = 0; i < 16; i++) {
            mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
                " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
                i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
                mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
                mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
        }
    }

    for (i = 0; i < 16; i++) {
        rv = mpt_read_cur_cfg_page(mpt, i,
            &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
            FALSE, 5000);
        if (rv) {
            mpt_prt(mpt,
                "cannot read SPI Target %d Device Page 0\n", i);
            continue;
        }
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "target %d page 0: Negotiated Params %x Information %x\n",
            i, mpt->mpt_dev_page0[i].NegotiatedParameters,
            mpt->mpt_dev_page0[i].Information);

        rv = mpt_read_cur_cfg_page(mpt, i,
            &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
            FALSE, 5000);
        if (rv) {
            mpt_prt(mpt,
                "cannot read SPI Target %d Device Page 1\n", i);
            continue;
        }
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "target %d page 1: Requested Params %x Configuration %x\n",
            i, mpt->mpt_dev_page1[i].RequestedParameters,
            mpt->mpt_dev_page1[i].Configuration);
    }
    return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
    int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
    int error;

    mpt->mpt_disc_enable = 0xff;
    mpt->mpt_tag_enable = 0;
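    /*
     * As the expression for pp1val above shows, Port Page 1's
     * Configuration word carries the initiator ID in its low half and
     * the matching one-hot ID bit in its upper half. For the usual
     * host adapter ID of 7, for example, that works out to
     * ((1 << 7) << 16) | 7 == 0x00800007.
     */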
    if (mpt->mpt_port_page1.Configuration != pp1val) {
        CONFIG_PAGE_SCSI_PORT_1 tmp;

        mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
            "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
        tmp = mpt->mpt_port_page1;
        tmp.Configuration = pp1val;
        error = mpt_write_cur_cfg_page(mpt, 0,
            &tmp.Header, sizeof(tmp), FALSE, 5000);
        if (error) {
            return (-1);
        }
        error = mpt_read_cur_cfg_page(mpt, 0,
            &tmp.Header, sizeof(tmp), FALSE, 5000);
        if (error) {
            return (-1);
        }
        if (tmp.Configuration != pp1val) {
            mpt_prt(mpt,
                "failed to reset SPI Port Page 1 Config value\n");
            return (-1);
        }
        mpt->mpt_port_page1 = tmp;
    }

    /*
     * The purpose of this exercise is to get
     * all targets back to async/narrow.
     *
     * We skip this step if the BIOS has already negotiated
     * speeds with the targets and does not require us to
     * do Domain Validation.
     */
    i = mpt->mpt_port_page2.PortSettings &
        MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
    j = mpt->mpt_port_page2.PortFlags &
        MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
    if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* &&
        j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) {
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "honoring BIOS transfer negotiations\n");
    } else {
        for (i = 0; i < 16; i++) {
            mpt->mpt_dev_page1[i].RequestedParameters = 0;
            mpt->mpt_dev_page1[i].Configuration = 0;
            (void) mpt_update_spi_config(mpt, i);
        }
    }
    return (0);
}

int
mpt_cam_enable(struct mpt_softc *mpt)
{
    if (mpt->is_fc) {
        if (mpt_read_config_info_fc(mpt)) {
            return (EIO);
        }
        if (mpt_set_initial_config_fc(mpt)) {
            return (EIO);
        }
    } else if (mpt->is_sas) {
        if (mpt_read_config_info_sas(mpt)) {
            return (EIO);
        }
        if (mpt_set_initial_config_sas(mpt)) {
            return (EIO);
        }
    } else if (mpt->is_spi) {
        if (mpt_read_config_info_spi(mpt)) {
            return (EIO);
        }
        if (mpt_set_initial_config_spi(mpt)) {
            return (EIO);
        }
    }
    return (0);
}

void
mpt_cam_ready(struct mpt_softc *mpt)
{
    /*
     * If we're in target mode, hang out resources now
     * so we don't cause the world to hang talking to us.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
        /*
         * Try to add some target command resources.
         */
        MPT_LOCK(mpt);
        if (mpt_add_target_commands(mpt) == FALSE) {
            mpt_prt(mpt, "failed to add target commands\n");
        }
        MPT_UNLOCK(mpt);
    }
    mpt->ready = 1;
}

void
mpt_cam_detach(struct mpt_softc *mpt)
{
    mpt_handler_t handler;

    mpt->ready = 0;
    mpt_terminate_recovery_thread(mpt);

    handler.reply_handler = mpt_scsi_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        scsi_io_handler_id);
    handler.reply_handler = mpt_scsi_tmf_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        scsi_tmf_handler_id);
    handler.reply_handler = mpt_fc_els_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        fc_els_handler_id);
    handler.reply_handler = mpt_scsi_tgt_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        mpt->scsi_tgt_handler_id);

    if (mpt->tmf_req != NULL) {
        mpt->tmf_req->state = REQ_STATE_ALLOCATED;
        mpt_free_request(mpt, mpt->tmf_req);
        mpt->tmf_req = NULL;
    }

    if (mpt->sim != NULL) {
        MPTLOCK_2_CAMLOCK(mpt);
        xpt_free_path(mpt->path);
        xpt_bus_deregister(cam_sim_path(mpt->sim));
        cam_sim_free(mpt->sim, TRUE);
        mpt->sim = NULL;
        CAMLOCK_2_MPTLOCK(mpt);
    }

    if (mpt->phydisk_sim != NULL) {
        MPTLOCK_2_CAMLOCK(mpt);
        xpt_free_path(mpt->phydisk_path);
        xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
        cam_sim_free(mpt->phydisk_sim, TRUE);
        mpt->phydisk_sim = NULL;
        CAMLOCK_2_MPTLOCK(mpt);
    }
}
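/*
 * Note the teardown order above: the instance is marked not ready, the
 * recovery thread is stopped, the reply handlers are deregistered and the
 * reserved TMF request released before the CAM side (paths, busses, SIMs)
 * is torn down.
 */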
/*
 * This routine is used after a system crash to dump core onto the swap
 * device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
    struct mpt_softc *mpt;

    mpt = (struct mpt_softc *)cam_sim_softc(sim);
    MPT_LOCK(mpt);
    mpt_intr(mpt);
    MPT_UNLOCK(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
    union ccb *ccb;
    struct mpt_softc *mpt;
    request_t *req;

    ccb = (union ccb *)arg;
    mpt = ccb->ccb_h.ccb_mpt_ptr;

    MPT_LOCK(mpt);
    req = ccb->ccb_h.ccb_req_ptr;
    mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
        req->serno, ccb, req->ccb);
    /* XXX: WHAT ARE WE TRYING TO DO HERE? */
    if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
        TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
        req->state |= REQ_STATE_TIMEDOUT;
        mpt_wakeup_recovery_thread(mpt);
    }
    MPT_UNLOCK(mpt);
}

/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called
 * directly.
 *
 * Takes a list of physical segments and builds the SGL for the SCSI IO
 * command and forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    request_t *req, *trq;
    char *mpt_off;
    union ccb *ccb;
    struct mpt_softc *mpt;
    int seg, first_lim;
    uint32_t flags, nxt_off;
    void *sglp = NULL;
    MSG_REQUEST_HEADER *hdrp;
    SGE_SIMPLE64 *se;
    SGE_CHAIN64 *ce;
    int istgt = 0;

    req = (request_t *)arg;
    ccb = req->ccb;

    mpt = ccb->ccb_h.ccb_mpt_ptr;
    req = ccb->ccb_h.ccb_req_ptr;

    hdrp = req->req_vbuf;
    mpt_off = req->req_vbuf;

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
    }

    if (error == 0) {
        switch (hdrp->Function) {
        case MPI_FUNCTION_SCSI_IO_REQUEST:
        case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
            istgt = 0;
            sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
            break;
        case MPI_FUNCTION_TARGET_ASSIST:
            istgt = 1;
            sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
            break;
        default:
            mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
                hdrp->Function);
            error = EINVAL;
            break;
        }
    }

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
        mpt_prt(mpt, "segment count %d too large (max %u)\n",
            nseg, mpt->max_seg_cnt);
    }

bad:
    if (error != 0) {
        if (error != EFBIG && error != ENOMEM) {
            mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
        }
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
            cam_status status;
            mpt_freeze_ccb(ccb);
            if (error == EFBIG) {
                status = CAM_REQ_TOO_BIG;
            } else if (error == ENOMEM) {
                if (mpt->outofbeer == 0) {
                    mpt->outofbeer = 1;
                    xpt_freeze_simq(mpt->sim, 1);
                    mpt_lprt(mpt, MPT_PRT_DEBUG,
                        "FREEZEQ\n");
                }
                status = CAM_REQUEUE_REQ;
            } else {
                status = CAM_REQ_CMP_ERR;
            }
            mpt_set_ccb_status(ccb, status);
        }
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    /*
     * No data to transfer?
     * Just make a single simple SGL with zero length.
     */

    if (mpt->verbose >= MPT_PRT_DEBUG) {
        int tidx = ((char *)sglp) - mpt_off;
        memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
    }
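    /*
     * (The 0xff fill above is presumably a debugging aid: SGL slots that
     * are never written stand out clearly in request dumps.)
     */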
    if (nseg == 0) {
        SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
        MPI_pSGE_SET_FLAGS(se1,
            (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
            MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
        se1->FlagsLength = htole32(se1->FlagsLength);
        goto out;
    }

    flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
    if (istgt == 0) {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    } else {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    }

    if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
        bus_dmasync_op_t op;
        if (istgt == 0) {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREREAD;
            } else {
                op = BUS_DMASYNC_PREWRITE;
            }
        } else {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREWRITE;
            } else {
                op = BUS_DMASYNC_PREREAD;
            }
        }
        bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
    }

    /*
     * Okay, fill in what we can at the end of the command frame.
     * If we have up to MPT_NSGL_FIRST, we can fit them all into
     * the command frame.
     *
     * Otherwise, we fill up through MPT_NSGL_FIRST less one
     * SIMPLE64 pointers and start doing CHAIN64 entries after
     * that.
     */

    if (nseg < MPT_NSGL_FIRST(mpt)) {
        first_lim = nseg;
    } else {
        /*
         * Leave room for CHAIN element.
         */
        first_lim = MPT_NSGL_FIRST(mpt) - 1;
    }

    se = (SGE_SIMPLE64 *) sglp;
    for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
        uint32_t tf;

        memset(se, 0, sizeof (*se));
        se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
        if (sizeof(bus_addr_t) > 4) {
            se->Address.High =
                htole32(((uint64_t)dm_segs->ds_addr) >> 32);
        }
        MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
        tf = flags;
        if (seg == first_lim - 1) {
            tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
        }
        if (seg == nseg - 1) {
            tf |= MPI_SGE_FLAGS_END_OF_LIST |
                MPI_SGE_FLAGS_END_OF_BUFFER;
        }
        MPI_pSGE_SET_FLAGS(se, tf);
        se->FlagsLength = htole32(se->FlagsLength);
    }

    if (seg == nseg) {
        goto out;
    }

    /*
     * Tell the IOC where to find the first chain element.
     */
    hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
    nxt_off = MPT_RQSL(mpt);
    trq = req;
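    /*
     * ChainOffset is expressed in 32-bit words, hence the >> 2 above:
     * a chain element that starts, say, 96 bytes into the frame is
     * recorded as an offset of 24.
     */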
    /*
     * Make up the rest of the data segments out of a chain element
     * (contained in the current request frame) which points to
     * SIMPLE64 elements in the next request frame, possibly ending
     * with *another* chain element (if there's more).
     */
    while (seg < nseg) {
        int this_seg_lim;
        uint32_t tf, cur_off;
        bus_addr_t chain_list_addr;

        /*
         * Point to the chain descriptor. Note that the chain
         * descriptor is at the end of the *previous* list (whether
         * chain or simple).
         */
        ce = (SGE_CHAIN64 *) se;

        /*
         * Before we change our current pointer, make sure we won't
         * overflow the request area with this frame. Note that we
         * test against 'greater than' here as it's okay in this case
         * to have next offset be just outside the request area.
         */
        if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
            nxt_off = MPT_REQUEST_AREA;
            goto next_chain;
        }

        /*
         * Set our SGE element pointer to the beginning of the chain
         * list and update our next chain list offset.
         */
        se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
        cur_off = nxt_off;
        nxt_off += MPT_RQSL(mpt);

        /*
         * Now initialize the chain descriptor.
         */
        memset(ce, 0, sizeof (*ce));

        /*
         * Get the physical address of the chain list.
         */
        chain_list_addr = trq->req_pbuf;
        chain_list_addr += cur_off;
        if (sizeof (bus_addr_t) > 4) {
            ce->Address.High =
                htole32((uint32_t)((uint64_t)chain_list_addr >> 32));
        }
        ce->Address.Low = htole32((uint32_t)chain_list_addr);
        ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
            MPI_SGE_FLAGS_64_BIT_ADDRESSING;

        /*
         * If we have more than a frame's worth of segments left,
         * set up the chain list to have the last element be another
         * chain descriptor.
         */
        if ((nseg - seg) > MPT_NSGL(mpt)) {
            this_seg_lim = seg + MPT_NSGL(mpt) - 1;
            /*
             * The length of the chain is the length in bytes of the
             * number of segments plus the next chain element.
             *
             * The next chain descriptor offset is the length,
             * in words, of the number of segments.
             */
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE64);
            ce->NextChainOffset = ce->Length >> 2;
            ce->Length += sizeof (SGE_CHAIN64);
        } else {
            this_seg_lim = nseg;
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE64);
        }
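        /*
         * To make the arithmetic concrete: if MPT_NSGL(mpt) were 24
         * and 40 segments remained, the first branch above would lay
         * down 23 SIMPLE64 entries (23 * 12 == 276 bytes, so
         * NextChainOffset == 69 words) and then grow Length by
         * sizeof (SGE_CHAIN64) to cover the trailing chain descriptor.
         */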
        /*
         * Fill in the chain list SGE elements with our segment data.
         *
         * If we're the last element in this chain list, set the last
         * element flag. If we're the completely last element period,
         * set the end of list and end of buffer flags.
         */
        while (seg < this_seg_lim) {
            memset(se, 0, sizeof (*se));
            se->Address.Low = htole32(dm_segs->ds_addr);
            if (sizeof (bus_addr_t) > 4) {
                se->Address.High =
                    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
            }
            MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
            tf = flags;
            if (seg == this_seg_lim - 1) {
                tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
            }
            if (seg == nseg - 1) {
                tf |= MPI_SGE_FLAGS_END_OF_LIST |
                    MPI_SGE_FLAGS_END_OF_BUFFER;
            }
            MPI_pSGE_SET_FLAGS(se, tf);
            se->FlagsLength = htole32(se->FlagsLength);
            se++;
            seg++;
            dm_segs++;
        }

    next_chain:
        /*
         * If we have more segments to do and we've used up all of
         * the space in a request area, go allocate another one
         * and chain to that.
         */
        if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
            request_t *nrq;

            CAMLOCK_2_MPTLOCK(mpt);
            nrq = mpt_get_request(mpt, FALSE);
            MPTLOCK_2_CAMLOCK(mpt);

            if (nrq == NULL) {
                error = ENOMEM;
                goto bad;
            }

            /*
             * Append the new request area on the tail of our list.
             */
            if ((trq = req->chain) == NULL) {
                req->chain = nrq;
            } else {
                while (trq->chain != NULL) {
                    trq = trq->chain;
                }
                trq->chain = nrq;
            }
            trq = nrq;
            mpt_off = trq->req_vbuf;
            if (mpt->verbose >= MPT_PRT_DEBUG) {
                memset(mpt_off, 0xff, MPT_REQUEST_AREA);
            }
            nxt_off = 0;
        }
    }
out:

    /*
     * Last time we need to check if this CCB needs to be aborted.
     */
    if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        mpt_prt(mpt,
            "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
            ccb->ccb_h.status & CAM_STATUS_MASK);
        if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
            bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    ccb->ccb_h.status |= CAM_SIM_QUEUED;
    if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
        ccb->ccb_h.timeout_ch =
            timeout(mpt_timeout, (caddr_t)ccb,
            (ccb->ccb_h.timeout * hz) / 1000);
    } else {
        callout_handle_init(&ccb->ccb_h.timeout_ch);
    }
    if (mpt->verbose > MPT_PRT_DEBUG) {
        int nc = 0;
        mpt_print_request(req->req_vbuf);
        for (trq = req->chain; trq; trq = trq->chain) {
            printf("  Additional Chain Area %d\n", nc++);
            mpt_dump_sgl(trq->req_vbuf, 0);
        }
    }

    if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
        request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
        mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef WE_TRUST_AUTO_GOOD_STATUS
        if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
            csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
            tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
        } else {
            tgt->state = TGT_STATE_MOVING_DATA;
        }
#else
        tgt->state = TGT_STATE_MOVING_DATA;
#endif
    }
    CAMLOCK_2_MPTLOCK(mpt);
    mpt_send_cmd(mpt, req);
    MPTLOCK_2_CAMLOCK(mpt);
}

static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    request_t *req, *trq;
    char *mpt_off;
    union ccb *ccb;
    struct mpt_softc *mpt;
    int seg, first_lim;
    uint32_t flags, nxt_off;
    void *sglp = NULL;
    MSG_REQUEST_HEADER *hdrp;
    SGE_SIMPLE32 *se;
    SGE_CHAIN32 *ce;
    int istgt = 0;

    req = (request_t *)arg;
    ccb = req->ccb;

    mpt = ccb->ccb_h.ccb_mpt_ptr;
    req = ccb->ccb_h.ccb_req_ptr;

    hdrp = req->req_vbuf;
    mpt_off = req->req_vbuf;

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
    }

    if (error == 0) {
        switch (hdrp->Function) {
        case MPI_FUNCTION_SCSI_IO_REQUEST:
        case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
            sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
            break;
        case MPI_FUNCTION_TARGET_ASSIST:
            istgt = 1;
            sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
            break;
        default:
            mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
                hdrp->Function);
            error = EINVAL;
            break;
        }
    }

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
        mpt_prt(mpt, "segment count %d too large (max %u)\n",
            nseg, mpt->max_seg_cnt);
    }

bad:
    if (error != 0) {
        if (error != EFBIG && error != ENOMEM) {
            mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
        }
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
            cam_status status;
            mpt_freeze_ccb(ccb);
            if (error == EFBIG) {
                status = CAM_REQ_TOO_BIG;
            } else if (error == ENOMEM) {
                if (mpt->outofbeer == 0) {
                    mpt->outofbeer = 1;
                    xpt_freeze_simq(mpt->sim, 1);
                    mpt_lprt(mpt, MPT_PRT_DEBUG,
                        "FREEZEQ\n");
                }
                status = CAM_REQUEUE_REQ;
            } else {
                status = CAM_REQ_CMP_ERR;
            }
            mpt_set_ccb_status(ccb, status);
        }
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    /*
     * No data to transfer?
     * Just make a single simple SGL with zero length.
     */

    if (mpt->verbose >= MPT_PRT_DEBUG) {
        int tidx = ((char *)sglp) - mpt_off;
        memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
    }

    if (nseg == 0) {
        SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
        MPI_pSGE_SET_FLAGS(se1,
            (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
            MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
        se1->FlagsLength = htole32(se1->FlagsLength);
        goto out;
    }

    flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
    if (istgt == 0) {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    } else {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    }

    /*
     * Pick the DMA sync op the same way mpt_execute_req_a64() does
     * (the two branches were inverted relative to that function, which
     * looks like a transcription error).
     */
    if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
        bus_dmasync_op_t op;
        if (istgt == 0) {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREREAD;
            } else {
                op = BUS_DMASYNC_PREWRITE;
            }
        } else {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREWRITE;
            } else {
                op = BUS_DMASYNC_PREREAD;
            }
        }
        bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
    }

    /*
     * Okay, fill in what we can at the end of the command frame.
     * If we have up to MPT_NSGL_FIRST, we can fit them all into
     * the command frame.
     *
     * Otherwise, we fill up through MPT_NSGL_FIRST less one
     * SIMPLE32 pointers and start doing CHAIN32 entries after
     * that.
     */
    if (nseg < MPT_NSGL_FIRST(mpt)) {
        first_lim = nseg;
    } else {
        /*
         * Leave room for CHAIN element.
         */
        first_lim = MPT_NSGL_FIRST(mpt) - 1;
    }

    se = (SGE_SIMPLE32 *) sglp;
    for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
        uint32_t tf;

        memset(se, 0, sizeof (*se));
        se->Address = dm_segs->ds_addr;
        MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
        tf = flags;
        if (seg == first_lim - 1) {
            tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
        }
        if (seg == nseg - 1) {
            tf |= MPI_SGE_FLAGS_END_OF_LIST |
                MPI_SGE_FLAGS_END_OF_BUFFER;
        }
        MPI_pSGE_SET_FLAGS(se, tf);
        se->FlagsLength = htole32(se->FlagsLength);
    }

    if (seg == nseg) {
        goto out;
    }

    /*
     * Tell the IOC where to find the first chain element.
     */
    hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
    nxt_off = MPT_RQSL(mpt);
    trq = req;

    /*
     * Make up the rest of the data segments out of a chain element
     * (contained in the current request frame) which points to
     * SIMPLE32 elements in the next request frame, possibly ending
     * with *another* chain element (if there's more).
     */
    while (seg < nseg) {
        int this_seg_lim;
        uint32_t tf, cur_off;
        bus_addr_t chain_list_addr;

        /*
         * Point to the chain descriptor. Note that the chain
         * descriptor is at the end of the *previous* list (whether
         * chain or simple).
         */
        ce = (SGE_CHAIN32 *) se;

        /*
         * Before we change our current pointer, make sure we won't
         * overflow the request area with this frame. Note that we
         * test against 'greater than' here as it's okay in this case
         * to have next offset be just outside the request area.
         */
        if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
            nxt_off = MPT_REQUEST_AREA;
            goto next_chain;
        }

        /*
         * Set our SGE element pointer to the beginning of the chain
         * list and update our next chain list offset.
         */
        se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
        cur_off = nxt_off;
        nxt_off += MPT_RQSL(mpt);

        /*
         * Now initialize the chain descriptor.
         */
        memset(ce, 0, sizeof (*ce));

        /*
         * Get the physical address of the chain list.
         */
        chain_list_addr = trq->req_pbuf;
        chain_list_addr += cur_off;

        ce->Address = chain_list_addr;
        ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;

        /*
         * If we have more than a frame's worth of segments left,
         * set up the chain list to have the last element be another
         * chain descriptor.
         */
        if ((nseg - seg) > MPT_NSGL(mpt)) {
            this_seg_lim = seg + MPT_NSGL(mpt) - 1;
            /*
             * The length of the chain is the length in bytes of the
             * number of segments plus the next chain element.
             *
             * The next chain descriptor offset is the length,
             * in words, of the number of segments.
             */
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE32);
            ce->NextChainOffset = ce->Length >> 2;
            ce->Length += sizeof (SGE_CHAIN32);
        } else {
            this_seg_lim = nseg;
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE32);
        }

        /*
         * Fill in the chain list SGE elements with our segment data.
         *
         * If we're the last element in this chain list, set the last
         * element flag. If we're the completely last element period,
         * set the end of list and end of buffer flags.
         */
        while (seg < this_seg_lim) {
            memset(se, 0, sizeof (*se));
            se->Address = dm_segs->ds_addr;
            MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
            tf = flags;
            if (seg == this_seg_lim - 1) {
                tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
            }
            if (seg == nseg - 1) {
                tf |= MPI_SGE_FLAGS_END_OF_LIST |
                    MPI_SGE_FLAGS_END_OF_BUFFER;
            }
            MPI_pSGE_SET_FLAGS(se, tf);
            se->FlagsLength = htole32(se->FlagsLength);
            se++;
            seg++;
            dm_segs++;
        }

    next_chain:
        /*
         * If we have more segments to do and we've used up all of
         * the space in a request area, go allocate another one
         * and chain to that.
         */
        if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
            request_t *nrq;

            CAMLOCK_2_MPTLOCK(mpt);
            nrq = mpt_get_request(mpt, FALSE);
            MPTLOCK_2_CAMLOCK(mpt);

            if (nrq == NULL) {
                error = ENOMEM;
                goto bad;
            }

            /*
             * Append the new request area on the tail of our list.
             */
            if ((trq = req->chain) == NULL) {
                req->chain = nrq;
            } else {
                while (trq->chain != NULL) {
                    trq = trq->chain;
                }
                trq->chain = nrq;
            }
            trq = nrq;
            mpt_off = trq->req_vbuf;
            if (mpt->verbose >= MPT_PRT_DEBUG) {
                memset(mpt_off, 0xff, MPT_REQUEST_AREA);
            }
            nxt_off = 0;
        }
    }
out:

    /*
     * Last time we need to check if this CCB needs to be aborted.
     */
    if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        mpt_prt(mpt,
            "mpt_execute_req: I/O cancelled (status 0x%x)\n",
            ccb->ccb_h.status & CAM_STATUS_MASK);
        if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
            bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    ccb->ccb_h.status |= CAM_SIM_QUEUED;
    if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
        ccb->ccb_h.timeout_ch =
            timeout(mpt_timeout, (caddr_t)ccb,
            (ccb->ccb_h.timeout * hz) / 1000);
    } else {
        callout_handle_init(&ccb->ccb_h.timeout_ch);
    }
    if (mpt->verbose > MPT_PRT_DEBUG) {
        int nc = 0;
        mpt_print_request(req->req_vbuf);
        for (trq = req->chain; trq; trq = trq->chain) {
            printf("  Additional Chain Area %d\n", nc++);
            mpt_dump_sgl(trq->req_vbuf, 0);
        }
    }

    if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
        request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
        mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef WE_TRUST_AUTO_GOOD_STATUS
        if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
            csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
            tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
        } else {
            tgt->state = TGT_STATE_MOVING_DATA;
        }
#else
        tgt->state = TGT_STATE_MOVING_DATA;
#endif
    }
    CAMLOCK_2_MPTLOCK(mpt);
    mpt_send_cmd(mpt, req);
    MPTLOCK_2_CAMLOCK(mpt);
}
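/*
 * mpt_execute_req_a64() and mpt_execute_req() are deliberately parallel:
 * they differ only in emitting 64-bit (SGE_SIMPLE64/SGE_CHAIN64) versus
 * 32-bit (SGE_SIMPLE32/SGE_CHAIN32) scatter/gather elements. mpt_start()
 * below selects between them based on sizeof (bus_addr_t).
 */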
static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
    request_t *req;
    struct mpt_softc *mpt;
    MSG_SCSI_IO_REQUEST *mpt_req;
    struct ccb_scsiio *csio = &ccb->csio;
    struct ccb_hdr *ccbh = &ccb->ccb_h;
    bus_dmamap_callback_t *cb;
    target_id_t tgt;
    int raid_passthru;

    /* Get the pointer for the physical adapter */
    mpt = ccb->ccb_h.ccb_mpt_ptr;
    raid_passthru = (sim == mpt->phydisk_sim);

    CAMLOCK_2_MPTLOCK(mpt);
    if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
        if (mpt->outofbeer == 0) {
            mpt->outofbeer = 1;
            xpt_freeze_simq(mpt->sim, 1);
            mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
        MPTLOCK_2_CAMLOCK(mpt);
        xpt_done(ccb);
        return;
    }
#ifdef INVARIANTS
    mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
#endif
    MPTLOCK_2_CAMLOCK(mpt);

    if (sizeof (bus_addr_t) > 4) {
        cb = mpt_execute_req_a64;
    } else {
        cb = mpt_execute_req;
    }

    /*
     * Link the ccb and the request structure so we can find
     * the other knowing either the request or the ccb.
     */
    req->ccb = ccb;
    ccb->ccb_h.ccb_req_ptr = req;

    /* Now we build the command for the IOC */
    mpt_req = req->req_vbuf;
    memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));

    mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
    if (raid_passthru) {
        mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
        CAMLOCK_2_MPTLOCK(mpt);
        if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
            MPTLOCK_2_CAMLOCK(mpt);
            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
            mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
            xpt_done(ccb);
            return;
        }
        MPTLOCK_2_CAMLOCK(mpt);
        mpt_req->Bus = 0;	/* we never set bus here */
    } else {
        tgt = ccb->ccb_h.target_id;
        mpt_req->Bus = 0;	/* XXX */
    }
    mpt_req->SenseBufferLength =
        (csio->sense_len < MPT_SENSE_SIZE) ?
        csio->sense_len : MPT_SENSE_SIZE;

    /*
     * We use the message context to find the request structure when
     * we get the command completion interrupt from the IOC.
     */
    mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
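    /*
     * The context value is the request index OR'd with the reply
     * handler id, so the reply dispatcher can recover both the handler
     * to run and the request_t the reply belongs to from one 32-bit
     * cookie. mpt_fc_reset_link() below builds its context the same way.
     */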
    /* Which physical device to do the I/O on */
    mpt_req->TargetID = tgt;

    /* We assume a single level LUN type */
    if (ccb->ccb_h.target_lun >= 256) {
        mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
        mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
    } else {
        mpt_req->LUN[1] = ccb->ccb_h.target_lun;
    }
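    /*
     * This is SAM-style flat space addressing: LUNs above 255 get
     * address method 01b in the top bits of byte 0. LUN 300 (0x12c),
     * for example, would be encoded as LUN[0] = 0x41, LUN[1] = 0x2c.
     */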
1918 */ 1919 int error; 1920 int s = splsoftvm(); 1921 error = bus_dmamap_load(mpt->buffer_dmat, 1922 req->dmap, csio->data_ptr, csio->dxfer_len, 1923 cb, req, 0); 1924 splx(s); 1925 if (error == EINPROGRESS) { 1926 /* 1927 * So as to maintain ordering, 1928 * freeze the controller queue 1929 * until our mapping is 1930 * returned. 1931 */ 1932 xpt_freeze_simq(mpt->sim, 1); 1933 ccbh->status |= CAM_RELEASE_SIMQ; 1934 } 1935 } else { 1936 /* 1937 * We have been given a pointer to single 1938 * physical buffer. 1939 */ 1940 struct bus_dma_segment seg; 1941 seg.ds_addr = 1942 (bus_addr_t)(vm_offset_t)csio->data_ptr; 1943 seg.ds_len = csio->dxfer_len; 1944 (*cb)(req, &seg, 1, 0); 1945 } 1946 } else { 1947 /* 1948 * We have been given a list of addresses. 1949 * This case could be easily supported but they are not 1950 * currently generated by the CAM subsystem so there 1951 * is no point in wasting the time right now. 1952 */ 1953 struct bus_dma_segment *segs; 1954 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { 1955 (*cb)(req, NULL, 0, EFAULT); 1956 } else { 1957 /* Just use the segments provided */ 1958 segs = (struct bus_dma_segment *)csio->data_ptr; 1959 (*cb)(req, segs, csio->sglist_cnt, 0); 1960 } 1961 } 1962 } else { 1963 (*cb)(req, NULL, 0, 0); 1964 } 1965 } 1966 1967 static int 1968 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, 1969 int sleep_ok) 1970 { 1971 int error; 1972 uint16_t status; 1973 uint8_t response; 1974 1975 error = mpt_scsi_send_tmf(mpt, 1976 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? 1977 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : 1978 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 1979 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 1980 0, /* XXX How do I get the channel ID? */ 1981 tgt != CAM_TARGET_WILDCARD ? tgt : 0, 1982 lun != CAM_LUN_WILDCARD ? lun : 0, 1983 0, sleep_ok); 1984 1985 if (error != 0) { 1986 /* 1987 * mpt_scsi_send_tmf hard resets on failure, so no 1988 * need to do so here. 1989 */ 1990 mpt_prt(mpt, 1991 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); 1992 return (EIO); 1993 } 1994 1995 /* Wait for bus reset to be processed by the IOC. */ 1996 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 1997 REQ_STATE_DONE, sleep_ok, 5000); 1998 1999 status = mpt->tmf_req->IOCStatus; 2000 response = mpt->tmf_req->ResponseCode; 2001 mpt->tmf_req->state = REQ_STATE_FREE; 2002 2003 if (error) { 2004 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " 2005 "Resetting controller.\n"); 2006 mpt_reset(mpt, TRUE); 2007 return (ETIMEDOUT); 2008 } 2009 2010 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 2011 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " 2012 "Resetting controller.\n", status); 2013 mpt_reset(mpt, TRUE); 2014 return (EIO); 2015 } 2016 2017 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 2018 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 2019 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. 
" 2020 "Resetting controller.\n", response); 2021 mpt_reset(mpt, TRUE); 2022 return (EIO); 2023 } 2024 return (0); 2025 } 2026 2027 static int 2028 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) 2029 { 2030 int r = 0; 2031 request_t *req; 2032 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; 2033 2034 req = mpt_get_request(mpt, FALSE); 2035 if (req == NULL) { 2036 return (ENOMEM); 2037 } 2038 fc = req->req_vbuf; 2039 memset(fc, 0, sizeof(*fc)); 2040 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; 2041 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; 2042 fc->MsgContext = htole32(req->index | fc_els_handler_id); 2043 mpt_send_cmd(mpt, req); 2044 if (dowait) { 2045 r = mpt_wait_req(mpt, req, REQ_STATE_DONE, 2046 REQ_STATE_DONE, FALSE, 60 * 1000); 2047 if (r == 0) { 2048 mpt_free_request(mpt, req); 2049 } 2050 } 2051 return (r); 2052 } 2053 2054 static int 2055 mpt_cam_event(struct mpt_softc *mpt, request_t *req, 2056 MSG_EVENT_NOTIFY_REPLY *msg) 2057 { 2058 uint32_t data0, data1; 2059 2060 data0 = le32toh(msg->Data[0]); 2061 data1 = le32toh(msg->Data[1]); 2062 switch(msg->Event & 0xFF) { 2063 case MPI_EVENT_UNIT_ATTENTION: 2064 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", 2065 (data0 >> 8) & 0xff, data0 & 0xff); 2066 break; 2067 2068 case MPI_EVENT_IOC_BUS_RESET: 2069 /* We generated a bus reset */ 2070 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", 2071 (data0 >> 8) & 0xff); 2072 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2073 break; 2074 2075 case MPI_EVENT_EXT_BUS_RESET: 2076 /* Someone else generated a bus reset */ 2077 mpt_prt(mpt, "External Bus Reset Detected\n"); 2078 /* 2079 * These replies don't return EventData like the MPI 2080 * spec says they do 2081 */ 2082 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2083 break; 2084 2085 case MPI_EVENT_RESCAN: 2086 #if __FreeBSD_version >= 600000 2087 { 2088 union ccb *ccb; 2089 uint32_t pathid; 2090 struct cam_sim *sim; 2091 /* 2092 * In general this means a device has been added to the loop. 2093 */ 2094 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2095 if (mpt->ready == 0) { 2096 break; 2097 } 2098 if (mpt->phydisk_sim) { 2099 sim = mpt->phydisk_sim; 2100 } else { 2101 sim = mpt->sim; 2102 } 2103 pathid = cam_sim_path(sim); 2104 MPTLOCK_2_CAMLOCK(mpt); 2105 /* 2106 * Allocate a CCB, create a wildcard path for this bus, 2107 * and schedule a rescan. 2108 */ 2109 ccb = xpt_alloc_ccb_nowait(sim); 2110 if (ccb == NULL) { 2111 mpt_prt(mpt, "unable to alloc CCB for rescan\n"); 2112 CAMLOCK_2_MPTLOCK(mpt); 2113 break; 2114 } 2115 2116 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, 2117 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2118 CAMLOCK_2_MPTLOCK(mpt); 2119 mpt_prt(mpt, "unable to create path for rescan\n"); 2120 xpt_free_ccb(ccb); 2121 break; 2122 } 2123 xpt_rescan(ccb); 2124 CAMLOCK_2_MPTLOCK(mpt); 2125 break; 2126 } 2127 #else 2128 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2129 break; 2130 #endif 2131 case MPI_EVENT_LINK_STATUS_CHANGE: 2132 mpt_prt(mpt, "Port %d: LinkState: %s\n", 2133 (data1 >> 8) & 0xff, 2134 ((data0 & 0xff) == 0)? 
"Failed" : "Active"); 2135 break; 2136 2137 case MPI_EVENT_LOOP_STATE_CHANGE: 2138 switch ((data0 >> 16) & 0xff) { 2139 case 0x01: 2140 mpt_prt(mpt, 2141 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " 2142 "(Loop Initialization)\n", 2143 (data1 >> 8) & 0xff, 2144 (data0 >> 8) & 0xff, 2145 (data0 ) & 0xff); 2146 switch ((data0 >> 8) & 0xff) { 2147 case 0xF7: 2148 if ((data0 & 0xff) == 0xF7) { 2149 mpt_prt(mpt, "Device needs AL_PA\n"); 2150 } else { 2151 mpt_prt(mpt, "Device %02x doesn't like " 2152 "FC performance\n", 2153 data0 & 0xFF); 2154 } 2155 break; 2156 case 0xF8: 2157 if ((data0 & 0xff) == 0xF7) { 2158 mpt_prt(mpt, "Device had loop failure " 2159 "at its receiver prior to acquiring" 2160 " AL_PA\n"); 2161 } else { 2162 mpt_prt(mpt, "Device %02x detected loop" 2163 " failure at its receiver\n", 2164 data0 & 0xFF); 2165 } 2166 break; 2167 default: 2168 mpt_prt(mpt, "Device %02x requests that device " 2169 "%02x reset itself\n", 2170 data0 & 0xFF, 2171 (data0 >> 8) & 0xFF); 2172 break; 2173 } 2174 break; 2175 case 0x02: 2176 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2177 "LPE(%02x,%02x) (Loop Port Enable)\n", 2178 (data1 >> 8) & 0xff, /* Port */ 2179 (data0 >> 8) & 0xff, /* Character 3 */ 2180 (data0 ) & 0xff /* Character 4 */); 2181 break; 2182 case 0x03: 2183 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2184 "LPB(%02x,%02x) (Loop Port Bypass)\n", 2185 (data1 >> 8) & 0xff, /* Port */ 2186 (data0 >> 8) & 0xff, /* Character 3 */ 2187 (data0 ) & 0xff /* Character 4 */); 2188 break; 2189 default: 2190 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " 2191 "FC event (%02x %02x %02x)\n", 2192 (data1 >> 8) & 0xff, /* Port */ 2193 (data0 >> 16) & 0xff, /* Event */ 2194 (data0 >> 8) & 0xff, /* Character 3 */ 2195 (data0 ) & 0xff /* Character 4 */); 2196 } 2197 break; 2198 2199 case MPI_EVENT_LOGOUT: 2200 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", 2201 (data1 >> 8) & 0xff, data0); 2202 break; 2203 case MPI_EVENT_QUEUE_FULL: 2204 { 2205 struct cam_sim *sim; 2206 struct cam_path *tmppath; 2207 struct ccb_relsim crs; 2208 PTR_EVENT_DATA_QUEUE_FULL pqf = 2209 (PTR_EVENT_DATA_QUEUE_FULL) msg->Data; 2210 lun_id_t lun_id; 2211 2212 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " 2213 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); 2214 if (mpt->phydisk_sim) { 2215 sim = mpt->phydisk_sim; 2216 } else { 2217 sim = mpt->sim; 2218 } 2219 MPTLOCK_2_CAMLOCK(mpt); 2220 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { 2221 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2222 pqf->TargetID, lun_id) != CAM_REQ_CMP) { 2223 mpt_prt(mpt, "unable to create a path to send " 2224 "XPT_REL_SIMQ"); 2225 CAMLOCK_2_MPTLOCK(mpt); 2226 break; 2227 } 2228 xpt_setup_ccb(&crs.ccb_h, tmppath, 5); 2229 crs.ccb_h.func_code = XPT_REL_SIMQ; 2230 crs.release_flags = RELSIM_ADJUST_OPENINGS; 2231 crs.openings = pqf->CurrentDepth - 1; 2232 xpt_action((union ccb *)&crs); 2233 if (crs.ccb_h.status != CAM_REQ_CMP) { 2234 mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); 2235 } 2236 xpt_free_path(tmppath); 2237 } 2238 CAMLOCK_2_MPTLOCK(mpt); 2239 break; 2240 } 2241 case MPI_EVENT_EVENT_CHANGE: 2242 case MPI_EVENT_INTEGRATED_RAID: 2243 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2244 case MPI_EVENT_SAS_SES: 2245 break; 2246 default: 2247 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", 2248 msg->Event & 0xFF); 2249 return (0); 2250 } 2251 return (1); 2252 } 2253 2254 /* 2255 * Reply path for all SCSI I/O requests, called from our 2256 * interrupt handler by extracting our handler index from 2257 * the MsgContext field 
of the reply from the IOC. 2258 * 2259 * This routine is optimized for the common case of a 2260 * completion without error. All exception handling is 2261 * offloaded to non-inlined helper routines to minimize 2262 * cache footprint. 2263 */ 2264 static int 2265 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, 2266 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2267 { 2268 MSG_SCSI_IO_REQUEST *scsi_req; 2269 union ccb *ccb; 2270 target_id_t tgt; 2271 2272 if (req->state == REQ_STATE_FREE) { 2273 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); 2274 return (TRUE); 2275 } 2276 2277 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; 2278 ccb = req->ccb; 2279 if (ccb == NULL) { 2280 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", 2281 req, req->serno); 2282 return (TRUE); 2283 } 2284 2285 tgt = scsi_req->TargetID; 2286 untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch); 2287 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2288 2289 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2290 bus_dmasync_op_t op; 2291 2292 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 2293 op = BUS_DMASYNC_POSTREAD; 2294 else 2295 op = BUS_DMASYNC_POSTWRITE; 2296 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 2297 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2298 } 2299 2300 if (reply_frame == NULL) { 2301 /* 2302 * Context only reply, completion without error status. 2303 */ 2304 ccb->csio.resid = 0; 2305 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2306 ccb->csio.scsi_status = SCSI_STATUS_OK; 2307 } else { 2308 mpt_scsi_reply_frame_handler(mpt, req, reply_frame); 2309 } 2310 2311 if (mpt->outofbeer) { 2312 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2313 mpt->outofbeer = 0; 2314 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 2315 } 2316 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { 2317 struct scsi_inquiry_data *iq = 2318 (struct scsi_inquiry_data *)ccb->csio.data_ptr; 2319 if (scsi_req->Function == 2320 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 2321 /* 2322 * Fake out the device type so that only the 2323 * pass-thru device will attach. 2324 */ 2325 iq->device &= ~0x1F; 2326 iq->device |= T_NODEVICE; 2327 } 2328 } 2329 if (mpt->verbose == MPT_PRT_DEBUG) { 2330 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", 2331 req, req->serno); 2332 } 2333 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 2334 MPTLOCK_2_CAMLOCK(mpt); 2335 xpt_done(ccb); 2336 CAMLOCK_2_MPTLOCK(mpt); 2337 if ((req->state & REQ_STATE_TIMEDOUT) == 0) { 2338 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2339 } else { 2340 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", 2341 req, req->serno); 2342 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 2343 } 2344 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, 2345 ("CCB req needed wakeup")); 2346 #ifdef INVARIANTS 2347 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); 2348 #endif 2349 mpt_free_request(mpt, req); 2350 return (TRUE); 2351 } 2352 2353 static int 2354 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, 2355 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2356 { 2357 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; 2358 2359 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); 2360 #ifdef INVARIANTS 2361 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); 2362 #endif 2363 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; 2364 /* Record IOC Status and Response Code of TMF for any waiters. 
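	 *
	 * A waiter (e.g. mpt_bus_reset() above) consumes these fields right
	 * after its mpt_wait_req() call returns, in the pattern:
	 *
	 *	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
	 *	    REQ_STATE_DONE, sleep_ok, 5000);
	 *	status = mpt->tmf_req->IOCStatus;
	 *	response = mpt->tmf_req->ResponseCode;
	 *
	 * so they must be latched here before the request goes back to the
	 * FREE state and can be reused.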
*/ 2365 req->IOCStatus = le16toh(tmf_reply->IOCStatus); 2366 req->ResponseCode = tmf_reply->ResponseCode; 2367 2368 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", 2369 req, req->serno, le16toh(tmf_reply->IOCStatus)); 2370 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2371 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 2372 req->state |= REQ_STATE_DONE; 2373 wakeup(req); 2374 } else { 2375 mpt->tmf_req->state = REQ_STATE_FREE; 2376 } 2377 return (TRUE); 2378 } 2379 2380 /* 2381 * XXX: Move to definitions file 2382 */ 2383 #define ELS 0x22 2384 #define FC4LS 0x32 2385 #define ABTS 0x81 2386 #define BA_ACC 0x84 2387 2388 #define LS_RJT 0x01 2389 #define LS_ACC 0x02 2390 #define PLOGI 0x03 2391 #define LOGO 0x05 2392 #define SRR 0x14 2393 #define PRLI 0x20 2394 #define PRLO 0x21 2395 #define ADISC 0x52 2396 #define RSCN 0x61 2397 2398 static void 2399 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, 2400 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) 2401 { 2402 uint32_t fl; 2403 MSG_LINK_SERVICE_RSP_REQUEST tmp; 2404 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; 2405 2406 /* 2407 * We are going to reuse the ELS request to send this response back. 2408 */ 2409 rsp = &tmp; 2410 memset(rsp, 0, sizeof(*rsp)); 2411 2412 #ifdef USE_IMMEDIATE_LINK_DATA 2413 /* 2414 * Apparently the IMMEDIATE stuff doesn't seem to work. 2415 */ 2416 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; 2417 #endif 2418 rsp->RspLength = length; 2419 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; 2420 rsp->MsgContext = htole32(req->index | fc_els_handler_id); 2421 2422 /* 2423 * Copy over information from the original reply frame to 2424 * it's correct place in the response. 2425 */ 2426 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); 2427 2428 /* 2429 * And now copy back the temporary area to the original frame. 2430 */ 2431 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); 2432 rsp = req->req_vbuf; 2433 2434 #ifdef USE_IMMEDIATE_LINK_DATA 2435 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); 2436 #else 2437 { 2438 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; 2439 bus_addr_t paddr = req->req_pbuf; 2440 paddr += MPT_RQSL(mpt); 2441 2442 fl = 2443 MPI_SGE_FLAGS_HOST_TO_IOC | 2444 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2445 MPI_SGE_FLAGS_LAST_ELEMENT | 2446 MPI_SGE_FLAGS_END_OF_LIST | 2447 MPI_SGE_FLAGS_END_OF_BUFFER; 2448 fl <<= MPI_SGE_FLAGS_SHIFT; 2449 fl |= (length); 2450 se->FlagsLength = htole32(fl); 2451 se->Address = htole32((uint32_t) paddr); 2452 } 2453 #endif 2454 2455 /* 2456 * Send it on... 
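	 *
	 * For reference, the simple SGE built above packs its flags and
	 * byte count into the single 32-bit FlagsLength word: the flag bits
	 * are shifted up by MPI_SGE_FLAGS_SHIFT and OR'd with the length in
	 * the low bits, so a 20-byte host-to-IOC response is, illustratively,
	 *
	 *	fl = (flags << MPI_SGE_FLAGS_SHIFT) | 20;
	 *	se->FlagsLength = htole32(fl);
	 *
	 * mpt_send_cmd() below then hands the reused request frame back to
	 * the IOC.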
	 */
	mpt_send_cmd(mpt, req);
}

static int
mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
	U8 rctl;
	U8 type;
	U8 cmd;
	U16 status = le16toh(reply_frame->IOCStatus);
	U32 *elsbuf;
	int ioindex;
	int do_refresh = TRUE;

#ifdef	INVARIANTS
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
	    req, req->serno, rp->Function));
	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
	} else {
		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
	}
#endif
	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
	    req, req->serno, reply_frame, reply_frame->Function);

	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
		    status, reply_frame->Function);
		if (status == MPI_IOCSTATUS_INVALID_STATE) {
			/*
			 * XXX: to get around shutdown issue
			 */
			mpt->disabled = 1;
			return (TRUE);
		}
		return (TRUE);
	}

	/*
	 * If the function is a link service response, we recycle the
	 * response to be a refresh for a new link service request.
	 *
	 * The request pointer is bogus in this case and we have to fetch
	 * it based upon the TransactionContext.
	 */
	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
		/* Freddie Uncle Charlie Katie */
		/* We don't get the IOINDEX as part of the Link Svc Rsp */
		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
			if (mpt->els_cmd_ptrs[ioindex] == req) {
				break;
			}

		KASSERT(ioindex < mpt->els_cmds_allocated,
		    ("can't find my mommie!"));

		/* remove from active list as we're going to re-post it */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_post_els(mpt, req, ioindex);
		return (TRUE);
	}

	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		if (req->state & REQ_STATE_TIMEDOUT) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Sync Primitive Send Completed After Timeout\n");
			mpt_free_request(mpt, req);
		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Async Primitive Send Complete\n");
			mpt_free_request(mpt, req);
		} else {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Sync Primitive Send Complete- Waking Waiter\n");
			wakeup(req);
		}
		return (TRUE);
	}

	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
		    rp->MsgLength, rp->MsgFlags);
		return (TRUE);
	}

	if (rp->MsgLength <= 5) {
		/*
		 * This is just an ack of an original ELS buffer post
		 */
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
		return (TRUE);
	}

	rctl = (le32toh(rp->Rctl_Did) &
	    MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;

	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
	cmd = be32toh(elsbuf[0]) >> 24;

	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
		return (TRUE);
	}

	ioindex = le32toh(rp->TransactionContext);
	req = mpt->els_cmd_ptrs[ioindex];

	if (rctl == ELS && type == 1) {
		switch (cmd) {
		case PRLI:
			/*
			 * Send back a PRLI ACC
			 */
			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
			    le32toh(rp->Wwn.PortNameHigh),
			    le32toh(rp->Wwn.PortNameLow));
			elsbuf[0] = htobe32(0x02100014);
			elsbuf[1] |= htobe32(0x00000100);
			elsbuf[4] = htobe32(0x00000002);
			if (mpt->role & MPT_ROLE_TARGET)
				elsbuf[4] |= htobe32(0x00000010);
			if (mpt->role & MPT_ROLE_INITIATOR)
				elsbuf[4] |= htobe32(0x00000020);
			/* remove from active list as we're done */
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			req->state &= ~REQ_STATE_QUEUED;
			req->state |= REQ_STATE_DONE;
			mpt_fc_els_send_response(mpt, req, rp, 20);
			do_refresh = FALSE;
			break;
		case PRLO:
			memset(elsbuf, 0, 5 * (sizeof (U32)));
			elsbuf[0] = htobe32(0x02100014);
			elsbuf[1] = htobe32(0x08000100);
			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
			    le32toh(rp->Wwn.PortNameHigh),
			    le32toh(rp->Wwn.PortNameLow));
			/* remove from active list as we're done */
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			req->state &= ~REQ_STATE_QUEUED;
			req->state |= REQ_STATE_DONE;
			mpt_fc_els_send_response(mpt, req, rp, 20);
			do_refresh = FALSE;
			break;
		default:
			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
			break;
		}
	} else if (rctl == ABTS && type == 0) {
		uint16_t rx_id = le16toh(rp->Rxid);
		uint16_t ox_id = le16toh(rp->Oxid);
		request_t *tgt_req = NULL;

		mpt_prt(mpt,
		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
		    le32toh(rp->Wwn.PortNameLow));
		if (rx_id >= mpt->mpt_max_tgtcmds) {
			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
		} else if (mpt->tgt_cmd_ptrs == NULL) {
			mpt_prt(mpt, "No TGT CMD PTRS\n");
		} else {
			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
		}
		if (tgt_req) {
			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
			uint8_t *vbuf;
			union ccb *ccb = tgt->ccb;
			uint32_t ct_id;

			vbuf = tgt_req->req_vbuf;
			vbuf += MPT_RQSL(mpt);

			/*
			 * Check to make sure we have the correct command.
			 * The reply descriptor in the target state should
			 * contain an IoIndex that matches the RX_ID.
			 *
			 * It'd be nice to have OX_ID to crosscheck with
			 * as well.
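			 *
			 * The descriptor was saved in tgt->reply_desc when
			 * the command first arrived, and GET_IO_INDEX()
			 * (defined in mpt.h) extracts its IoIndex field.
			 * Target command buffers are posted with IoIndex
			 * equal to their tgt_cmd_ptrs[] slot, so for a
			 * well-formed ABTS we expect
			 *
			 *	GET_IO_INDEX(tgt->reply_desc) == rx_id
			 *
			 * which is exactly what is verified below.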
			 */
			ct_id = GET_IO_INDEX(tgt->reply_desc);

			if (ct_id != rx_id) {
				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
				    rx_id, ct_id);
				goto skip;
			}

			ccb = tgt->ccb;
			if (ccb) {
				mpt_prt(mpt,
				    "CCB (%p): lun %u flags %x status %x\n",
				    ccb, ccb->ccb_h.target_lun,
				    ccb->ccb_h.flags, ccb->ccb_h.status);
			}
			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
			    "%x nxfers %x\n", tgt->state,
			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
			    tgt->nxfers);
  skip:
			if (mpt_abort_target_cmd(mpt, tgt_req)) {
				mpt_prt(mpt, "unable to start TargetAbort\n");
			}
		} else {
			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
		}
		memset(elsbuf, 0, 5 * (sizeof (U32)));
		elsbuf[0] = htobe32(0);
		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
		elsbuf[2] = htobe32(0x000ffff);
		/*
		 * Dork with the reply frame so that the response to it
		 * will be correct.
		 */
		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_els_send_response(mpt, req, rp, 12);
		do_refresh = FALSE;
	} else {
		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
	}
	if (do_refresh == TRUE) {
		/* remove from active list as we're done */
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		mpt_fc_post_els(mpt, req, ioindex);
	}
	return (TRUE);
}

/*
 * Clean up all SCSI Initiator personality state in response
 * to a controller reset.
 */
static void
mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
{
	/*
	 * The pending list is already run down by
	 * the generic handler. Perform the same
	 * operation on the timed out request list.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
	    MPI_IOCSTATUS_INVALID_STATE);

	/*
	 * XXX: We need to repost ELS and Target Command Buffers?
	 */

	/*
	 * Inform the XPT that a bus reset has occurred.
	 */
	xpt_async(AC_BUS_RESET, mpt->path, NULL);
}

/*
 * Parse additional completion information in the reply
 * frame for SCSI I/O requests.
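 *
 * The 16-bit IOCStatus carries a "log info available" flag in its top
 * bit and the status code proper in the remaining bits, so the handler
 * below splits it apart:
 *
 *	ioc_status = le16toh(scsi_io_reply->IOCStatus);
 *	loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE;
 *	ioc_status &= MPI_IOCSTATUS_MASK;
 *
 * For example, a raw value of 0x8045 would be "log info available"
 * plus MPI_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045).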
2738 */ 2739 static int 2740 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, 2741 MSG_DEFAULT_REPLY *reply_frame) 2742 { 2743 union ccb *ccb; 2744 MSG_SCSI_IO_REPLY *scsi_io_reply; 2745 u_int ioc_status; 2746 u_int sstate; 2747 u_int loginfo; 2748 2749 MPT_DUMP_REPLY_FRAME(mpt, reply_frame); 2750 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST 2751 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, 2752 ("MPT SCSI I/O Handler called with incorrect reply type")); 2753 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, 2754 ("MPT SCSI I/O Handler called with continuation reply")); 2755 2756 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; 2757 ioc_status = le16toh(scsi_io_reply->IOCStatus); 2758 loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE; 2759 ioc_status &= MPI_IOCSTATUS_MASK; 2760 sstate = scsi_io_reply->SCSIState; 2761 2762 ccb = req->ccb; 2763 ccb->csio.resid = 2764 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); 2765 2766 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 2767 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { 2768 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 2769 ccb->csio.sense_resid = 2770 ccb->csio.sense_len - scsi_io_reply->SenseCount; 2771 bcopy(req->sense_vbuf, &ccb->csio.sense_data, 2772 min(ccb->csio.sense_len, scsi_io_reply->SenseCount)); 2773 } 2774 2775 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { 2776 /* 2777 * Tag messages rejected, but non-tagged retry 2778 * was successful. 2779 XXXX 2780 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); 2781 */ 2782 } 2783 2784 switch(ioc_status) { 2785 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 2786 /* 2787 * XXX 2788 * Linux driver indicates that a zero 2789 * transfer length with this error code 2790 * indicates a CRC error. 2791 * 2792 * No need to swap the bytes for checking 2793 * against zero. 2794 */ 2795 if (scsi_io_reply->TransferCount == 0) { 2796 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 2797 break; 2798 } 2799 /* FALLTHROUGH */ 2800 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 2801 case MPI_IOCSTATUS_SUCCESS: 2802 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 2803 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { 2804 /* 2805 * Status was never returned for this transaction. 2806 */ 2807 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); 2808 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { 2809 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; 2810 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); 2811 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) 2812 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); 2813 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { 2814 2815 /* XXX Handle SPI-Packet and FCP-2 reponse info. */ 2816 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 2817 } else 2818 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2819 break; 2820 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: 2821 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); 2822 break; 2823 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: 2824 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 2825 break; 2826 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 2827 /* 2828 * Since selection timeouts and "device really not 2829 * there" are grouped into this error code, report 2830 * selection timeout. Selection timeouts are 2831 * typically retried before giving up on the device 2832 * whereas "device not there" errors are considered 2833 * unretryable. 
2834 */ 2835 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 2836 break; 2837 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: 2838 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); 2839 break; 2840 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 2841 mpt_set_ccb_status(ccb, CAM_PATH_INVALID); 2842 break; 2843 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 2844 mpt_set_ccb_status(ccb, CAM_TID_INVALID); 2845 break; 2846 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 2847 ccb->ccb_h.status = CAM_UA_TERMIO; 2848 break; 2849 case MPI_IOCSTATUS_INVALID_STATE: 2850 /* 2851 * The IOC has been reset. Emulate a bus reset. 2852 */ 2853 /* FALLTHROUGH */ 2854 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: 2855 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 2856 break; 2857 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: 2858 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: 2859 /* 2860 * Don't clobber any timeout status that has 2861 * already been set for this transaction. We 2862 * want the SCSI layer to be able to differentiate 2863 * between the command we aborted due to timeout 2864 * and any innocent bystanders. 2865 */ 2866 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) 2867 break; 2868 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); 2869 break; 2870 2871 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 2872 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); 2873 break; 2874 case MPI_IOCSTATUS_BUSY: 2875 mpt_set_ccb_status(ccb, CAM_BUSY); 2876 break; 2877 case MPI_IOCSTATUS_INVALID_FUNCTION: 2878 case MPI_IOCSTATUS_INVALID_SGL: 2879 case MPI_IOCSTATUS_INTERNAL_ERROR: 2880 case MPI_IOCSTATUS_INVALID_FIELD: 2881 default: 2882 /* XXX 2883 * Some of the above may need to kick 2884 * of a recovery action!!!! 2885 */ 2886 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; 2887 break; 2888 } 2889 2890 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2891 mpt_freeze_ccb(ccb); 2892 } 2893 2894 return (TRUE); 2895 } 2896 2897 static void 2898 mpt_action(struct cam_sim *sim, union ccb *ccb) 2899 { 2900 struct mpt_softc *mpt; 2901 struct ccb_trans_settings *cts; 2902 target_id_t tgt; 2903 lun_id_t lun; 2904 int raid_passthru; 2905 2906 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); 2907 2908 mpt = (struct mpt_softc *)cam_sim_softc(sim); 2909 KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action")); 2910 raid_passthru = (sim == mpt->phydisk_sim); 2911 2912 tgt = ccb->ccb_h.target_id; 2913 lun = ccb->ccb_h.target_lun; 2914 if (raid_passthru && 2915 ccb->ccb_h.func_code != XPT_PATH_INQ && 2916 ccb->ccb_h.func_code != XPT_RESET_BUS && 2917 ccb->ccb_h.func_code != XPT_RESET_DEV) { 2918 CAMLOCK_2_MPTLOCK(mpt); 2919 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 2920 MPTLOCK_2_CAMLOCK(mpt); 2921 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2922 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 2923 xpt_done(ccb); 2924 return; 2925 } 2926 MPTLOCK_2_CAMLOCK(mpt); 2927 } 2928 ccb->ccb_h.ccb_mpt_ptr = mpt; 2929 2930 switch (ccb->ccb_h.func_code) { 2931 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2932 /* 2933 * Do a couple of preliminary checks... 
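		 *
		 * Specifically: a CDB handed to us by physical address
		 * cannot be bcopy'd into the request frame by mpt_start(),
		 * and the frame's CDB array only holds 16 bytes, so both
		 * cases are failed early with CAM_REQ_INVALID instead of
		 * being sent to the IOC. A 32-byte CDB, for example, fails
		 * the length test below because
		 * sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB) is 16.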
2934 */ 2935 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2936 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2937 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2938 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 2939 break; 2940 } 2941 } 2942 /* Max supported CDB length is 16 bytes */ 2943 /* XXX Unless we implement the new 32byte message type */ 2944 if (ccb->csio.cdb_len > 2945 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { 2946 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2947 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 2948 break; 2949 } 2950 #ifdef MPT_TEST_MULTIPATH 2951 if (mpt->failure_id == ccb->ccb_h.target_id) { 2952 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2953 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 2954 break; 2955 } 2956 #endif 2957 ccb->csio.scsi_status = SCSI_STATUS_OK; 2958 mpt_start(sim, ccb); 2959 return; 2960 2961 case XPT_RESET_BUS: 2962 if (raid_passthru) { 2963 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2964 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2965 break; 2966 } 2967 case XPT_RESET_DEV: 2968 if (ccb->ccb_h.func_code == XPT_RESET_BUS) { 2969 if (bootverbose) { 2970 xpt_print(ccb->ccb_h.path, "reset bus\n"); 2971 } 2972 } else { 2973 xpt_print(ccb->ccb_h.path, "reset device\n"); 2974 } 2975 CAMLOCK_2_MPTLOCK(mpt); 2976 (void) mpt_bus_reset(mpt, tgt, lun, FALSE); 2977 MPTLOCK_2_CAMLOCK(mpt); 2978 2979 /* 2980 * mpt_bus_reset is always successful in that it 2981 * will fall back to a hard reset should a bus 2982 * reset attempt fail. 2983 */ 2984 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2985 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2986 break; 2987 2988 case XPT_ABORT: 2989 { 2990 union ccb *accb = ccb->cab.abort_ccb; 2991 CAMLOCK_2_MPTLOCK(mpt); 2992 switch (accb->ccb_h.func_code) { 2993 case XPT_ACCEPT_TARGET_IO: 2994 case XPT_IMMED_NOTIFY: 2995 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); 2996 break; 2997 case XPT_CONT_TARGET_IO: 2998 mpt_prt(mpt, "cannot abort active CTIOs yet\n"); 2999 ccb->ccb_h.status = CAM_UA_ABORT; 3000 break; 3001 case XPT_SCSI_IO: 3002 ccb->ccb_h.status = CAM_UA_ABORT; 3003 break; 3004 default: 3005 ccb->ccb_h.status = CAM_REQ_INVALID; 3006 break; 3007 } 3008 MPTLOCK_2_CAMLOCK(mpt); 3009 break; 3010 } 3011 3012 #ifdef CAM_NEW_TRAN_CODE 3013 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) 3014 #else 3015 #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS) 3016 #endif 3017 #define DP_DISC_ENABLE 0x1 3018 #define DP_DISC_DISABL 0x2 3019 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) 3020 3021 #define DP_TQING_ENABLE 0x4 3022 #define DP_TQING_DISABL 0x8 3023 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) 3024 3025 #define DP_WIDE 0x10 3026 #define DP_NARROW 0x20 3027 #define DP_WIDTH (DP_WIDE|DP_NARROW) 3028 3029 #define DP_SYNC 0x40 3030 3031 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 3032 { 3033 #ifdef CAM_NEW_TRAN_CODE 3034 struct ccb_trans_settings_scsi *scsi; 3035 struct ccb_trans_settings_spi *spi; 3036 #endif 3037 uint8_t dval; 3038 u_int period; 3039 u_int offset; 3040 int i, j; 3041 3042 cts = &ccb->cts; 3043 3044 if (mpt->is_fc || mpt->is_sas) { 3045 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3046 break; 3047 } 3048 3049 #ifdef CAM_NEW_TRAN_CODE 3050 scsi = &cts->proto_specific.scsi; 3051 spi = &cts->xport_specific.spi; 3052 3053 /* 3054 * We can be called just to valid transport and proto versions 3055 */ 3056 if (scsi->valid == 0 && spi->valid == 0) { 3057 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3058 break; 3059 } 3060 #endif 3061 3062 /* 3063 * Skip attempting settings on RAID volume disks. 
3064 * Other devices on the bus get the normal treatment. 3065 */ 3066 if (mpt->phydisk_sim && raid_passthru == 0 && 3067 mpt_is_raid_volume(mpt, tgt) != 0) { 3068 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3069 "no transfer settings for RAID vols\n"); 3070 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3071 break; 3072 } 3073 3074 i = mpt->mpt_port_page2.PortSettings & 3075 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 3076 j = mpt->mpt_port_page2.PortFlags & 3077 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 3078 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && 3079 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { 3080 mpt_lprt(mpt, MPT_PRT_ALWAYS, 3081 "honoring BIOS transfer negotiations\n"); 3082 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3083 break; 3084 } 3085 3086 dval = 0; 3087 period = 0; 3088 offset = 0; 3089 3090 #ifndef CAM_NEW_TRAN_CODE 3091 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 3092 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ? 3093 DP_DISC_ENABLE : DP_DISC_DISABL; 3094 } 3095 3096 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 3097 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ? 3098 DP_TQING_ENABLE : DP_TQING_DISABL; 3099 } 3100 3101 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 3102 dval |= cts->bus_width ? DP_WIDE : DP_NARROW; 3103 } 3104 3105 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 3106 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) { 3107 dval |= DP_SYNC; 3108 period = cts->sync_period; 3109 offset = cts->sync_offset; 3110 } 3111 #else 3112 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 3113 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? 3114 DP_DISC_ENABLE : DP_DISC_DISABL; 3115 } 3116 3117 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 3118 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 3119 DP_TQING_ENABLE : DP_TQING_DISABL; 3120 } 3121 3122 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 3123 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? 
3124 DP_WIDE : DP_NARROW; 3125 } 3126 3127 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { 3128 dval |= DP_SYNC; 3129 offset = spi->sync_offset; 3130 } else { 3131 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3132 &mpt->mpt_dev_page1[tgt]; 3133 offset = ptr->RequestedParameters; 3134 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3135 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3136 } 3137 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { 3138 dval |= DP_SYNC; 3139 period = spi->sync_period; 3140 } else { 3141 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3142 &mpt->mpt_dev_page1[tgt]; 3143 period = ptr->RequestedParameters; 3144 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3145 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3146 } 3147 #endif 3148 CAMLOCK_2_MPTLOCK(mpt); 3149 if (dval & DP_DISC_ENABLE) { 3150 mpt->mpt_disc_enable |= (1 << tgt); 3151 } else if (dval & DP_DISC_DISABL) { 3152 mpt->mpt_disc_enable &= ~(1 << tgt); 3153 } 3154 if (dval & DP_TQING_ENABLE) { 3155 mpt->mpt_tag_enable |= (1 << tgt); 3156 } else if (dval & DP_TQING_DISABL) { 3157 mpt->mpt_tag_enable &= ~(1 << tgt); 3158 } 3159 if (dval & DP_WIDTH) { 3160 mpt_setwidth(mpt, tgt, 1); 3161 } 3162 if (dval & DP_SYNC) { 3163 mpt_setsync(mpt, tgt, period, offset); 3164 } 3165 if (dval == 0) { 3166 MPTLOCK_2_CAMLOCK(mpt); 3167 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3168 break; 3169 } 3170 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3171 "set [%d]: 0x%x period 0x%x offset %d\n", 3172 tgt, dval, period, offset); 3173 if (mpt_update_spi_config(mpt, tgt)) { 3174 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3175 } else { 3176 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3177 } 3178 MPTLOCK_2_CAMLOCK(mpt); 3179 break; 3180 } 3181 case XPT_GET_TRAN_SETTINGS: 3182 { 3183 #ifdef CAM_NEW_TRAN_CODE 3184 struct ccb_trans_settings_scsi *scsi; 3185 cts = &ccb->cts; 3186 cts->protocol = PROTO_SCSI; 3187 if (mpt->is_fc) { 3188 struct ccb_trans_settings_fc *fc = 3189 &cts->xport_specific.fc; 3190 cts->protocol_version = SCSI_REV_SPC; 3191 cts->transport = XPORT_FC; 3192 cts->transport_version = 0; 3193 fc->valid = CTS_FC_VALID_SPEED; 3194 fc->bitrate = 100000; 3195 } else if (mpt->is_sas) { 3196 struct ccb_trans_settings_sas *sas = 3197 &cts->xport_specific.sas; 3198 cts->protocol_version = SCSI_REV_SPC2; 3199 cts->transport = XPORT_SAS; 3200 cts->transport_version = 0; 3201 sas->valid = CTS_SAS_VALID_SPEED; 3202 sas->bitrate = 300000; 3203 } else { 3204 cts->protocol_version = SCSI_REV_2; 3205 cts->transport = XPORT_SPI; 3206 cts->transport_version = 2; 3207 if (mpt_get_spi_settings(mpt, cts) != 0) { 3208 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3209 break; 3210 } 3211 } 3212 scsi = &cts->proto_specific.scsi; 3213 scsi->valid = CTS_SCSI_VALID_TQ; 3214 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 3215 #else 3216 cts = &ccb->cts; 3217 if (mpt->is_fc) { 3218 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3219 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3220 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3221 } else if (mpt->is_sas) { 3222 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3223 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3224 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3225 } else if (mpt_get_spi_settings(mpt, cts) != 0) { 3226 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3227 break; 3228 } 3229 #endif 3230 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3231 break; 3232 } 3233 case XPT_CALC_GEOMETRY: 3234 { 3235 struct ccb_calc_geometry *ccg; 3236 3237 ccg = &ccb->ccg; 3238 if (ccg->block_size == 0) { 3239 ccb->ccb_h.status &= 
		    ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
			break;
		}
		mpt_calc_geometry(ccg, /*extended*/1);
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
		break;
	}
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
		/*
		 * FC cards report MAX_DEVICES of 512, but
		 * the MSG_SCSI_IO_REQUEST target id field
		 * is only 8 bits. Until we fix the driver
		 * to support 'channels' for bus overflow,
		 * just limit it.
		 */
		if (cpi->max_target > 255) {
			cpi->max_target = 255;
		}

		/*
		 * VMware ESX reports > 16 devices and then dies when we probe.
		 */
		if (mpt->is_spi && cpi->max_target > 15) {
			cpi->max_target = 15;
		}
		cpi->max_lun = 7;
		cpi->initiator_id = mpt->mpt_ini_id;
		cpi->bus_id = cam_sim_bus(sim);

		/*
		 * The base speed is the speed of the underlying connection.
		 */
#ifdef	CAM_NEW_TRAN_CODE
		cpi->protocol = PROTO_SCSI;
		if (mpt->is_fc) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->transport = XPORT_FC;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC;
		} else if (mpt->is_sas) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 300000;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->transport = XPORT_SAS;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC2;
		} else {
			cpi->hba_misc = PIM_SEQSCAN;
			cpi->base_transfer_speed = 3300;
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
			cpi->transport = XPORT_SPI;
			cpi->transport_version = 2;
			cpi->protocol_version = SCSI_REV_2;
		}
#else
		if (mpt->is_fc) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
		} else if (mpt->is_sas) {
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->base_transfer_speed = 300000;
			cpi->hba_inquiry = PI_TAG_ABLE;
		} else {
			cpi->hba_misc = PIM_SEQSCAN;
			cpi->base_transfer_speed = 3300;
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		}
#endif

		/*
		 * We give our fake RAID passthru bus a width that is
		 * MaxPhysDisks wide and restrict it to one lun.
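		 *
		 * For example, with ioc_page2->MaxPhysDisks == 8 the
		 * passthru bus exposes targets 0..7, reports initiator_id 8
		 * (one past the last disk, so it can never collide with a
		 * physical disk id) and max_lun 0.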
3322 */ 3323 if (raid_passthru) { 3324 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; 3325 cpi->initiator_id = cpi->max_target + 1; 3326 cpi->max_lun = 0; 3327 } 3328 3329 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { 3330 cpi->hba_misc |= PIM_NOINITIATOR; 3331 } 3332 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 3333 cpi->target_sprt = 3334 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3335 } else { 3336 cpi->target_sprt = 0; 3337 } 3338 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3339 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); 3340 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3341 cpi->unit_number = cam_sim_unit(sim); 3342 cpi->ccb_h.status = CAM_REQ_CMP; 3343 break; 3344 } 3345 case XPT_EN_LUN: /* Enable LUN as a target */ 3346 { 3347 int result; 3348 3349 CAMLOCK_2_MPTLOCK(mpt); 3350 if (ccb->cel.enable) 3351 result = mpt_enable_lun(mpt, 3352 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3353 else 3354 result = mpt_disable_lun(mpt, 3355 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3356 MPTLOCK_2_CAMLOCK(mpt); 3357 if (result == 0) { 3358 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3359 } else { 3360 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3361 } 3362 break; 3363 } 3364 case XPT_NOTIFY_ACK: /* recycle notify ack */ 3365 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 3366 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 3367 { 3368 tgt_resource_t *trtp; 3369 lun_id_t lun = ccb->ccb_h.target_lun; 3370 ccb->ccb_h.sim_priv.entries[0].field = 0; 3371 ccb->ccb_h.sim_priv.entries[1].ptr = mpt; 3372 ccb->ccb_h.flags = 0; 3373 3374 if (lun == CAM_LUN_WILDCARD) { 3375 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 3376 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3377 break; 3378 } 3379 trtp = &mpt->trt_wildcard; 3380 } else if (lun >= MPT_MAX_LUNS) { 3381 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3382 break; 3383 } else { 3384 trtp = &mpt->trt[lun]; 3385 } 3386 CAMLOCK_2_MPTLOCK(mpt); 3387 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 3388 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3389 "Put FREE ATIO %p lun %d\n", ccb, lun); 3390 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, 3391 sim_links.stqe); 3392 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 3393 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3394 "Put FREE INOT lun %d\n", lun); 3395 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, 3396 sim_links.stqe); 3397 } else { 3398 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); 3399 } 3400 mpt_set_ccb_status(ccb, CAM_REQ_INPROG); 3401 MPTLOCK_2_CAMLOCK(mpt); 3402 return; 3403 } 3404 case XPT_CONT_TARGET_IO: 3405 CAMLOCK_2_MPTLOCK(mpt); 3406 mpt_target_start_io(mpt, ccb); 3407 MPTLOCK_2_CAMLOCK(mpt); 3408 return; 3409 3410 default: 3411 ccb->ccb_h.status = CAM_REQ_INVALID; 3412 break; 3413 } 3414 xpt_done(ccb); 3415 } 3416 3417 static int 3418 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) 3419 { 3420 #ifdef CAM_NEW_TRAN_CODE 3421 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 3422 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 3423 #endif 3424 target_id_t tgt; 3425 uint32_t dval, pval, oval; 3426 int rv; 3427 3428 if (IS_CURRENT_SETTINGS(cts) == 0) { 3429 tgt = cts->ccb_h.target_id; 3430 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { 3431 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { 3432 return (-1); 3433 } 3434 } else { 3435 tgt = cts->ccb_h.target_id; 3436 } 3437 3438 /* 3439 * We aren't looking at Port Page 2 BIOS settings here- 3440 * sometimes these have been known to be bogus 
XXX. 3441 * 3442 * For user settings, we pick the max from port page 0 3443 * 3444 * For current settings we read the current settings out from 3445 * device page 0 for that target. 3446 */ 3447 if (IS_CURRENT_SETTINGS(cts)) { 3448 CONFIG_PAGE_SCSI_DEVICE_0 tmp; 3449 dval = 0; 3450 3451 CAMLOCK_2_MPTLOCK(mpt); 3452 tmp = mpt->mpt_dev_page0[tgt]; 3453 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, 3454 sizeof(tmp), FALSE, 5000); 3455 if (rv) { 3456 MPTLOCK_2_CAMLOCK(mpt); 3457 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); 3458 return (rv); 3459 } 3460 MPTLOCK_2_CAMLOCK(mpt); 3461 mpt_lprt(mpt, MPT_PRT_DEBUG, 3462 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, 3463 tmp.NegotiatedParameters, tmp.Information); 3464 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? 3465 DP_WIDE : DP_NARROW; 3466 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? 3467 DP_DISC_ENABLE : DP_DISC_DISABL; 3468 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? 3469 DP_TQING_ENABLE : DP_TQING_DISABL; 3470 oval = tmp.NegotiatedParameters; 3471 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; 3472 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; 3473 pval = tmp.NegotiatedParameters; 3474 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; 3475 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; 3476 mpt->mpt_dev_page0[tgt] = tmp; 3477 } else { 3478 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; 3479 oval = mpt->mpt_port_page0.Capabilities; 3480 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); 3481 pval = mpt->mpt_port_page0.Capabilities; 3482 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); 3483 } 3484 3485 #ifndef CAM_NEW_TRAN_CODE 3486 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 3487 cts->valid = 0; 3488 cts->sync_period = pval; 3489 cts->sync_offset = oval; 3490 cts->valid |= CCB_TRANS_SYNC_RATE_VALID; 3491 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID; 3492 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID; 3493 if (dval & DP_WIDE) { 3494 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3495 } else { 3496 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3497 } 3498 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3499 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3500 if (dval & DP_DISC_ENABLE) { 3501 cts->flags |= CCB_TRANS_DISC_ENB; 3502 } 3503 if (dval & DP_TQING_ENABLE) { 3504 cts->flags |= CCB_TRANS_TAG_ENB; 3505 } 3506 } 3507 #else 3508 spi->valid = 0; 3509 scsi->valid = 0; 3510 spi->flags = 0; 3511 scsi->flags = 0; 3512 spi->sync_offset = oval; 3513 spi->sync_period = pval; 3514 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3515 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3516 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3517 if (dval & DP_WIDE) { 3518 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3519 } else { 3520 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3521 } 3522 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3523 scsi->valid = CTS_SCSI_VALID_TQ; 3524 if (dval & DP_TQING_ENABLE) { 3525 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3526 } 3527 spi->valid |= CTS_SPI_VALID_DISC; 3528 if (dval & DP_DISC_ENABLE) { 3529 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3530 } 3531 } 3532 #endif 3533 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3534 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, 3535 IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM ", dval, pval, oval); 3536 return (0); 3537 } 3538 3539 static void 3540 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 3541 { 3542 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3543 3544 ptr = &mpt->mpt_dev_page1[tgt]; 3545 if (onoff) { 3546 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 3547 } else { 3548 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 3549 } 3550 } 3551 3552 static void 3553 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 3554 { 3555 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3556 3557 ptr = &mpt->mpt_dev_page1[tgt]; 3558 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3559 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3560 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 3561 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 3562 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 3563 if (period == 0) { 3564 return; 3565 } 3566 ptr->RequestedParameters |= 3567 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3568 ptr->RequestedParameters |= 3569 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3570 if (period < 0xa) { 3571 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 3572 } 3573 if (period < 0x9) { 3574 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 3575 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 3576 } 3577 } 3578 3579 static int 3580 mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 3581 { 3582 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 3583 int rv; 3584 3585 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3586 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 3587 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); 3588 tmp = mpt->mpt_dev_page1[tgt]; 3589 rv = mpt_write_cur_cfg_page(mpt, tgt, 3590 &tmp.Header, sizeof(tmp), FALSE, 5000); 3591 if (rv) { 3592 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 3593 return (-1); 3594 } 3595 return (0); 3596 } 3597 3598 static void 3599 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended) 3600 { 3601 #if __FreeBSD_version >= 500000 3602 cam_calc_geometry(ccg, extended); 3603 #else 3604 uint32_t size_mb; 3605 uint32_t secs_per_cylinder; 3606 3607 if (ccg->block_size == 0) { 3608 ccg->ccb_h.status = CAM_REQ_INVALID; 3609 return; 3610 } 3611 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); 3612 if (size_mb > 1024 && extended) { 3613 ccg->heads = 255; 3614 ccg->secs_per_track = 63; 3615 } else { 3616 ccg->heads = 64; 3617 ccg->secs_per_track = 32; 3618 } 3619 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 3620 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 3621 ccg->ccb_h.status = CAM_REQ_CMP; 3622 #endif 3623 } 3624 3625 /****************************** Timeout Recovery ******************************/ 3626 static int 3627 mpt_spawn_recovery_thread(struct mpt_softc *mpt) 3628 { 3629 int error; 3630 3631 error = mpt_kthread_create(mpt_recovery_thread, mpt, 3632 &mpt->recovery_thread, /*flags*/0, 3633 /*altstack*/0, "mpt_recovery%d", mpt->unit); 3634 return (error); 3635 } 3636 3637 static void 3638 mpt_terminate_recovery_thread(struct mpt_softc *mpt) 3639 { 3640 if (mpt->recovery_thread == NULL) { 3641 return; 3642 } 3643 mpt->shutdwn_recovery = 1; 3644 wakeup(mpt); 3645 /* 3646 * Sleep on a slightly different location 3647 * for this interlock just for added safety. 
	 */
	mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
}

static void
mpt_recovery_thread(void *arg)
{
	struct mpt_softc *mpt;

#if __FreeBSD_version >= 500000
	mtx_lock(&Giant);
#endif
	mpt = (struct mpt_softc *)arg;
	MPT_LOCK(mpt);
	for (;;) {
		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
			if (mpt->shutdwn_recovery == 0) {
				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
			}
		}
		if (mpt->shutdwn_recovery != 0) {
			break;
		}
		mpt_recover_commands(mpt);
	}
	mpt->recovery_thread = NULL;
	wakeup(&mpt->recovery_thread);
	MPT_UNLOCK(mpt);
#if __FreeBSD_version >= 500000
	mtx_unlock(&Giant);
#endif
	kthread_exit(0);
}

static int
mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
    u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
{
	MSG_SCSI_TASK_MGMT *tmf_req;
	int error;

	/*
	 * Wait for any current TMF request to complete.
	 * We're only allowed to issue one TMF at a time.
	 */
	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
	if (error != 0) {
		mpt_reset(mpt, TRUE);
		return (ETIMEDOUT);
	}

	mpt_assign_serno(mpt, mpt->tmf_req);
	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;

	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
	memset(tmf_req, 0, sizeof(*tmf_req));
	tmf_req->TargetID = target;
	tmf_req->Bus = channel;
	tmf_req->ChainOffset = 0;
	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
	tmf_req->Reserved = 0;
	tmf_req->TaskType = type;
	tmf_req->Reserved1 = 0;
	tmf_req->MsgFlags = flags;
	tmf_req->MsgContext =
	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
	memset(&tmf_req->LUN, 0,
	    sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2));
	if (lun >= 256) {
		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
		tmf_req->LUN[1] = lun & 0xff;
	} else {
		tmf_req->LUN[1] = lun;
	}
	tmf_req->TaskMsgContext = abort_ctx;

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
	    mpt->tmf_req->serno, tmf_req->MsgContext);
	if (mpt->verbose > MPT_PRT_DEBUG) {
		mpt_print_request(tmf_req);
	}

	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
	if (error != MPT_OK) {
		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
		mpt->tmf_req->state = REQ_STATE_FREE;
		mpt_reset(mpt, TRUE);
	}
	return (error);
}

/*
 * When a command times out, it is placed on the request_timeout_list
 * and we wake our recovery thread. The MPT-Fusion architecture supports
 * only a single TMF operation at a time, so we serially abort/bdr, etc.,
 * the timed-out transactions. The next TMF is issued either by the
 * completion handler of the current TMF waking our recovery thread,
 * or the TMF timeout handler causing a hard reset sequence.
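 *
 * The single-TMF rule is enforced with the dedicated mpt->tmf_req:
 * mpt_scsi_send_tmf() first waits for it to return to REQ_STATE_FREE,
 * so a typical abort issued from the recovery thread looks like
 *
 *	mpt_scsi_send_tmf(mpt, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
 *	    0, 0, tgt, lun, htole32(req->index | scsi_io_handler_id),
 *	    TRUE);
 *	mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, REQ_STATE_DONE,
 *	    TRUE, 500);
 *
 * which is exactly the sequence mpt_recover_commands() uses below.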
3751 */ 3752 static void 3753 mpt_recover_commands(struct mpt_softc *mpt) 3754 { 3755 request_t *req; 3756 union ccb *ccb; 3757 int error; 3758 3759 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3760 /* 3761 * No work to do- leave. 3762 */ 3763 mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); 3764 return; 3765 } 3766 3767 /* 3768 * Flush any commands whose completion coincides with their timeout. 3769 */ 3770 mpt_intr(mpt); 3771 3772 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3773 /* 3774 * The timedout commands have already 3775 * completed. This typically means 3776 * that either the timeout value was on 3777 * the hairy edge of what the device 3778 * requires or - more likely - interrupts 3779 * are not happening. 3780 */ 3781 mpt_prt(mpt, "Timedout requests already complete. " 3782 "Interrupts may not be functioning.\n"); 3783 mpt_enable_ints(mpt); 3784 return; 3785 } 3786 3787 /* 3788 * We have no visibility into the current state of the 3789 * controller, so attempt to abort the commands in the 3790 * order they timed-out. For initiator commands, we 3791 * depend on the reply handler pulling requests off 3792 * the timeout list. 3793 */ 3794 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { 3795 uint16_t status; 3796 uint8_t response; 3797 MSG_REQUEST_HEADER *hdrp = req->req_vbuf; 3798 3799 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", 3800 req, req->serno, hdrp->Function); 3801 ccb = req->ccb; 3802 if (ccb == NULL) { 3803 mpt_prt(mpt, "null ccb in timed out request. " 3804 "Resetting Controller.\n"); 3805 mpt_reset(mpt, TRUE); 3806 continue; 3807 } 3808 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); 3809 3810 /* 3811 * Check to see if this is not an initiator command and 3812 * deal with it differently if it is. 3813 */ 3814 switch (hdrp->Function) { 3815 case MPI_FUNCTION_SCSI_IO_REQUEST: 3816 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 3817 break; 3818 default: 3819 /* 3820 * XXX: FIX ME: need to abort target assists... 3821 */ 3822 mpt_prt(mpt, "just putting it back on the pend q\n"); 3823 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 3824 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, 3825 links); 3826 continue; 3827 } 3828 3829 error = mpt_scsi_send_tmf(mpt, 3830 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 3831 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 3832 htole32(req->index | scsi_io_handler_id), TRUE); 3833 3834 if (error != 0) { 3835 /* 3836 * mpt_scsi_send_tmf hard resets on failure, so no 3837 * need to do so here. Our queue should be emptied 3838 * by the hard reset. 3839 */ 3840 continue; 3841 } 3842 3843 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 3844 REQ_STATE_DONE, TRUE, 500); 3845 3846 status = mpt->tmf_req->IOCStatus; 3847 response = mpt->tmf_req->ResponseCode; 3848 mpt->tmf_req->state = REQ_STATE_FREE; 3849 3850 if (error != 0) { 3851 /* 3852 * If we've errored out,, reset the controller. 3853 */ 3854 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " 3855 "Resetting controller\n"); 3856 mpt_reset(mpt, TRUE); 3857 continue; 3858 } 3859 3860 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 3861 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " 3862 "Resetting controller.\n", status); 3863 mpt_reset(mpt, TRUE); 3864 continue; 3865 } 3866 3867 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 3868 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 3869 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. 
" 3870 "Resetting controller.\n", response); 3871 mpt_reset(mpt, TRUE); 3872 continue; 3873 } 3874 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 3875 } 3876 } 3877 3878 /************************ Target Mode Support ****************************/ 3879 static void 3880 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 3881 { 3882 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 3883 PTR_SGE_TRANSACTION32 tep; 3884 PTR_SGE_SIMPLE32 se; 3885 bus_addr_t paddr; 3886 uint32_t fl; 3887 3888 paddr = req->req_pbuf; 3889 paddr += MPT_RQSL(mpt); 3890 3891 fc = req->req_vbuf; 3892 memset(fc, 0, MPT_REQUEST_AREA); 3893 fc->BufferCount = 1; 3894 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 3895 fc->MsgContext = htole32(req->index | fc_els_handler_id); 3896 3897 /* 3898 * Okay, set up ELS buffer pointers. ELS buffer pointers 3899 * consist of a TE SGL element (with details length of zero) 3900 * followe by a SIMPLE SGL element which holds the address 3901 * of the buffer. 3902 */ 3903 3904 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 3905 3906 tep->ContextSize = 4; 3907 tep->Flags = 0; 3908 tep->TransactionContext[0] = htole32(ioindex); 3909 3910 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 3911 fl = 3912 MPI_SGE_FLAGS_HOST_TO_IOC | 3913 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 3914 MPI_SGE_FLAGS_LAST_ELEMENT | 3915 MPI_SGE_FLAGS_END_OF_LIST | 3916 MPI_SGE_FLAGS_END_OF_BUFFER; 3917 fl <<= MPI_SGE_FLAGS_SHIFT; 3918 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 3919 se->FlagsLength = htole32(fl); 3920 se->Address = htole32((uint32_t) paddr); 3921 mpt_lprt(mpt, MPT_PRT_DEBUG, 3922 "add ELS index %d ioindex %d for %p:%u\n", 3923 req->index, ioindex, req, req->serno); 3924 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 3925 ("mpt_fc_post_els: request not locked")); 3926 mpt_send_cmd(mpt, req); 3927 } 3928 3929 static void 3930 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 3931 { 3932 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 3933 PTR_CMD_BUFFER_DESCRIPTOR cb; 3934 bus_addr_t paddr; 3935 3936 paddr = req->req_pbuf; 3937 paddr += MPT_RQSL(mpt); 3938 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 3939 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 3940 3941 fc = req->req_vbuf; 3942 fc->BufferCount = 1; 3943 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 3944 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 3945 3946 cb = &fc->Buffer[0]; 3947 cb->IoIndex = htole16(ioindex); 3948 cb->u.PhysicalAddress32 = htole32((U32) paddr); 3949 3950 mpt_check_doorbell(mpt); 3951 mpt_send_cmd(mpt, req); 3952 } 3953 3954 static int 3955 mpt_add_els_buffers(struct mpt_softc *mpt) 3956 { 3957 int i; 3958 3959 if (mpt->is_fc == 0) { 3960 return (TRUE); 3961 } 3962 3963 if (mpt->els_cmds_allocated) { 3964 return (TRUE); 3965 } 3966 3967 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), 3968 M_DEVBUF, M_NOWAIT | M_ZERO); 3969 3970 if (mpt->els_cmd_ptrs == NULL) { 3971 return (FALSE); 3972 } 3973 3974 /* 3975 * Feed the chip some ELS buffer resources 3976 */ 3977 for (i = 0; i < MPT_MAX_ELS; i++) { 3978 request_t *req = mpt_get_request(mpt, FALSE); 3979 if (req == NULL) { 3980 break; 3981 } 3982 req->state |= REQ_STATE_LOCKED; 3983 mpt->els_cmd_ptrs[i] = req; 3984 mpt_fc_post_els(mpt, req, i); 3985 } 3986 3987 if (i == 0) { 3988 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 3989 free(mpt->els_cmd_ptrs, M_DEVBUF); 3990 mpt->els_cmd_ptrs = NULL; 3991 return (FALSE); 3992 } 3993 if (i != MPT_MAX_ELS) { 3994 mpt_lprt(mpt, MPT_PRT_INFO, 3995 
"only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 3996 } 3997 mpt->els_cmds_allocated = i; 3998 return(TRUE); 3999 } 4000 4001 static int 4002 mpt_add_target_commands(struct mpt_softc *mpt) 4003 { 4004 int i, max; 4005 4006 if (mpt->tgt_cmd_ptrs) { 4007 return (TRUE); 4008 } 4009 4010 max = MPT_MAX_REQUESTS(mpt) >> 1; 4011 if (max > mpt->mpt_max_tgtcmds) { 4012 max = mpt->mpt_max_tgtcmds; 4013 } 4014 mpt->tgt_cmd_ptrs = 4015 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 4016 if (mpt->tgt_cmd_ptrs == NULL) { 4017 mpt_prt(mpt, 4018 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 4019 return (FALSE); 4020 } 4021 4022 for (i = 0; i < max; i++) { 4023 request_t *req; 4024 4025 req = mpt_get_request(mpt, FALSE); 4026 if (req == NULL) { 4027 break; 4028 } 4029 req->state |= REQ_STATE_LOCKED; 4030 mpt->tgt_cmd_ptrs[i] = req; 4031 mpt_post_target_command(mpt, req, i); 4032 } 4033 4034 4035 if (i == 0) { 4036 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 4037 free(mpt->tgt_cmd_ptrs, M_DEVBUF); 4038 mpt->tgt_cmd_ptrs = NULL; 4039 return (FALSE); 4040 } 4041 4042 mpt->tgt_cmds_allocated = i; 4043 4044 if (i < max) { 4045 mpt_lprt(mpt, MPT_PRT_INFO, 4046 "added %d of %d target bufs\n", i, max); 4047 } 4048 return (i); 4049 } 4050 4051 static int 4052 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4053 { 4054 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4055 mpt->twildcard = 1; 4056 } else if (lun >= MPT_MAX_LUNS) { 4057 return (EINVAL); 4058 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4059 return (EINVAL); 4060 } 4061 if (mpt->tenabled == 0) { 4062 if (mpt->is_fc) { 4063 (void) mpt_fc_reset_link(mpt, 0); 4064 } 4065 mpt->tenabled = 1; 4066 } 4067 if (lun == CAM_LUN_WILDCARD) { 4068 mpt->trt_wildcard.enabled = 1; 4069 } else { 4070 mpt->trt[lun].enabled = 1; 4071 } 4072 return (0); 4073 } 4074 4075 static int 4076 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4077 { 4078 int i; 4079 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4080 mpt->twildcard = 0; 4081 } else if (lun >= MPT_MAX_LUNS) { 4082 return (EINVAL); 4083 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4084 return (EINVAL); 4085 } 4086 if (lun == CAM_LUN_WILDCARD) { 4087 mpt->trt_wildcard.enabled = 0; 4088 } else { 4089 mpt->trt[lun].enabled = 0; 4090 } 4091 for (i = 0; i < MPT_MAX_LUNS; i++) { 4092 if (mpt->trt[lun].enabled) { 4093 break; 4094 } 4095 } 4096 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4097 if (mpt->is_fc) { 4098 (void) mpt_fc_reset_link(mpt, 0); 4099 } 4100 mpt->tenabled = 0; 4101 } 4102 return (0); 4103 } 4104 4105 /* 4106 * Called with MPT lock held 4107 */ 4108 static void 4109 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4110 { 4111 struct ccb_scsiio *csio = &ccb->csio; 4112 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4113 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4114 4115 switch (tgt->state) { 4116 case TGT_STATE_IN_CAM: 4117 break; 4118 case TGT_STATE_MOVING_DATA: 4119 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4120 xpt_freeze_simq(mpt->sim, 1); 4121 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4122 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4123 MPTLOCK_2_CAMLOCK(mpt); 4124 xpt_done(ccb); 4125 CAMLOCK_2_MPTLOCK(mpt); 4126 return; 4127 default: 4128 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4129 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4130 mpt_tgt_dump_req_state(mpt, cmd_req); 4131 mpt_set_ccb_status(ccb, 
CAM_REQ_CMP_ERR); 4132 MPTLOCK_2_CAMLOCK(mpt); 4133 xpt_done(ccb); 4134 CAMLOCK_2_MPTLOCK(mpt); 4135 return; 4136 } 4137 4138 if (csio->dxfer_len) { 4139 bus_dmamap_callback_t *cb; 4140 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4141 request_t *req; 4142 4143 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, 4144 ("dxfer_len %u but direction is NONE\n", csio->dxfer_len)); 4145 4146 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4147 if (mpt->outofbeer == 0) { 4148 mpt->outofbeer = 1; 4149 xpt_freeze_simq(mpt->sim, 1); 4150 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4151 } 4152 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4153 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4154 MPTLOCK_2_CAMLOCK(mpt); 4155 xpt_done(ccb); 4156 CAMLOCK_2_MPTLOCK(mpt); 4157 return; 4158 } 4159 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4160 if (sizeof (bus_addr_t) > 4) { 4161 cb = mpt_execute_req_a64; 4162 } else { 4163 cb = mpt_execute_req; 4164 } 4165 4166 req->ccb = ccb; 4167 ccb->ccb_h.ccb_req_ptr = req; 4168 4169 /* 4170 * Record the currently active ccb and the 4171 * request for it in our target state area. 4172 */ 4173 tgt->ccb = ccb; 4174 tgt->req = req; 4175 4176 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4177 ta = req->req_vbuf; 4178 4179 if (mpt->is_sas) { 4180 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4181 cmd_req->req_vbuf; 4182 ta->QueueTag = ssp->InitiatorTag; 4183 } else if (mpt->is_spi) { 4184 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4185 cmd_req->req_vbuf; 4186 ta->QueueTag = sp->Tag; 4187 } 4188 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4189 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4190 ta->ReplyWord = htole32(tgt->reply_desc); 4191 if (csio->ccb_h.target_lun >= 256) { 4192 ta->LUN[0] = 4193 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); 4194 ta->LUN[1] = csio->ccb_h.target_lun & 0xff; 4195 } else { 4196 ta->LUN[1] = csio->ccb_h.target_lun; 4197 } 4198 4199 ta->RelativeOffset = tgt->bytes_xfered; 4200 ta->DataLength = ccb->csio.dxfer_len; 4201 if (ta->DataLength > tgt->resid) { 4202 ta->DataLength = tgt->resid; 4203 } 4204 4205 /* 4206 * XXX Should be done after data transfer completes? 4207 */ 4208 tgt->resid -= csio->dxfer_len; 4209 tgt->bytes_xfered += csio->dxfer_len; 4210 4211 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 4212 ta->TargetAssistFlags |= 4213 TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4214 } 4215 4216 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4217 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 4218 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 4219 ta->TargetAssistFlags |= 4220 TARGET_ASSIST_FLAGS_AUTO_STATUS; 4221 } 4222 #endif 4223 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; 4224 4225 mpt_lprt(mpt, MPT_PRT_DEBUG, 4226 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " 4227 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, 4228 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); 4229 4230 MPTLOCK_2_CAMLOCK(mpt); 4231 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 4232 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 4233 int error; 4234 int s = splsoftvm(); 4235 error = bus_dmamap_load(mpt->buffer_dmat, 4236 req->dmap, csio->data_ptr, csio->dxfer_len, 4237 cb, req, 0); 4238 splx(s); 4239 if (error == EINPROGRESS) { 4240 xpt_freeze_simq(mpt->sim, 1); 4241 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4242 } 4243 } else { 4244 /* 4245 * We have been given a pointer to a single 4246 * physical buffer.
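 * Since the address is already a physical (bus) address
 * (CAM_DATA_PHYS), we can synthesize the single dma segment
 * ourselves and invoke the callback directly rather than
 * going through bus_dmamap_load().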
4247 */ 4248 struct bus_dma_segment seg; 4249 seg.ds_addr = (bus_addr_t) 4250 (vm_offset_t)csio->data_ptr; 4251 seg.ds_len = csio->dxfer_len; 4252 (*cb)(req, &seg, 1, 0); 4253 } 4254 } else { 4255 /* 4256 * We have been given a list of addresses. 4257 * This case could be easily supported but they are not 4258 * currently generated by the CAM subsystem so there 4259 * is no point in wasting the time right now. 4260 */ 4261 struct bus_dma_segment *sgs; 4262 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 4263 (*cb)(req, NULL, 0, EFAULT); 4264 } else { 4265 /* Just use the segments provided */ 4266 sgs = (struct bus_dma_segment *)csio->data_ptr; 4267 (*cb)(req, sgs, csio->sglist_cnt, 0); 4268 } 4269 } 4270 CAMLOCK_2_MPTLOCK(mpt); 4271 } else { 4272 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 4273 4274 /* 4275 * XXX: I don't know why this seems to happen, but 4276 * XXX: completing the CCB seems to make things happy. 4277 * XXX: This seems to happen if the initiator requests 4278 * XXX: enough data that we have to do multiple CTIOs. 4279 */ 4280 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4281 mpt_lprt(mpt, MPT_PRT_DEBUG, 4282 "Meaningless STATUS CCB (%p): flags %x status %x " 4283 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 4284 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 4285 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4286 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4287 MPTLOCK_2_CAMLOCK(mpt); 4288 xpt_done(ccb); 4289 CAMLOCK_2_MPTLOCK(mpt); 4290 return; 4291 } 4292 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 4293 sp = sense; 4294 memcpy(sp, &csio->sense_data, 4295 min(csio->sense_len, MPT_SENSE_SIZE)); 4296 } 4297 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); 4298 } 4299 } 4300 4301 static void 4302 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 4303 uint32_t lun, int send, uint8_t *data, size_t length) 4304 { 4305 mpt_tgt_state_t *tgt; 4306 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4307 SGE_SIMPLE32 *se; 4308 uint32_t flags; 4309 uint8_t *dptr; 4310 bus_addr_t pptr; 4311 request_t *req; 4312 4313 /* 4314 * We enter with resid set to the data load for the command. 
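 * This routine fabricates a purely local response (e.g. the NOT HERE
 * inquiry data for an unconfigured lun): the payload is copied into
 * the scratch area MPT_RQSL() bytes into a freshly allocated request
 * and a single simple SGE in a TARGET_ASSIST is pointed at it.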
4315 */ 4316 tgt = MPT_TGT_STATE(mpt, cmd_req); 4317 if (length == 0 || tgt->resid == 0) { 4318 tgt->resid = 0; 4319 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); 4320 return; 4321 } 4322 4323 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4324 mpt_prt(mpt, "out of resources- dropping local response\n"); 4325 return; 4326 } 4327 tgt->is_local = 1; 4328 4329 4330 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4331 ta = req->req_vbuf; 4332 4333 if (mpt->is_sas) { 4334 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 4335 ta->QueueTag = ssp->InitiatorTag; 4336 } else if (mpt->is_spi) { 4337 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 4338 ta->QueueTag = sp->Tag; 4339 } 4340 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4341 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4342 ta->ReplyWord = htole32(tgt->reply_desc); 4343 if (lun >= 256) { 4344 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4345 ta->LUN[1] = lun & 0xff; 4346 } else { 4347 ta->LUN[1] = lun; 4348 } 4349 ta->RelativeOffset = 0; 4350 ta->DataLength = length; 4351 4352 dptr = req->req_vbuf; 4353 dptr += MPT_RQSL(mpt); 4354 pptr = req->req_pbuf; 4355 pptr += MPT_RQSL(mpt); 4356 memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 4357 4358 se = (SGE_SIMPLE32 *) &ta->SGL[0]; 4359 memset(se, 0, sizeof (*se)); 4360 4361 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 4362 if (send) { 4363 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4364 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 4365 } 4366 se->Address = htole32((uint32_t) pptr); 4367 MPI_pSGE_SET_LENGTH(se, length); 4368 flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 4369 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 4370 MPI_pSGE_SET_FLAGS(se, flags); 4371 4372 tgt->ccb = NULL; 4373 tgt->req = req; 4374 tgt->resid -= length; 4375 tgt->bytes_xfered = length; 4376 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4377 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 4378 #else 4379 tgt->state = TGT_STATE_MOVING_DATA; 4380 #endif 4381 mpt_send_cmd(mpt, req); 4382 } 4383 4384 /* 4385 * Abort queued up CCBs 4386 */ 4387 static cam_status 4388 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 4389 { 4390 struct mpt_hdr_stailq *lp; 4391 struct ccb_hdr *srch; 4392 int found = 0; 4393 union ccb *accb = ccb->cab.abort_ccb; 4394 tgt_resource_t *trtp; 4395 4396 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 4397 4398 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4399 trtp = &mpt->trt_wildcard; 4400 } else { 4401 trtp = &mpt->trt[ccb->ccb_h.target_lun]; 4402 } 4403 4404 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4405 lp = &trtp->atios; 4406 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 4407 lp = &trtp->inots; 4408 } else { 4409 return (CAM_REQ_INVALID); 4410 } 4411 4412 STAILQ_FOREACH(srch, lp, sim_links.stqe) { 4413 if (srch == &accb->ccb_h) { 4414 found = 1; 4415 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 4416 break; 4417 } 4418 } 4419 if (found) { 4420 accb->ccb_h.status = CAM_REQ_ABORTED; 4421 xpt_done(accb); 4422 return (CAM_REQ_CMP); 4423 } 4424 mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); 4425 return (CAM_PATH_INVALID); 4426 } 4427 4428 /* 4429 * Ask the MPT to abort the current target command 4430 */ 4431 static int 4432 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 4433 { 4434 int error; 4435 request_t *req; 4436 PTR_MSG_TARGET_MODE_ABORT abtp; 4437 4438 req = mpt_get_request(mpt, FALSE); 4439 if (req == NULL) { 4440 return (-1); 4441 } 4442 abtp = req->req_vbuf; 4443 memset(abtp, 0, sizeof (*abtp)); 4444 4445
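	/*
	 * Fill out the abort request proper. TARGET_MODE_ABORT_TYPE_EXACT_IO
	 * aborts only the single I/O identified by the reply word of the
	 * command buffer being aborted.
	 */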
abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4446 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 4447 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 4448 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 4449 error = 0; 4450 if (mpt->is_fc || mpt->is_sas) { 4451 mpt_send_cmd(mpt, req); 4452 } else { 4453 error = mpt_send_handshake_cmd(mpt, sizeof(*abtp), abtp); 4454 } 4455 return (error); 4456 } 4457 4458 /* 4459 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 4460 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 4461 * FC929 to set bogus FC_RSP fields (nonzero residuals 4462 * but w/o RESID fields set). This causes QLogic initiators 4463 * to think maybe that a frame was lost. 4464 * 4465 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 4466 * we use allocated requests to do TARGET_ASSIST and we 4467 * need to know when to release them. 4468 */ 4469 4470 static void 4471 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 4472 uint8_t status, uint8_t const *sense_data) 4473 { 4474 uint8_t *cmd_vbuf; 4475 mpt_tgt_state_t *tgt; 4476 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 4477 request_t *req; 4478 bus_addr_t paddr; 4479 int resplen = 0; 4480 uint32_t fl; 4481 4482 cmd_vbuf = cmd_req->req_vbuf; 4483 cmd_vbuf += MPT_RQSL(mpt); 4484 tgt = MPT_TGT_STATE(mpt, cmd_req); 4485 4486 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4487 if (mpt->outofbeer == 0) { 4488 mpt->outofbeer = 1; 4489 xpt_freeze_simq(mpt->sim, 1); 4490 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4491 } 4492 if (ccb) { 4493 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4494 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4495 MPTLOCK_2_CAMLOCK(mpt); 4496 xpt_done(ccb); 4497 CAMLOCK_2_MPTLOCK(mpt); 4498 } else { 4499 mpt_prt(mpt, 4500 "could not allocate status request- dropping\n"); 4501 } 4502 return; 4503 } 4504 req->ccb = ccb; 4505 if (ccb) { 4506 ccb->ccb_h.ccb_mpt_ptr = mpt; 4507 ccb->ccb_h.ccb_req_ptr = req; 4508 } 4509 4510 /* 4511 * Record the currently active ccb, if any, and the 4512 * request for it in our target state area. 4513 */ 4514 tgt->ccb = ccb; 4515 tgt->req = req; 4516 tgt->state = TGT_STATE_SENDING_STATUS; 4517 4518 tp = req->req_vbuf; 4519 paddr = req->req_pbuf; 4520 paddr += MPT_RQSL(mpt); 4521 4522 memset(tp, 0, sizeof (*tp)); 4523 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 4524 if (mpt->is_fc) { 4525 PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 4526 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 4527 uint8_t *sts_vbuf; 4528 uint32_t *rsp; 4529 4530 sts_vbuf = req->req_vbuf; 4531 sts_vbuf += MPT_RQSL(mpt); 4532 rsp = (uint32_t *) sts_vbuf; 4533 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 4534 4535 /* 4536 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 4537 * It has to be big-endian in memory and is organized 4538 * in 32 bit words, which are much easier to deal with 4539 * as words which are swizzled as needed. 4540 * 4541 * All we're filling here is the FC_RSP payload. 4542 * We may just have the chip synthesize it if 4543 * we have no residual and an OK status. 4544 * 4545 */ 4546 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 4547 4548 rsp[2] = status; 4549 if (tgt->resid) { 4550 rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ 4551 rsp[3] = htobe32(tgt->resid); 4552 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4553 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4554 #endif 4555 } 4556 if (status == SCSI_STATUS_CHECK_COND) { 4557 int i; 4558 4559 rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!!
*/ 4560 rsp[4] = htobe32(MPT_SENSE_SIZE); 4561 if (sense_data) { 4562 memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE); 4563 } else { 4564 mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI" 4565 "TION but no sense data?\n"); 4566 memset(&rsp[8], 0, MPT_SENSE_SIZE); 4567 } 4568 for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) { 4569 rsp[i] = htobe32(rsp[i]); 4570 } 4571 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4572 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4573 #endif 4574 } 4575 #ifndef WE_TRUST_AUTO_GOOD_STATUS 4576 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4577 #endif 4578 rsp[2] = htobe32(rsp[2]); 4579 } else if (mpt->is_sas) { 4580 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4581 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; 4582 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); 4583 } else { 4584 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4585 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; 4586 tp->StatusCode = status; 4587 tp->QueueTag = htole16(sp->Tag); 4588 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); 4589 } 4590 4591 tp->ReplyWord = htole32(tgt->reply_desc); 4592 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4593 4594 #ifdef WE_CAN_USE_AUTO_REPOST 4595 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; 4596 #endif 4597 if (status == SCSI_STATUS_OK && resplen == 0) { 4598 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; 4599 } else { 4600 tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); 4601 fl = 4602 MPI_SGE_FLAGS_HOST_TO_IOC | 4603 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4604 MPI_SGE_FLAGS_LAST_ELEMENT | 4605 MPI_SGE_FLAGS_END_OF_LIST | 4606 MPI_SGE_FLAGS_END_OF_BUFFER; 4607 fl <<= MPI_SGE_FLAGS_SHIFT; 4608 fl |= resplen; 4609 tp->StatusDataSGE.FlagsLength = htole32(fl); 4610 } 4611 4612 mpt_lprt(mpt, MPT_PRT_DEBUG, 4613 "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n", 4614 ccb, sense_data ? "h" : "hout", ccb ? ccb->csio.tag_id : -1, req, 4615 req->serno, tgt->resid); 4616 if (ccb) { 4617 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4618 ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz); 4619 } 4620 mpt_send_cmd(mpt, req); 4621 } 4622 4623 static void 4624 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, 4625 tgt_resource_t *trtp, int init_id) 4626 { 4627 struct ccb_immed_notify *inot; 4628 mpt_tgt_state_t *tgt; 4629 4630 tgt = MPT_TGT_STATE(mpt, req); 4631 inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots); 4632 if (inot == NULL) { 4633 mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n"); 4634 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL); 4635 return; 4636 } 4637 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); 4638 mpt_lprt(mpt, MPT_PRT_DEBUG1, 4639 "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun); 4640 4641 memset(&inot->sense_data, 0, sizeof (inot->sense_data)); 4642 inot->sense_len = 0; 4643 memset(inot->message_args, 0, sizeof (inot->message_args)); 4644 inot->initiator_id = init_id; /* XXX */ 4645 4646 /* 4647 * This is a somewhat grotesque attempt to map from task management 4648 * to old style SCSI messages. God help us all.
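 * The mapping below is necessarily lossy; each MPT task management
 * function is translated to the nearest legacy SCSI message byte that
 * immediate notify consumers expect (note that both MPT_ABORT_TASK_SET
 * and MPT_TERMINATE_TASK end up as MSG_ABORT_TAG).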
4649 */ 4650 switch (fc) { 4651 case MPT_ABORT_TASK_SET: 4652 inot->message_args[0] = MSG_ABORT_TAG; 4653 break; 4654 case MPT_CLEAR_TASK_SET: 4655 inot->message_args[0] = MSG_CLEAR_TASK_SET; 4656 break; 4657 case MPT_TARGET_RESET: 4658 inot->message_args[0] = MSG_TARGET_RESET; 4659 break; 4660 case MPT_CLEAR_ACA: 4661 inot->message_args[0] = MSG_CLEAR_ACA; 4662 break; 4663 case MPT_TERMINATE_TASK: 4664 inot->message_args[0] = MSG_ABORT_TAG; 4665 break; 4666 default: 4667 inot->message_args[0] = MSG_NOOP; 4668 break; 4669 } 4670 tgt->ccb = (union ccb *) inot; 4671 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 4672 MPTLOCK_2_CAMLOCK(mpt); 4673 xpt_done((union ccb *)inot); 4674 CAMLOCK_2_MPTLOCK(mpt); 4675 } 4676 4677 static void 4678 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 4679 { 4680 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 4681 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 4682 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 4683 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 4684 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 4685 '0', '0', '0', '1' 4686 }; 4687 struct ccb_accept_tio *atiop; 4688 lun_id_t lun; 4689 int tag_action = 0; 4690 mpt_tgt_state_t *tgt; 4691 tgt_resource_t *trtp = NULL; 4692 U8 *lunptr; 4693 U8 *vbuf; 4694 U16 itag; 4695 U16 ioindex; 4696 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 4697 uint8_t *cdbp; 4698 4699 /* 4700 * First, DMA sync the received command- 4701 * which is in the *request* * phys area. 4702 * 4703 * XXX: We could optimize this for a range 4704 */ 4705 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 4706 BUS_DMASYNC_POSTREAD); 4707 4708 /* 4709 * Stash info for the current command where we can get at it later. 4710 */ 4711 vbuf = req->req_vbuf; 4712 vbuf += MPT_RQSL(mpt); 4713 4714 /* 4715 * Get our state pointer set up. 4716 */ 4717 tgt = MPT_TGT_STATE(mpt, req); 4718 if (tgt->state != TGT_STATE_LOADED) { 4719 mpt_tgt_dump_req_state(mpt, req); 4720 panic("bad target state in mpt_scsi_tgt_atio"); 4721 } 4722 memset(tgt, 0, sizeof (mpt_tgt_state_t)); 4723 tgt->state = TGT_STATE_IN_CAM; 4724 tgt->reply_desc = reply_desc; 4725 ioindex = GET_IO_INDEX(reply_desc); 4726 if (mpt->verbose >= MPT_PRT_DEBUG) { 4727 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 4728 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 4729 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 4730 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 4731 } 4732 if (mpt->is_fc) { 4733 PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 4734 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 4735 if (fc->FcpCntl[2]) { 4736 /* 4737 * Task Management Request 4738 */ 4739 switch (fc->FcpCntl[2]) { 4740 case 0x2: 4741 fct = MPT_ABORT_TASK_SET; 4742 break; 4743 case 0x4: 4744 fct = MPT_CLEAR_TASK_SET; 4745 break; 4746 case 0x20: 4747 fct = MPT_TARGET_RESET; 4748 break; 4749 case 0x40: 4750 fct = MPT_CLEAR_ACA; 4751 break; 4752 case 0x80: 4753 fct = MPT_TERMINATE_TASK; 4754 break; 4755 default: 4756 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 4757 fc->FcpCntl[2]); 4758 mpt_scsi_tgt_status(mpt, 0, req, 4759 SCSI_STATUS_OK, 0); 4760 return; 4761 } 4762 } else { 4763 switch (fc->FcpCntl[1]) { 4764 case 0: 4765 tag_action = MSG_SIMPLE_Q_TAG; 4766 break; 4767 case 1: 4768 tag_action = MSG_HEAD_OF_Q_TAG; 4769 break; 4770 case 2: 4771 tag_action = MSG_ORDERED_Q_TAG; 4772 break; 4773 default: 4774 /* 4775 * Bah. 
Ignore Untagged Queuing and ACA 4776 */ 4777 tag_action = MSG_SIMPLE_Q_TAG; 4778 break; 4779 } 4780 } 4781 tgt->resid = be32toh(fc->FcpDl); 4782 cdbp = fc->FcpCdb; 4783 lunptr = fc->FcpLun; 4784 itag = be16toh(fc->OptionalOxid); 4785 } else if (mpt->is_sas) { 4786 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; 4787 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; 4788 cdbp = ssp->CDB; 4789 lunptr = ssp->LogicalUnitNumber; 4790 itag = ssp->InitiatorTag; 4791 } else { 4792 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; 4793 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; 4794 cdbp = sp->CDB; 4795 lunptr = sp->LogicalUnitNumber; 4796 itag = sp->Tag; 4797 } 4798 4799 /* 4800 * Generate a simple lun 4801 */ 4802 switch (lunptr[0] & 0xc0) { 4803 case 0x40: 4804 lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; 4805 break; 4806 case 0: 4807 lun = lunptr[1]; 4808 break; 4809 default: 4810 mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type of lun\n"); 4811 lun = 0xffff; 4812 break; 4813 } 4814 4815 /* 4816 * Deal with non-enabled or bad luns here. 4817 */ 4818 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || 4819 mpt->trt[lun].enabled == 0) { 4820 if (mpt->twildcard) { 4821 trtp = &mpt->trt_wildcard; 4822 } else if (fct == MPT_NIL_TMT_VALUE) { 4823 /* 4824 * In this case, we haven't got an upstream listener 4825 * for either a specific lun or wildcard luns. We 4826 * have to make some sensible response. For regular 4827 * inquiry, just return some NOT HERE inquiry data. 4828 * For VPD inquiry, report illegal field in cdb. 4829 * For REQUEST SENSE, just return NO SENSE data. 4830 * REPORT LUNS gets illegal command. 4831 * All other commands get 'no such device'. 4832 */ 4833 uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; 4834 size_t len; 4835 4836 memset(buf, 0, MPT_SENSE_SIZE); 4837 cond = SCSI_STATUS_CHECK_COND; 4838 buf[0] = 0xf0; 4839 buf[2] = 0x5; 4840 buf[7] = 0x8; 4841 sp = buf; 4842 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 4843 4844 switch (cdbp[0]) { 4845 case INQUIRY: 4846 { 4847 if (cdbp[1] != 0) { 4848 buf[12] = 0x26; 4849 buf[13] = 0x01; 4850 break; 4851 } 4852 len = min(tgt->resid, cdbp[4]); 4853 len = min(len, sizeof (null_iqd)); 4854 mpt_lprt(mpt, MPT_PRT_DEBUG, 4855 "local inquiry %ld bytes\n", (long) len); 4856 mpt_scsi_tgt_local(mpt, req, lun, 1, 4857 null_iqd, len); 4858 return; 4859 } 4860 case REQUEST_SENSE: 4861 { 4862 buf[2] = 0x0; 4863 len = min(tgt->resid, cdbp[4]); 4864 len = min(len, sizeof (buf)); 4865 mpt_lprt(mpt, MPT_PRT_DEBUG, 4866 "local reqsense %ld bytes\n", (long) len); 4867 mpt_scsi_tgt_local(mpt, req, lun, 1, 4868 buf, len); 4869 return; 4870 } 4871 case REPORT_LUNS: 4872 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); 4873 buf[12] = 0x26; 4874 break; 4875 default: 4876 mpt_lprt(mpt, MPT_PRT_DEBUG, 4877 "CMD 0x%x to unmanaged lun %u\n", 4878 cdbp[0], lun); 4879 buf[12] = 0x25; 4880 break; 4881 } 4882 mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); 4883 return; 4884 } 4885 /* otherwise, leave trtp NULL */ 4886 } else { 4887 trtp = &mpt->trt[lun]; 4888 } 4889 4890 /* 4891 * Deal with any task management 4892 */ 4893 if (fct != MPT_NIL_TMT_VALUE) { 4894 if (trtp == NULL) { 4895 mpt_prt(mpt, "task mgmt function %x but no listener\n", 4896 fct); 4897 mpt_scsi_tgt_status(mpt, 0, req, 4898 SCSI_STATUS_OK, 0); 4899 } else { 4900 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, 4901 GET_INITIATOR_INDEX(reply_desc)); 4902 } 4903 return; 4904 } 4905 4906 4907 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); 4908 if (atiop == NULL) { 4909 mpt_lprt(mpt, MPT_PRT_WARN, 4910 "no ATIOs for lun %u-
sending back %s\n", lun, 4911 mpt->tenabled? "QUEUE FULL" : "BUSY"); 4912 mpt_scsi_tgt_status(mpt, NULL, req, 4913 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 4914 NULL); 4915 return; 4916 } 4917 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 4918 mpt_lprt(mpt, MPT_PRT_DEBUG1, 4919 "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); 4920 atiop->ccb_h.ccb_mpt_ptr = mpt; 4921 atiop->ccb_h.status = CAM_CDB_RECVD; 4922 atiop->ccb_h.target_lun = lun; 4923 atiop->sense_len = 0; 4924 atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 4925 atiop->cdb_len = mpt_cdblen(cdbp[0], 16); 4926 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 4927 4928 /* 4929 * The tag we construct here allows us to find the 4930 * original request that the command came in with. 4931 * 4932 * This way we don't have to depend on anything but the 4933 * tag to find things when CCBs show back up from CAM. 4934 */ 4935 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 4936 tgt->tag_id = atiop->tag_id; 4937 if (tag_action) { 4938 atiop->tag_action = tag_action; 4939 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 4940 } 4941 if (mpt->verbose >= MPT_PRT_DEBUG) { 4942 int i; 4943 mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, 4944 atiop->ccb_h.target_lun); 4945 for (i = 0; i < atiop->cdb_len; i++) { 4946 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 4947 (i == (atiop->cdb_len - 1))? '>' : ' '); 4948 } 4949 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 4950 itag, atiop->tag_id, tgt->reply_desc, tgt->resid); 4951 } 4952 4953 MPTLOCK_2_CAMLOCK(mpt); 4954 xpt_done((union ccb *)atiop); 4955 CAMLOCK_2_MPTLOCK(mpt); 4956 } 4957 4958 static void 4959 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 4960 { 4961 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 4962 4963 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 4964 "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, 4965 tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, 4966 tgt->tag_id, tgt->state); 4967 } 4968 4969 static void 4970 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 4971 { 4972 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 4973 req->index, req->index, req->state); 4974 mpt_tgt_dump_tgt_state(mpt, req); 4975 } 4976 4977 static int 4978 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 4979 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 4980 { 4981 int dbg; 4982 union ccb *ccb; 4983 U16 status; 4984 4985 if (reply_frame == NULL) { 4986 /* 4987 * Figure out what the state of the command is. 4988 */ 4989 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 4990 4991 #ifdef INVARIANTS 4992 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 4993 if (tgt->req) { 4994 mpt_req_not_spcl(mpt, tgt->req, 4995 "turbo scsi_tgt_reply associated req", __LINE__); 4996 } 4997 #endif 4998 switch(tgt->state) { 4999 case TGT_STATE_LOADED: 5000 /* 5001 * This is a new command starting.
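 * A context-only (turbo) reply against a command buffer that is
 * in the LOADED state means the IOC has deposited a new command
 * into the buffer; decode it and hand it up to CAM as an ATIO.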
5002 */ 5003 mpt_scsi_tgt_atio(mpt, req, reply_desc); 5004 break; 5005 case TGT_STATE_MOVING_DATA: 5006 { 5007 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 5008 5009 ccb = tgt->ccb; 5010 if (tgt->req == NULL) { 5011 panic("mpt: turbo target reply with null " 5012 "associated request moving data"); 5013 /* NOTREACHED */ 5014 } 5015 if (ccb == NULL) { 5016 if (tgt->is_local == 0) { 5017 panic("mpt: turbo target reply with " 5018 "null associated ccb moving data"); 5019 /* NOTREACHED */ 5020 } 5021 mpt_lprt(mpt, MPT_PRT_DEBUG, 5022 "TARGET_ASSIST local done\n"); 5023 TAILQ_REMOVE(&mpt->request_pending_list, 5024 tgt->req, links); 5025 mpt_free_request(mpt, tgt->req); 5026 tgt->req = NULL; 5027 mpt_scsi_tgt_status(mpt, NULL, req, 5028 0, NULL); 5029 return (TRUE); 5030 } 5031 tgt->ccb = NULL; 5032 tgt->nxfers++; 5033 untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch); 5034 mpt_lprt(mpt, MPT_PRT_DEBUG, 5035 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 5036 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 5037 /* 5038 * Free the Target Assist Request 5039 */ 5040 KASSERT(tgt->req->ccb == ccb, 5041 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 5042 tgt->req->serno, tgt->req->ccb)); 5043 TAILQ_REMOVE(&mpt->request_pending_list, 5044 tgt->req, links); 5045 mpt_free_request(mpt, tgt->req); 5046 tgt->req = NULL; 5047 5048 /* 5049 * Do we need to send status now? That is, are 5050 * we done with all our data transfers? 5051 */ 5052 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 5053 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5054 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5055 KASSERT(ccb->ccb_h.status, 5056 ("zero ccb sts at %d\n", __LINE__)); 5057 tgt->state = TGT_STATE_IN_CAM; 5058 if (mpt->outofbeer) { 5059 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5060 mpt->outofbeer = 0; 5061 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5062 } 5063 MPTLOCK_2_CAMLOCK(mpt); 5064 xpt_done(ccb); 5065 CAMLOCK_2_MPTLOCK(mpt); 5066 break; 5067 } 5068 /* 5069 * Otherwise, send status (and sense) 5070 */ 5071 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5072 sp = sense; 5073 memcpy(sp, &ccb->csio.sense_data, 5074 min(ccb->csio.sense_len, MPT_SENSE_SIZE)); 5075 } 5076 mpt_scsi_tgt_status(mpt, ccb, req, 5077 ccb->csio.scsi_status, sp); 5078 break; 5079 } 5080 case TGT_STATE_SENDING_STATUS: 5081 case TGT_STATE_MOVING_DATA_AND_STATUS: 5082 { 5083 int ioindex; 5084 ccb = tgt->ccb; 5085 5086 if (tgt->req == NULL) { 5087 panic("mpt: turbo target reply with null " 5088 "associated request sending status"); 5089 /* NOTREACHED */ 5090 } 5091 5092 if (ccb) { 5093 tgt->ccb = NULL; 5094 if (tgt->state == 5095 TGT_STATE_MOVING_DATA_AND_STATUS) { 5096 tgt->nxfers++; 5097 } 5098 untimeout(mpt_timeout, ccb, 5099 ccb->ccb_h.timeout_ch); 5100 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5101 ccb->ccb_h.status |= CAM_SENT_SENSE; 5102 } 5103 mpt_lprt(mpt, MPT_PRT_DEBUG, 5104 "TARGET_STATUS tag %x sts %x flgs %x req " 5105 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 5106 ccb->ccb_h.flags, tgt->req); 5107 /* 5108 * Free the Target Send Status Request 5109 */ 5110 KASSERT(tgt->req->ccb == ccb, 5111 ("tgt->req %p:%u tgt->req->ccb %p", 5112 tgt->req, tgt->req->serno, tgt->req->ccb)); 5113 /* 5114 * Notify CAM that we're done 5115 */ 5116 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5117 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5118 KASSERT(ccb->ccb_h.status, 5119 ("ZERO ccb sts at %d\n", __LINE__)); 5120 tgt->ccb = NULL; 5121 } else { 5122 mpt_lprt(mpt, MPT_PRT_DEBUG, 5123 "TARGET_STATUS non-CAM for req %p:%u\n", 5124 tgt->req, tgt->req->serno); 5125 } 5126 
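			/*
			 * With or without a CCB, the status send request
			 * itself is finished now and can be retired.
			 */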
TAILQ_REMOVE(&mpt->request_pending_list, 5127 tgt->req, links); 5128 mpt_free_request(mpt, tgt->req); 5129 tgt->req = NULL; 5130 5131 /* 5132 * And re-post the Command Buffer. 5133 * This will reset the state. 5134 */ 5135 ioindex = GET_IO_INDEX(reply_desc); 5136 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5137 tgt->is_local = 0; 5138 mpt_post_target_command(mpt, req, ioindex); 5139 5140 /* 5141 * And post a done for anyone who cares 5142 */ 5143 if (ccb) { 5144 if (mpt->outofbeer) { 5145 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5146 mpt->outofbeer = 0; 5147 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5148 } 5149 MPTLOCK_2_CAMLOCK(mpt); 5150 xpt_done(ccb); 5151 CAMLOCK_2_MPTLOCK(mpt); 5152 } 5153 break; 5154 } 5155 case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 5156 tgt->state = TGT_STATE_LOADED; 5157 break; 5158 default: 5159 mpt_prt(mpt, "Unknown Target State 0x%x in Context " 5160 "Reply Function\n", tgt->state); 5161 } 5162 return (TRUE); 5163 } 5164 5165 status = le16toh(reply_frame->IOCStatus); 5166 if (status != MPI_IOCSTATUS_SUCCESS) { 5167 dbg = MPT_PRT_ERROR; 5168 } else { 5169 dbg = MPT_PRT_DEBUG1; 5170 } 5171 5172 mpt_lprt(mpt, dbg, 5173 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 5174 req, req->serno, reply_frame, reply_frame->Function, status); 5175 5176 switch (reply_frame->Function) { 5177 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 5178 { 5179 mpt_tgt_state_t *tgt; 5180 #ifdef INVARIANTS 5181 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 5182 #endif 5183 if (status != MPI_IOCSTATUS_SUCCESS) { 5184 /* 5185 * XXX What to do? 5186 */ 5187 break; 5188 } 5189 tgt = MPT_TGT_STATE(mpt, req); 5190 KASSERT(tgt->state == TGT_STATE_LOADING, 5191 ("bad state 0x%x on reply to buffer post\n", tgt->state)); 5192 mpt_assign_serno(mpt, req); 5193 tgt->state = TGT_STATE_LOADED; 5194 break; 5195 } 5196 case MPI_FUNCTION_TARGET_ASSIST: 5197 #ifdef INVARIANTS 5198 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 5199 #endif 5200 mpt_prt(mpt, "target assist completion\n"); 5201 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5202 mpt_free_request(mpt, req); 5203 break; 5204 case MPI_FUNCTION_TARGET_STATUS_SEND: 5205 #ifdef INVARIANTS 5206 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 5207 #endif 5208 mpt_prt(mpt, "status send completion\n"); 5209 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5210 mpt_free_request(mpt, req); 5211 break; 5212 case MPI_FUNCTION_TARGET_MODE_ABORT: 5213 { 5214 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 5215 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 5216 PTR_MSG_TARGET_MODE_ABORT abtp = 5217 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 5218 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 5219 #ifdef INVARIANTS 5220 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 5221 #endif 5222 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 5223 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 5224 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5225 mpt_free_request(mpt, req); 5226 break; 5227 } 5228 default: 5229 mpt_prt(mpt, "Unknown Target Address Reply Function code: " 5230 "0x%x\n", reply_frame->Function); 5231 break; 5232 } 5233 return (TRUE); 5234 } 5235
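/*
 * Illustrative sketch only, not compiled into the driver: the
 * single-level LUN encoding used by mpt_scsi_send_tmf(),
 * mpt_target_start_io() and mpt_scsi_tgt_local() above, pulled out
 * into a hypothetical helper. LUNs below 256 use peripheral device
 * addressing; larger LUNs use flat space addressing (01b in the top
 * two bits of byte 0), matching the decode in mpt_scsi_tgt_atio().
 */
#if 0
static __inline void
mpt_encode_lun(uint8_t lunbuf[2], u_int lun)
{
	if (lun >= 256) {
		/* Flat space addressing: 01b in the two top bits. */
		lunbuf[0] = 0x40 | ((lun >> 8) & 0x3f);
		lunbuf[1] = lun & 0xff;
	} else {
		/* Peripheral device addressing for luns 0..255. */
		lunbuf[0] = 0;
		lunbuf[1] = lun;
	}
}
#endif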