1 /*- 2 * FreeBSD/CAM specific routines for LSI '909 FC adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 2000, 2001 by Greg Ansley 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 /*- 29 * Copyright (c) 2002, 2006 by Matthew Jacob 30 * All rights reserved. 31 * 32 * Redistribution and use in source and binary forms, with or without 33 * modification, are permitted provided that the following conditions are 34 * met: 35 * 1. Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 38 * substantially similar to the "NO WARRANTY" disclaimer below 39 * ("Disclaimer") and any redistribution must be conditioned upon including 40 * a substantially similar Disclaimer requirement for further binary 41 * redistribution. 42 * 3. Neither the names of the above listed copyright holders nor the names 43 * of any contributors may be used to endorse or promote products derived 44 * from this software without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 * 58 * Support from Chris Ellsworth in order to make SAS adapters work 59 * is gratefully acknowledged. 60 * 61 * Support from LSI-Logic has also gone a great deal toward making this a 62 * workable subsystem and is gratefully acknowledged. 63 */ 64 /*- 65 * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 
66 * Copyright (c) 2005, WHEEL Sp. z o.o. 67 * Copyright (c) 2004, 2005 Justin T. Gibbs 68 * All rights reserved. 69 * 70 * Redistribution and use in source and binary forms, with or without 71 * modification, are permitted provided that the following conditions are 72 * met: 73 * 1. Redistributions of source code must retain the above copyright 74 * notice, this list of conditions and the following disclaimer. 75 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 76 * substantially similar to the "NO WARRANTY" disclaimer below 77 * ("Disclaimer") and any redistribution must be conditioned upon including 78 * a substantially similar Disclaimer requirement for further binary 79 * redistribution. 80 * 3. Neither the names of the above listed copyright holders nor the names 81 * of any contributors may be used to endorse or promote products derived 82 * from this software without specific prior written permission. 83 * 84 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 85 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 86 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 87 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 88 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 89 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 90 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 91 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 92 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 93 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 94 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 95 */ 96 #include <sys/cdefs.h> 97 __FBSDID("$FreeBSD$"); 98 99 #include <dev/mpt/mpt.h> 100 #include <dev/mpt/mpt_cam.h> 101 #include <dev/mpt/mpt_raid.h> 102 103 #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! 
*/ 104 #include "dev/mpt/mpilib/mpi_init.h" 105 #include "dev/mpt/mpilib/mpi_targ.h" 106 #include "dev/mpt/mpilib/mpi_fc.h" 107 #if __FreeBSD_version >= 500000 108 #include <sys/sysctl.h> 109 #endif 110 #include <sys/callout.h> 111 #include <sys/kthread.h> 112 113 #if __FreeBSD_version >= 700000 114 #ifndef CAM_NEW_TRAN_CODE 115 #define CAM_NEW_TRAN_CODE 1 116 #endif 117 #endif 118 119 static void mpt_poll(struct cam_sim *); 120 static timeout_t mpt_timeout; 121 static void mpt_action(struct cam_sim *, union ccb *); 122 static int 123 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *); 124 static void mpt_setwidth(struct mpt_softc *, int, int); 125 static void mpt_setsync(struct mpt_softc *, int, int, int); 126 static int mpt_update_spi_config(struct mpt_softc *, int); 127 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended); 128 129 static mpt_reply_handler_t mpt_scsi_reply_handler; 130 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler; 131 static mpt_reply_handler_t mpt_fc_els_reply_handler; 132 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *, 133 MSG_DEFAULT_REPLY *); 134 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int); 135 static int mpt_fc_reset_link(struct mpt_softc *, int); 136 137 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt); 138 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt); 139 static void mpt_recovery_thread(void *arg); 140 static void mpt_recover_commands(struct mpt_softc *mpt); 141 142 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int, 143 u_int, u_int, u_int, int); 144 145 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int); 146 static void mpt_post_target_command(struct mpt_softc *, request_t *, int); 147 static int mpt_add_els_buffers(struct mpt_softc *mpt); 148 static int mpt_add_target_commands(struct mpt_softc *mpt); 149 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t); 150 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t); 151 static void mpt_target_start_io(struct mpt_softc *, union ccb *); 152 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *); 153 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *); 154 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *, 155 uint8_t, uint8_t const *); 156 static void 157 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t, 158 tgt_resource_t *, int); 159 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *); 160 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *); 161 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler; 162 163 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE; 164 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE; 165 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE; 166 167 static mpt_probe_handler_t mpt_cam_probe; 168 static mpt_attach_handler_t mpt_cam_attach; 169 static mpt_enable_handler_t mpt_cam_enable; 170 static mpt_ready_handler_t mpt_cam_ready; 171 static mpt_event_handler_t mpt_cam_event; 172 static mpt_reset_handler_t mpt_cam_ioc_reset; 173 static mpt_detach_handler_t mpt_cam_detach; 174 175 static struct mpt_personality mpt_cam_personality = 176 { 177 .name = "mpt_cam", 178 .probe = mpt_cam_probe, 179 .attach = mpt_cam_attach, 180 .enable = mpt_cam_enable, 181 .ready = mpt_cam_ready, 182 .event = mpt_cam_event, 183 .reset = mpt_cam_ioc_reset, 184 .detach = mpt_cam_detach, 
185 }; 186 187 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND); 188 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1); 189 190 int 191 mpt_cam_probe(struct mpt_softc *mpt) 192 { 193 int role; 194 195 /* 196 * Only attach to nodes that support the initiator or target role 197 * (or want to) or have RAID physical devices that need CAM pass-thru 198 * support. 199 */ 200 if (mpt->do_cfg_role) { 201 role = mpt->cfg_role; 202 } else { 203 role = mpt->role; 204 } 205 if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 || 206 (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) { 207 return (0); 208 } 209 return (ENODEV); 210 } 211 212 int 213 mpt_cam_attach(struct mpt_softc *mpt) 214 { 215 struct cam_devq *devq; 216 mpt_handler_t handler; 217 int maxq; 218 int error; 219 220 TAILQ_INIT(&mpt->request_timeout_list); 221 maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))? 222 mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt); 223 224 handler.reply_handler = mpt_scsi_reply_handler; 225 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 226 &scsi_io_handler_id); 227 if (error != 0) { 228 goto cleanup0; 229 } 230 231 handler.reply_handler = mpt_scsi_tmf_reply_handler; 232 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 233 &scsi_tmf_handler_id); 234 if (error != 0) { 235 goto cleanup0; 236 } 237 238 /* 239 * If we're fibre channel and could support target mode, we register 240 * an ELS reply handler and give it resources. 241 */ 242 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { 243 handler.reply_handler = mpt_fc_els_reply_handler; 244 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 245 &fc_els_handler_id); 246 if (error != 0) { 247 goto cleanup0; 248 } 249 if (mpt_add_els_buffers(mpt) == FALSE) { 250 error = ENOMEM; 251 goto cleanup0; 252 } 253 maxq -= mpt->els_cmds_allocated; 254 } 255 256 /* 257 * If we support target mode, we register a reply handler for it, 258 * but don't add command resources until we actually enable target 259 * mode. 260 */ 261 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { 262 handler.reply_handler = mpt_scsi_tgt_reply_handler; 263 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, 264 &mpt->scsi_tgt_handler_id); 265 if (error != 0) { 266 goto cleanup0; 267 } 268 } 269 270 /* 271 * We keep one request reserved for timeout TMF requests. 272 */ 273 mpt->tmf_req = mpt_get_request(mpt, FALSE); 274 if (mpt->tmf_req == NULL) { 275 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n"); 276 error = ENOMEM; 277 goto cleanup0; 278 } 279 280 /* 281 * Mark the request as free even though not on the free list. 282 * There is only one TMF request allowed to be outstanding at 283 * a time and the TMF routines perform their own allocation 284 * tracking using the standard state flags. 285 */ 286 mpt->tmf_req->state = REQ_STATE_FREE; 287 maxq--; 288 289 if (mpt_spawn_recovery_thread(mpt) != 0) { 290 mpt_prt(mpt, "Unable to spawn recovery thread!\n"); 291 error = ENOMEM; 292 goto cleanup0; 293 } 294 295 /* 296 * The rest of this is CAM foo, for which we need to drop our lock 297 */ 298 MPTLOCK_2_CAMLOCK(mpt); 299 300 /* 301 * Create the device queue for our SIM(s). 302 */ 303 devq = cam_simq_alloc(maxq); 304 if (devq == NULL) { 305 mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n"); 306 error = ENOMEM; 307 goto cleanup; 308 } 309 310 /* 311 * Construct our SIM entry. 
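	 *
	 * (Descriptive note, not from the original: cam_sim_alloc() below is
	 * handed the devq created above and a depth of maxq, which was sized
	 * from the IOC's GlobalCredits and has already been reduced by the
	 * ELS buffers and the reserved TMF request, so CAM should not queue
	 * more commands than the IOC can accept.)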
312 */ 313 mpt->sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 314 mpt->unit, 1, maxq, devq); 315 if (mpt->sim == NULL) { 316 mpt_prt(mpt, "Unable to allocate CAM SIM!\n"); 317 cam_simq_free(devq); 318 error = ENOMEM; 319 goto cleanup; 320 } 321 322 /* 323 * Register exactly this bus. 324 */ 325 if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) { 326 mpt_prt(mpt, "Bus registration Failed!\n"); 327 error = ENOMEM; 328 goto cleanup; 329 } 330 331 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim), 332 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 333 mpt_prt(mpt, "Unable to allocate Path!\n"); 334 error = ENOMEM; 335 goto cleanup; 336 } 337 338 /* 339 * Only register a second bus for RAID physical 340 * devices if the controller supports RAID. 341 */ 342 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { 343 CAMLOCK_2_MPTLOCK(mpt); 344 return (0); 345 } 346 347 /* 348 * Create a "bus" to export all hidden disks to CAM. 349 */ 350 mpt->phydisk_sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 351 mpt->unit, 1, maxq, devq); 352 if (mpt->phydisk_sim == NULL) { 353 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n"); 354 error = ENOMEM; 355 goto cleanup; 356 } 357 358 /* 359 * Register this bus. 360 */ 361 if (xpt_bus_register(mpt->phydisk_sim, 1) != CAM_SUCCESS) { 362 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); 363 error = ENOMEM; 364 goto cleanup; 365 } 366 367 if (xpt_create_path(&mpt->phydisk_path, NULL, 368 cam_sim_path(mpt->phydisk_sim), 369 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 370 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); 371 error = ENOMEM; 372 goto cleanup; 373 } 374 CAMLOCK_2_MPTLOCK(mpt); 375 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); 376 return (0); 377 378 cleanup: 379 CAMLOCK_2_MPTLOCK(mpt); 380 cleanup0: 381 mpt_cam_detach(mpt); 382 return (error); 383 } 384 385 /* 386 * Read FC configuration information 387 */ 388 static int 389 mpt_read_config_info_fc(struct mpt_softc *mpt) 390 { 391 char *topology = NULL; 392 int rv; 393 394 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, 395 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); 396 if (rv) { 397 return (-1); 398 } 399 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", 400 mpt->mpt_fcport_page0.Header.PageVersion, 401 mpt->mpt_fcport_page0.Header.PageLength, 402 mpt->mpt_fcport_page0.Header.PageNumber, 403 mpt->mpt_fcport_page0.Header.PageType); 404 405 406 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, 407 sizeof(mpt->mpt_fcport_page0), FALSE, 5000); 408 if (rv) { 409 mpt_prt(mpt, "failed to read FC Port Page 0\n"); 410 return (-1); 411 } 412 413 mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed; 414 415 switch (mpt->mpt_fcport_page0.Flags & 416 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { 417 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: 418 mpt->mpt_fcport_speed = 0; 419 topology = "<NO LOOP>"; 420 break; 421 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: 422 topology = "N-Port"; 423 break; 424 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: 425 topology = "NL-Port"; 426 break; 427 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: 428 topology = "F-Port"; 429 break; 430 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: 431 topology = "FL-Port"; 432 break; 433 default: 434 mpt->mpt_fcport_speed = 0; 435 topology = "?"; 436 break; 437 } 438 439 mpt_lprt(mpt, MPT_PRT_INFO, 440 "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x " 441 "Speed %u-Gbit\n", topology, 
442 mpt->mpt_fcport_page0.WWNN.High, 443 mpt->mpt_fcport_page0.WWNN.Low, 444 mpt->mpt_fcport_page0.WWPN.High, 445 mpt->mpt_fcport_page0.WWPN.Low, 446 mpt->mpt_fcport_speed); 447 #if __FreeBSD_version >= 500000 448 { 449 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); 450 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); 451 452 snprintf(mpt->scinfo.fc.wwnn, 453 sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x", 454 mpt->mpt_fcport_page0.WWNN.High, 455 mpt->mpt_fcport_page0.WWNN.Low); 456 457 snprintf(mpt->scinfo.fc.wwpn, 458 sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x", 459 mpt->mpt_fcport_page0.WWPN.High, 460 mpt->mpt_fcport_page0.WWPN.Low); 461 462 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 463 "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0, 464 "World Wide Node Name"); 465 466 SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 467 "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0, 468 "World Wide Port Name"); 469 470 } 471 #endif 472 return (0); 473 } 474 475 /* 476 * Set FC configuration information. 477 */ 478 static int 479 mpt_set_initial_config_fc(struct mpt_softc *mpt) 480 { 481 482 CONFIG_PAGE_FC_PORT_1 fc; 483 U32 fl; 484 int r, doit = 0; 485 int role; 486 487 r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0, 488 &fc.Header, FALSE, 5000); 489 if (r) { 490 mpt_prt(mpt, "failed to read FC page 1 header\n"); 491 return (mpt_fc_reset_link(mpt, 1)); 492 } 493 494 r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0, 495 &fc.Header, sizeof (fc), FALSE, 5000); 496 if (r) { 497 mpt_prt(mpt, "failed to read FC page 1\n"); 498 return (mpt_fc_reset_link(mpt, 1)); 499 } 500 501 /* 502 * Check our flags to make sure we support the role we want. 503 */ 504 doit = 0; 505 role = 0; 506 fl = le32toh(fc.Flags);; 507 508 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) { 509 role |= MPT_ROLE_INITIATOR; 510 } 511 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { 512 role |= MPT_ROLE_TARGET; 513 } 514 515 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK; 516 517 if (mpt->do_cfg_role == 0) { 518 role = mpt->cfg_role; 519 } else { 520 mpt->do_cfg_role = 0; 521 } 522 523 if (role != mpt->cfg_role) { 524 if (mpt->cfg_role & MPT_ROLE_INITIATOR) { 525 if ((role & MPT_ROLE_INITIATOR) == 0) { 526 mpt_prt(mpt, "adding initiator role\n"); 527 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT; 528 doit++; 529 } else { 530 mpt_prt(mpt, "keeping initiator role\n"); 531 } 532 } else if (role & MPT_ROLE_INITIATOR) { 533 mpt_prt(mpt, "removing initiator role\n"); 534 doit++; 535 } 536 if (mpt->cfg_role & MPT_ROLE_TARGET) { 537 if ((role & MPT_ROLE_TARGET) == 0) { 538 mpt_prt(mpt, "adding target role\n"); 539 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG; 540 doit++; 541 } else { 542 mpt_prt(mpt, "keeping target role\n"); 543 } 544 } else if (role & MPT_ROLE_TARGET) { 545 mpt_prt(mpt, "removing target role\n"); 546 doit++; 547 } 548 mpt->role = mpt->cfg_role; 549 } 550 551 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { 552 if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) { 553 mpt_prt(mpt, "adding OXID option\n"); 554 fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID; 555 doit++; 556 } 557 } 558 559 if (doit) { 560 fc.Flags = htole32(fl); 561 r = mpt_write_cfg_page(mpt, 562 MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header, 563 sizeof(fc), FALSE, 5000); 564 if (r != 0) { 565 mpt_prt(mpt, "failed to update NVRAM with changes\n"); 566 return (0); 567 } 568 mpt_prt(mpt, "NOTE: NVRAM changes will not take " 569 "effect until next reboot or IOC reset\n"); 570 } 571 return (0); 572 } 573 574 /* 575 
* Read SAS configuration information. Nothing to do yet. 576 */ 577 static int 578 mpt_read_config_info_sas(struct mpt_softc *mpt) 579 { 580 return (0); 581 } 582 583 /* 584 * Set SAS configuration information. Nothing to do yet. 585 */ 586 static int 587 mpt_set_initial_config_sas(struct mpt_softc *mpt) 588 { 589 return (0); 590 } 591 592 /* 593 * Read SCSI configuration information 594 */ 595 static int 596 mpt_read_config_info_spi(struct mpt_softc *mpt) 597 { 598 int rv, i; 599 600 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, 601 &mpt->mpt_port_page0.Header, FALSE, 5000); 602 if (rv) { 603 return (-1); 604 } 605 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", 606 mpt->mpt_port_page0.Header.PageVersion, 607 mpt->mpt_port_page0.Header.PageLength, 608 mpt->mpt_port_page0.Header.PageNumber, 609 mpt->mpt_port_page0.Header.PageType); 610 611 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, 612 &mpt->mpt_port_page1.Header, FALSE, 5000); 613 if (rv) { 614 return (-1); 615 } 616 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", 617 mpt->mpt_port_page1.Header.PageVersion, 618 mpt->mpt_port_page1.Header.PageLength, 619 mpt->mpt_port_page1.Header.PageNumber, 620 mpt->mpt_port_page1.Header.PageType); 621 622 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, 623 &mpt->mpt_port_page2.Header, FALSE, 5000); 624 if (rv) { 625 return (-1); 626 } 627 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", 628 mpt->mpt_port_page2.Header.PageVersion, 629 mpt->mpt_port_page2.Header.PageLength, 630 mpt->mpt_port_page2.Header.PageNumber, 631 mpt->mpt_port_page2.Header.PageType); 632 633 for (i = 0; i < 16; i++) { 634 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 635 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); 636 if (rv) { 637 return (-1); 638 } 639 mpt_lprt(mpt, MPT_PRT_DEBUG, 640 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, 641 mpt->mpt_dev_page0[i].Header.PageVersion, 642 mpt->mpt_dev_page0[i].Header.PageLength, 643 mpt->mpt_dev_page0[i].Header.PageNumber, 644 mpt->mpt_dev_page0[i].Header.PageType); 645 646 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 647 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); 648 if (rv) { 649 return (-1); 650 } 651 mpt_lprt(mpt, MPT_PRT_DEBUG, 652 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, 653 mpt->mpt_dev_page1[i].Header.PageVersion, 654 mpt->mpt_dev_page1[i].Header.PageLength, 655 mpt->mpt_dev_page1[i].Header.PageNumber, 656 mpt->mpt_dev_page1[i].Header.PageType); 657 } 658 659 /* 660 * At this point, we don't *have* to fail. As long as we have 661 * valid config header information, we can (barely) lurch 662 * along. 
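 *
 * (Descriptive note: the current-page reads below are treated as best
 * effort; a failure to fetch a Port page or a per-target Device page is
 * logged and skipped rather than causing mpt_read_config_info_spi() to
 * return an error.)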
663 */ 664 665 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header, 666 sizeof(mpt->mpt_port_page0), FALSE, 5000); 667 if (rv) { 668 mpt_prt(mpt, "failed to read SPI Port Page 0\n"); 669 } else { 670 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 671 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", 672 mpt->mpt_port_page0.Capabilities, 673 mpt->mpt_port_page0.PhysicalInterface); 674 } 675 676 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header, 677 sizeof(mpt->mpt_port_page1), FALSE, 5000); 678 if (rv) { 679 mpt_prt(mpt, "failed to read SPI Port Page 1\n"); 680 } else { 681 mpt_lprt(mpt, MPT_PRT_DEBUG, 682 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", 683 mpt->mpt_port_page1.Configuration, 684 mpt->mpt_port_page1.OnBusTimerValue); 685 } 686 687 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header, 688 sizeof(mpt->mpt_port_page2), FALSE, 5000); 689 if (rv) { 690 mpt_prt(mpt, "failed to read SPI Port Page 2\n"); 691 } else { 692 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 693 "Port Page 2: Flags %x Settings %x\n", 694 mpt->mpt_port_page2.PortFlags, 695 mpt->mpt_port_page2.PortSettings); 696 for (i = 0; i < 16; i++) { 697 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 698 " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", 699 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, 700 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, 701 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); 702 } 703 } 704 705 for (i = 0; i < 16; i++) { 706 rv = mpt_read_cur_cfg_page(mpt, i, 707 &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0), 708 FALSE, 5000); 709 if (rv) { 710 mpt_prt(mpt, 711 "cannot read SPI Target %d Device Page 0\n", i); 712 continue; 713 } 714 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 715 "target %d page 0: Negotiated Params %x Information %x\n", 716 i, mpt->mpt_dev_page0[i].NegotiatedParameters, 717 mpt->mpt_dev_page0[i].Information); 718 719 rv = mpt_read_cur_cfg_page(mpt, i, 720 &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1), 721 FALSE, 5000); 722 if (rv) { 723 mpt_prt(mpt, 724 "cannot read SPI Target %d Device Page 1\n", i); 725 continue; 726 } 727 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 728 "target %d page 1: Requested Params %x Configuration %x\n", 729 i, mpt->mpt_dev_page1[i].RequestedParameters, 730 mpt->mpt_dev_page1[i].Configuration); 731 } 732 return (0); 733 } 734 735 /* 736 * Validate SPI configuration information. 737 * 738 * In particular, validate SPI Port Page 1. 739 */ 740 static int 741 mpt_set_initial_config_spi(struct mpt_softc *mpt) 742 { 743 int i, j, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id; 744 int error; 745 746 mpt->mpt_disc_enable = 0xff; 747 mpt->mpt_tag_enable = 0; 748 749 if (mpt->mpt_port_page1.Configuration != pp1val) { 750 CONFIG_PAGE_SCSI_PORT_1 tmp; 751 752 mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should " 753 "be %x\n", mpt->mpt_port_page1.Configuration, pp1val); 754 tmp = mpt->mpt_port_page1; 755 tmp.Configuration = pp1val; 756 error = mpt_write_cur_cfg_page(mpt, 0, 757 &tmp.Header, sizeof(tmp), FALSE, 5000); 758 if (error) { 759 return (-1); 760 } 761 error = mpt_read_cur_cfg_page(mpt, 0, 762 &tmp.Header, sizeof(tmp), FALSE, 5000); 763 if (error) { 764 return (-1); 765 } 766 if (tmp.Configuration != pp1val) { 767 mpt_prt(mpt, 768 "failed to reset SPI Port Page 1 Config value\n"); 769 return (-1); 770 } 771 mpt->mpt_port_page1 = tmp; 772 } 773 774 /* 775 * The purpose of this exercise is to get 776 * all targets back to async/narrow. 
777 * 778 * We skip this step if the BIOS has already negotiated 779 * speeds with the targets and does not require us to 780 * do Domain Validation. 781 */ 782 i = mpt->mpt_port_page2.PortSettings & 783 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 784 j = mpt->mpt_port_page2.PortFlags & 785 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 786 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS /* && 787 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV */) { 788 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 789 "honoring BIOS transfer negotiations\n"); 790 } else { 791 for (i = 0; i < 16; i++) { 792 mpt->mpt_dev_page1[i].RequestedParameters = 0; 793 mpt->mpt_dev_page1[i].Configuration = 0; 794 (void) mpt_update_spi_config(mpt, i); 795 } 796 } 797 return (0); 798 } 799 800 int 801 mpt_cam_enable(struct mpt_softc *mpt) 802 { 803 if (mpt->is_fc) { 804 if (mpt_read_config_info_fc(mpt)) { 805 return (EIO); 806 } 807 if (mpt_set_initial_config_fc(mpt)) { 808 return (EIO); 809 } 810 } else if (mpt->is_sas) { 811 if (mpt_read_config_info_sas(mpt)) { 812 return (EIO); 813 } 814 if (mpt_set_initial_config_sas(mpt)) { 815 return (EIO); 816 } 817 } else if (mpt->is_spi) { 818 if (mpt_read_config_info_spi(mpt)) { 819 return (EIO); 820 } 821 if (mpt_set_initial_config_spi(mpt)) { 822 return (EIO); 823 } 824 } 825 return (0); 826 } 827 828 void 829 mpt_cam_ready(struct mpt_softc *mpt) 830 { 831 /* 832 * If we're in target mode, hang out resources now 833 * so we don't cause the world to hang talking to us. 834 */ 835 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 836 /* 837 * Try to add some target command resources 838 */ 839 MPT_LOCK(mpt); 840 if (mpt_add_target_commands(mpt) == FALSE) { 841 mpt_prt(mpt, "failed to add target commands\n"); 842 } 843 MPT_UNLOCK(mpt); 844 } 845 } 846 847 void 848 mpt_cam_detach(struct mpt_softc *mpt) 849 { 850 mpt_handler_t handler; 851 852 mpt_terminate_recovery_thread(mpt); 853 854 handler.reply_handler = mpt_scsi_reply_handler; 855 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 856 scsi_io_handler_id); 857 handler.reply_handler = mpt_scsi_tmf_reply_handler; 858 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 859 scsi_tmf_handler_id); 860 handler.reply_handler = mpt_fc_els_reply_handler; 861 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 862 fc_els_handler_id); 863 handler.reply_handler = mpt_scsi_tgt_reply_handler; 864 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 865 mpt->scsi_tgt_handler_id); 866 867 if (mpt->tmf_req != NULL) { 868 mpt->tmf_req->state = REQ_STATE_ALLOCATED; 869 mpt_free_request(mpt, mpt->tmf_req); 870 mpt->tmf_req = NULL; 871 } 872 873 if (mpt->sim != NULL) { 874 MPTLOCK_2_CAMLOCK(mpt); 875 xpt_free_path(mpt->path); 876 xpt_bus_deregister(cam_sim_path(mpt->sim)); 877 cam_sim_free(mpt->sim, TRUE); 878 mpt->sim = NULL; 879 CAMLOCK_2_MPTLOCK(mpt); 880 } 881 882 if (mpt->phydisk_sim != NULL) { 883 MPTLOCK_2_CAMLOCK(mpt); 884 xpt_free_path(mpt->phydisk_path); 885 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim)); 886 cam_sim_free(mpt->phydisk_sim, TRUE); 887 mpt->phydisk_sim = NULL; 888 CAMLOCK_2_MPTLOCK(mpt); 889 } 890 } 891 892 /* This routine is used after a system crash to dump core onto the swap device. 893 */ 894 static void 895 mpt_poll(struct cam_sim *sim) 896 { 897 struct mpt_softc *mpt; 898 899 mpt = (struct mpt_softc *)cam_sim_softc(sim); 900 MPT_LOCK(mpt); 901 mpt_intr(mpt); 902 MPT_UNLOCK(mpt); 903 } 904 905 /* 906 * Watchdog timeout routine for SCSI requests. 
907 */ 908 static void 909 mpt_timeout(void *arg) 910 { 911 union ccb *ccb; 912 struct mpt_softc *mpt; 913 request_t *req; 914 915 ccb = (union ccb *)arg; 916 mpt = ccb->ccb_h.ccb_mpt_ptr; 917 918 MPT_LOCK(mpt); 919 req = ccb->ccb_h.ccb_req_ptr; 920 mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req, 921 req->serno, ccb, req->ccb); 922 /* XXX: WHAT ARE WE TRYING TO DO HERE? */ 923 if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) { 924 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 925 TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links); 926 req->state |= REQ_STATE_TIMEDOUT; 927 mpt_wakeup_recovery_thread(mpt); 928 } 929 MPT_UNLOCK(mpt); 930 } 931 932 /* 933 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly. 934 * 935 * Takes a list of physical segments and builds the SGL for SCSI IO command 936 * and forwards the commard to the IOC after one last check that CAM has not 937 * aborted the transaction. 938 */ 939 static void 940 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 941 { 942 request_t *req, *trq; 943 char *mpt_off; 944 union ccb *ccb; 945 struct mpt_softc *mpt; 946 int seg, first_lim; 947 uint32_t flags, nxt_off; 948 void *sglp = NULL; 949 MSG_REQUEST_HEADER *hdrp; 950 SGE_SIMPLE64 *se; 951 SGE_CHAIN64 *ce; 952 int istgt = 0; 953 954 req = (request_t *)arg; 955 ccb = req->ccb; 956 957 mpt = ccb->ccb_h.ccb_mpt_ptr; 958 req = ccb->ccb_h.ccb_req_ptr; 959 960 hdrp = req->req_vbuf; 961 mpt_off = req->req_vbuf; 962 963 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 964 error = EFBIG; 965 } 966 967 if (error == 0) { 968 switch (hdrp->Function) { 969 case MPI_FUNCTION_SCSI_IO_REQUEST: 970 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 971 istgt = 0; 972 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 973 break; 974 case MPI_FUNCTION_TARGET_ASSIST: 975 istgt = 1; 976 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 977 break; 978 default: 979 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", 980 hdrp->Function); 981 error = EINVAL; 982 break; 983 } 984 } 985 986 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 987 error = EFBIG; 988 mpt_prt(mpt, "segment count %d too large (max %u)\n", 989 nseg, mpt->max_seg_cnt); 990 } 991 992 bad: 993 if (error != 0) { 994 if (error != EFBIG && error != ENOMEM) { 995 mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); 996 } 997 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 998 cam_status status; 999 mpt_freeze_ccb(ccb); 1000 if (error == EFBIG) { 1001 status = CAM_REQ_TOO_BIG; 1002 } else if (error == ENOMEM) { 1003 if (mpt->outofbeer == 0) { 1004 mpt->outofbeer = 1; 1005 xpt_freeze_simq(mpt->sim, 1); 1006 mpt_lprt(mpt, MPT_PRT_DEBUG, 1007 "FREEZEQ\n"); 1008 } 1009 status = CAM_REQUEUE_REQ; 1010 } else { 1011 status = CAM_REQ_CMP_ERR; 1012 } 1013 mpt_set_ccb_status(ccb, status); 1014 } 1015 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1016 request_t *cmd_req = 1017 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1018 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1019 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1020 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1021 } 1022 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1023 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 1024 xpt_done(ccb); 1025 CAMLOCK_2_MPTLOCK(mpt); 1026 mpt_free_request(mpt, req); 1027 MPTLOCK_2_CAMLOCK(mpt); 1028 return; 1029 } 1030 1031 /* 1032 * No data to transfer? 1033 * Just make a single simple SGL with zero length. 
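	 *
	 * (Descriptive note: the zero-length element built below still
	 * carries the SIMPLE, LAST_ELEMENT, END_OF_BUFFER and END_OF_LIST
	 * flags, so the IOC sees a properly terminated, if empty, list.)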
1034 */ 1035 1036 if (mpt->verbose >= MPT_PRT_DEBUG) { 1037 int tidx = ((char *)sglp) - mpt_off; 1038 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1039 } 1040 1041 if (nseg == 0) { 1042 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1043 MPI_pSGE_SET_FLAGS(se1, 1044 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1045 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1046 se1->FlagsLength = htole32(se1->FlagsLength); 1047 goto out; 1048 } 1049 1050 1051 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1052 if (istgt == 0) { 1053 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1054 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1055 } 1056 } else { 1057 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1058 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1059 } 1060 } 1061 1062 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { 1063 bus_dmasync_op_t op; 1064 if (istgt == 0) { 1065 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1066 op = BUS_DMASYNC_PREREAD; 1067 } else { 1068 op = BUS_DMASYNC_PREWRITE; 1069 } 1070 } else { 1071 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1072 op = BUS_DMASYNC_PREWRITE; 1073 } else { 1074 op = BUS_DMASYNC_PREREAD; 1075 } 1076 } 1077 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1078 } 1079 1080 /* 1081 * Okay, fill in what we can at the end of the command frame. 1082 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1083 * the command frame. 1084 * 1085 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1086 * SIMPLE64 pointers and start doing CHAIN64 entries after 1087 * that. 1088 */ 1089 1090 if (nseg < MPT_NSGL_FIRST(mpt)) { 1091 first_lim = nseg; 1092 } else { 1093 /* 1094 * Leave room for CHAIN element 1095 */ 1096 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1097 } 1098 1099 se = (SGE_SIMPLE64 *) sglp; 1100 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1101 uint32_t tf; 1102 1103 memset(se, 0, sizeof (*se)); 1104 se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); 1105 if (sizeof(bus_addr_t) > 4) { 1106 se->Address.High = ((uint64_t) dm_segs->ds_addr) >> 32; 1107 } 1108 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1109 tf = flags; 1110 if (seg == first_lim - 1) { 1111 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1112 } 1113 if (seg == nseg - 1) { 1114 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1115 MPI_SGE_FLAGS_END_OF_BUFFER; 1116 } 1117 MPI_pSGE_SET_FLAGS(se, tf); 1118 se->FlagsLength = htole32(se->FlagsLength); 1119 } 1120 1121 if (seg == nseg) { 1122 goto out; 1123 } 1124 1125 /* 1126 * Tell the IOC where to find the first chain element. 1127 */ 1128 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1129 nxt_off = MPT_RQSL(mpt); 1130 trq = req; 1131 1132 /* 1133 * Make up the rest of the data segments out of a chain element 1134 * (contiained in the current request frame) which points to 1135 * SIMPLE64 elements in the next request frame, possibly ending 1136 * with *another* chain element (if there's more). 1137 */ 1138 while (seg < nseg) { 1139 int this_seg_lim; 1140 uint32_t tf, cur_off; 1141 bus_addr_t chain_list_addr; 1142 1143 /* 1144 * Point to the chain descriptor. Note that the chain 1145 * descriptor is at the end of the *previous* list (whether 1146 * chain or simple). 1147 */ 1148 ce = (SGE_CHAIN64 *) se; 1149 1150 /* 1151 * Before we change our current pointer, make sure we won't 1152 * overflow the request area with this frame. 
Note that we 1153 * test against 'greater than' here as it's okay in this case 1154 * to have next offset be just outside the request area. 1155 */ 1156 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1157 nxt_off = MPT_REQUEST_AREA; 1158 goto next_chain; 1159 } 1160 1161 /* 1162 * Set our SGE element pointer to the beginning of the chain 1163 * list and update our next chain list offset. 1164 */ 1165 se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; 1166 cur_off = nxt_off; 1167 nxt_off += MPT_RQSL(mpt); 1168 1169 /* 1170 * Now initialized the chain descriptor. 1171 */ 1172 memset(ce, 0, sizeof (*ce)); 1173 1174 /* 1175 * Get the physical address of the chain list. 1176 */ 1177 chain_list_addr = trq->req_pbuf; 1178 chain_list_addr += cur_off; 1179 if (sizeof (bus_addr_t) > 4) { 1180 ce->Address.High = 1181 htole32((uint32_t) ((uint64_t)chain_list_addr >> 32)); 1182 } 1183 ce->Address.Low = htole32((uint32_t) chain_list_addr); 1184 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | 1185 MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1186 1187 /* 1188 * If we have more than a frame's worth of segments left, 1189 * set up the chain list to have the last element be another 1190 * chain descriptor. 1191 */ 1192 if ((nseg - seg) > MPT_NSGL(mpt)) { 1193 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1194 /* 1195 * The length of the chain is the length in bytes of the 1196 * number of segments plus the next chain element. 1197 * 1198 * The next chain descriptor offset is the length, 1199 * in words, of the number of segments. 1200 */ 1201 ce->Length = (this_seg_lim - seg) * 1202 sizeof (SGE_SIMPLE64); 1203 ce->NextChainOffset = ce->Length >> 2; 1204 ce->Length += sizeof (SGE_CHAIN64); 1205 } else { 1206 this_seg_lim = nseg; 1207 ce->Length = (this_seg_lim - seg) * 1208 sizeof (SGE_SIMPLE64); 1209 } 1210 1211 /* 1212 * Fill in the chain list SGE elements with our segment data. 1213 * 1214 * If we're the last element in this chain list, set the last 1215 * element flag. If we're the completely last element period, 1216 * set the end of list and end of buffer flags. 1217 */ 1218 while (seg < this_seg_lim) { 1219 memset(se, 0, sizeof (*se)); 1220 se->Address.Low = htole32(dm_segs->ds_addr); 1221 if (sizeof (bus_addr_t) > 4) { 1222 se->Address.High = 1223 htole32(((uint64_t)dm_segs->ds_addr) >> 32); 1224 } 1225 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1226 tf = flags; 1227 if (seg == this_seg_lim - 1) { 1228 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1229 } 1230 if (seg == nseg - 1) { 1231 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1232 MPI_SGE_FLAGS_END_OF_BUFFER; 1233 } 1234 MPI_pSGE_SET_FLAGS(se, tf); 1235 se->FlagsLength = htole32(se->FlagsLength); 1236 se++; 1237 seg++; 1238 dm_segs++; 1239 } 1240 1241 next_chain: 1242 /* 1243 * If we have more segments to do and we've used up all of 1244 * the space in a request area, go allocate another one 1245 * and chain to that. 1246 */ 1247 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1248 request_t *nrq; 1249 1250 CAMLOCK_2_MPTLOCK(mpt); 1251 nrq = mpt_get_request(mpt, FALSE); 1252 MPTLOCK_2_CAMLOCK(mpt); 1253 1254 if (nrq == NULL) { 1255 error = ENOMEM; 1256 goto bad; 1257 } 1258 1259 /* 1260 * Append the new request area on the tail of our list. 
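			 *
			 * (Descriptive note: the new request is never sent on
			 * its own; its frame is only borrowed as additional
			 * chain-list space, which is why mpt_off and nxt_off
			 * are redirected into it below.)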
1261 */ 1262 if ((trq = req->chain) == NULL) { 1263 req->chain = nrq; 1264 } else { 1265 while (trq->chain != NULL) { 1266 trq = trq->chain; 1267 } 1268 trq->chain = nrq; 1269 } 1270 trq = nrq; 1271 mpt_off = trq->req_vbuf; 1272 if (mpt->verbose >= MPT_PRT_DEBUG) { 1273 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 1274 } 1275 nxt_off = 0; 1276 } 1277 } 1278 out: 1279 1280 /* 1281 * Last time we need to check if this CCB needs to be aborted. 1282 */ 1283 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1284 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1285 request_t *cmd_req = 1286 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1287 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1288 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1289 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1290 } 1291 mpt_prt(mpt, 1292 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", 1293 ccb->ccb_h.status & CAM_STATUS_MASK); 1294 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1295 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 1296 } 1297 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1298 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 1299 xpt_done(ccb); 1300 CAMLOCK_2_MPTLOCK(mpt); 1301 mpt_free_request(mpt, req); 1302 MPTLOCK_2_CAMLOCK(mpt); 1303 return; 1304 } 1305 1306 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1307 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 1308 ccb->ccb_h.timeout_ch = 1309 timeout(mpt_timeout, (caddr_t)ccb, 1310 (ccb->ccb_h.timeout * hz) / 1000); 1311 } else { 1312 callout_handle_init(&ccb->ccb_h.timeout_ch); 1313 } 1314 if (mpt->verbose > MPT_PRT_DEBUG) { 1315 int nc = 0; 1316 mpt_print_request(req->req_vbuf); 1317 for (trq = req->chain; trq; trq = trq->chain) { 1318 printf(" Additional Chain Area %d\n", nc++); 1319 mpt_dump_sgl(trq->req_vbuf, 0); 1320 } 1321 } 1322 1323 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1324 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1325 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 1326 #ifdef WE_TRUST_AUTO_GOOD_STATUS 1327 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 1328 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 1329 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 1330 } else { 1331 tgt->state = TGT_STATE_MOVING_DATA; 1332 } 1333 #else 1334 tgt->state = TGT_STATE_MOVING_DATA; 1335 #endif 1336 } 1337 CAMLOCK_2_MPTLOCK(mpt); 1338 mpt_send_cmd(mpt, req); 1339 MPTLOCK_2_CAMLOCK(mpt); 1340 } 1341 1342 static void 1343 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1344 { 1345 request_t *req, *trq; 1346 char *mpt_off; 1347 union ccb *ccb; 1348 struct mpt_softc *mpt; 1349 int seg, first_lim; 1350 uint32_t flags, nxt_off; 1351 void *sglp = NULL; 1352 MSG_REQUEST_HEADER *hdrp; 1353 SGE_SIMPLE32 *se; 1354 SGE_CHAIN32 *ce; 1355 int istgt = 0; 1356 1357 req = (request_t *)arg; 1358 ccb = req->ccb; 1359 1360 mpt = ccb->ccb_h.ccb_mpt_ptr; 1361 req = ccb->ccb_h.ccb_req_ptr; 1362 1363 hdrp = req->req_vbuf; 1364 mpt_off = req->req_vbuf; 1365 1366 1367 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1368 error = EFBIG; 1369 } 1370 1371 if (error == 0) { 1372 switch (hdrp->Function) { 1373 case MPI_FUNCTION_SCSI_IO_REQUEST: 1374 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 1375 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 1376 break; 1377 case MPI_FUNCTION_TARGET_ASSIST: 1378 istgt = 1; 1379 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 1380 break; 1381 default: 1382 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n", 1383 hdrp->Function); 1384 error = EINVAL; 1385 
break; 1386 } 1387 } 1388 1389 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1390 error = EFBIG; 1391 mpt_prt(mpt, "segment count %d too large (max %u)\n", 1392 nseg, mpt->max_seg_cnt); 1393 } 1394 1395 bad: 1396 if (error != 0) { 1397 if (error != EFBIG && error != ENOMEM) { 1398 mpt_prt(mpt, "mpt_execute_req: err %d\n", error); 1399 } 1400 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1401 cam_status status; 1402 mpt_freeze_ccb(ccb); 1403 if (error == EFBIG) { 1404 status = CAM_REQ_TOO_BIG; 1405 } else if (error == ENOMEM) { 1406 if (mpt->outofbeer == 0) { 1407 mpt->outofbeer = 1; 1408 xpt_freeze_simq(mpt->sim, 1); 1409 mpt_lprt(mpt, MPT_PRT_DEBUG, 1410 "FREEZEQ\n"); 1411 } 1412 status = CAM_REQUEUE_REQ; 1413 } else { 1414 status = CAM_REQ_CMP_ERR; 1415 } 1416 mpt_set_ccb_status(ccb, status); 1417 } 1418 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1419 request_t *cmd_req = 1420 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1421 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1422 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1423 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1424 } 1425 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1426 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 1427 xpt_done(ccb); 1428 CAMLOCK_2_MPTLOCK(mpt); 1429 mpt_free_request(mpt, req); 1430 MPTLOCK_2_CAMLOCK(mpt); 1431 return; 1432 } 1433 1434 /* 1435 * No data to transfer? 1436 * Just make a single simple SGL with zero length. 1437 */ 1438 1439 if (mpt->verbose >= MPT_PRT_DEBUG) { 1440 int tidx = ((char *)sglp) - mpt_off; 1441 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1442 } 1443 1444 if (nseg == 0) { 1445 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1446 MPI_pSGE_SET_FLAGS(se1, 1447 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1448 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1449 se1->FlagsLength = htole32(se1->FlagsLength); 1450 goto out; 1451 } 1452 1453 1454 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 1455 if (istgt == 0) { 1456 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1457 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1458 } 1459 } else { 1460 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1461 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1462 } 1463 } 1464 1465 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { 1466 bus_dmasync_op_t op; 1467 if (istgt) { 1468 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1469 op = BUS_DMASYNC_PREREAD; 1470 } else { 1471 op = BUS_DMASYNC_PREWRITE; 1472 } 1473 } else { 1474 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1475 op = BUS_DMASYNC_PREWRITE; 1476 } else { 1477 op = BUS_DMASYNC_PREREAD; 1478 } 1479 } 1480 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1481 } 1482 1483 /* 1484 * Okay, fill in what we can at the end of the command frame. 1485 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1486 * the command frame. 1487 * 1488 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1489 * SIMPLE32 pointers and start doing CHAIN32 entries after 1490 * that. 
1491 */ 1492 1493 if (nseg < MPT_NSGL_FIRST(mpt)) { 1494 first_lim = nseg; 1495 } else { 1496 /* 1497 * Leave room for CHAIN element 1498 */ 1499 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1500 } 1501 1502 se = (SGE_SIMPLE32 *) sglp; 1503 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1504 uint32_t tf; 1505 1506 memset(se, 0,sizeof (*se)); 1507 se->Address = dm_segs->ds_addr; 1508 1509 1510 1511 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1512 tf = flags; 1513 if (seg == first_lim - 1) { 1514 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1515 } 1516 if (seg == nseg - 1) { 1517 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1518 MPI_SGE_FLAGS_END_OF_BUFFER; 1519 } 1520 MPI_pSGE_SET_FLAGS(se, tf); 1521 se->FlagsLength = htole32(se->FlagsLength); 1522 } 1523 1524 if (seg == nseg) { 1525 goto out; 1526 } 1527 1528 /* 1529 * Tell the IOC where to find the first chain element. 1530 */ 1531 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1532 nxt_off = MPT_RQSL(mpt); 1533 trq = req; 1534 1535 /* 1536 * Make up the rest of the data segments out of a chain element 1537 * (contiained in the current request frame) which points to 1538 * SIMPLE32 elements in the next request frame, possibly ending 1539 * with *another* chain element (if there's more). 1540 */ 1541 while (seg < nseg) { 1542 int this_seg_lim; 1543 uint32_t tf, cur_off; 1544 bus_addr_t chain_list_addr; 1545 1546 /* 1547 * Point to the chain descriptor. Note that the chain 1548 * descriptor is at the end of the *previous* list (whether 1549 * chain or simple). 1550 */ 1551 ce = (SGE_CHAIN32 *) se; 1552 1553 /* 1554 * Before we change our current pointer, make sure we won't 1555 * overflow the request area with this frame. Note that we 1556 * test against 'greater than' here as it's okay in this case 1557 * to have next offset be just outside the request area. 1558 */ 1559 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1560 nxt_off = MPT_REQUEST_AREA; 1561 goto next_chain; 1562 } 1563 1564 /* 1565 * Set our SGE element pointer to the beginning of the chain 1566 * list and update our next chain list offset. 1567 */ 1568 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; 1569 cur_off = nxt_off; 1570 nxt_off += MPT_RQSL(mpt); 1571 1572 /* 1573 * Now initialized the chain descriptor. 1574 */ 1575 memset(ce, 0, sizeof (*ce)); 1576 1577 /* 1578 * Get the physical address of the chain list. 1579 */ 1580 chain_list_addr = trq->req_pbuf; 1581 chain_list_addr += cur_off; 1582 1583 1584 1585 ce->Address = chain_list_addr; 1586 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; 1587 1588 1589 /* 1590 * If we have more than a frame's worth of segments left, 1591 * set up the chain list to have the last element be another 1592 * chain descriptor. 1593 */ 1594 if ((nseg - seg) > MPT_NSGL(mpt)) { 1595 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1596 /* 1597 * The length of the chain is the length in bytes of the 1598 * number of segments plus the next chain element. 1599 * 1600 * The next chain descriptor offset is the length, 1601 * in words, of the number of segments. 1602 */ 1603 ce->Length = (this_seg_lim - seg) * 1604 sizeof (SGE_SIMPLE32); 1605 ce->NextChainOffset = ce->Length >> 2; 1606 ce->Length += sizeof (SGE_CHAIN32); 1607 } else { 1608 this_seg_lim = nseg; 1609 ce->Length = (this_seg_lim - seg) * 1610 sizeof (SGE_SIMPLE32); 1611 } 1612 1613 /* 1614 * Fill in the chain list SGE elements with our segment data. 1615 * 1616 * If we're the last element in this chain list, set the last 1617 * element flag. 
If we're the completely last element period, 1618 * set the end of list and end of buffer flags. 1619 */ 1620 while (seg < this_seg_lim) { 1621 memset(se, 0, sizeof (*se)); 1622 se->Address = dm_segs->ds_addr; 1623 1624 1625 1626 1627 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1628 tf = flags; 1629 if (seg == this_seg_lim - 1) { 1630 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1631 } 1632 if (seg == nseg - 1) { 1633 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1634 MPI_SGE_FLAGS_END_OF_BUFFER; 1635 } 1636 MPI_pSGE_SET_FLAGS(se, tf); 1637 se->FlagsLength = htole32(se->FlagsLength); 1638 se++; 1639 seg++; 1640 dm_segs++; 1641 } 1642 1643 next_chain: 1644 /* 1645 * If we have more segments to do and we've used up all of 1646 * the space in a request area, go allocate another one 1647 * and chain to that. 1648 */ 1649 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1650 request_t *nrq; 1651 1652 CAMLOCK_2_MPTLOCK(mpt); 1653 nrq = mpt_get_request(mpt, FALSE); 1654 MPTLOCK_2_CAMLOCK(mpt); 1655 1656 if (nrq == NULL) { 1657 error = ENOMEM; 1658 goto bad; 1659 } 1660 1661 /* 1662 * Append the new request area on the tail of our list. 1663 */ 1664 if ((trq = req->chain) == NULL) { 1665 req->chain = nrq; 1666 } else { 1667 while (trq->chain != NULL) { 1668 trq = trq->chain; 1669 } 1670 trq->chain = nrq; 1671 } 1672 trq = nrq; 1673 mpt_off = trq->req_vbuf; 1674 if (mpt->verbose >= MPT_PRT_DEBUG) { 1675 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 1676 } 1677 nxt_off = 0; 1678 } 1679 } 1680 out: 1681 1682 /* 1683 * Last time we need to check if this CCB needs to be aborted. 1684 */ 1685 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1686 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1687 request_t *cmd_req = 1688 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1689 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1690 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1691 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1692 } 1693 mpt_prt(mpt, 1694 "mpt_execute_req: I/O cancelled (status 0x%x)\n", 1695 ccb->ccb_h.status & CAM_STATUS_MASK); 1696 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1697 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 1698 } 1699 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1700 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 1701 xpt_done(ccb); 1702 CAMLOCK_2_MPTLOCK(mpt); 1703 mpt_free_request(mpt, req); 1704 MPTLOCK_2_CAMLOCK(mpt); 1705 return; 1706 } 1707 1708 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1709 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 1710 ccb->ccb_h.timeout_ch = 1711 timeout(mpt_timeout, (caddr_t)ccb, 1712 (ccb->ccb_h.timeout * hz) / 1000); 1713 } else { 1714 callout_handle_init(&ccb->ccb_h.timeout_ch); 1715 } 1716 if (mpt->verbose > MPT_PRT_DEBUG) { 1717 int nc = 0; 1718 mpt_print_request(req->req_vbuf); 1719 for (trq = req->chain; trq; trq = trq->chain) { 1720 printf(" Additional Chain Area %d\n", nc++); 1721 mpt_dump_sgl(trq->req_vbuf, 0); 1722 } 1723 } 1724 1725 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1726 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1727 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 1728 #ifdef WE_TRUST_AUTO_GOOD_STATUS 1729 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 1730 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 1731 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 1732 } else { 1733 tgt->state = TGT_STATE_MOVING_DATA; 1734 } 1735 #else 1736 tgt->state = TGT_STATE_MOVING_DATA; 1737 #endif 1738 } 1739 CAMLOCK_2_MPTLOCK(mpt); 1740 mpt_send_cmd(mpt, req); 1741 MPTLOCK_2_CAMLOCK(mpt); 1742 } 1743 
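/*
 * A note on the chain arithmetic used by both mpt_execute_req_a64() and
 * mpt_execute_req() above.  This is an illustrative summary added for
 * clarity only; the MPI specification remains the authority on the SGE
 * layouts:
 *
 *	hdrp->ChainOffset	byte offset of the first chain SGE from the
 *				start of the request header, expressed in
 *				32-bit words (hence the ">> 2").
 *	ce->Length		bytes of simple SGEs placed in the next
 *				chain list, plus one more chain SGE when
 *				further segments remain.
 *	ce->NextChainOffset	offset, again in 32-bit words, of that
 *				trailing chain SGE within the list.
 *
 * So a chain list holding N simple elements followed by a trailing chain
 * element has NextChainOffset equal to N simple-SGE sizes converted to
 * words, which is exactly what "ce->Length >> 2" computes before the
 * chain element's own size is added to Length.
 */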
static void
mpt_start(struct cam_sim *sim, union ccb *ccb)
{
	request_t *req;
	struct mpt_softc *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	struct ccb_scsiio *csio = &ccb->csio;
	struct ccb_hdr *ccbh = &ccb->ccb_h;
	bus_dmamap_callback_t *cb;
	target_id_t tgt;
	int raid_passthru;

	/* Get the pointer for the physical adapter */
	mpt = ccb->ccb_h.ccb_mpt_ptr;
	raid_passthru = (sim == mpt->phydisk_sim);

	CAMLOCK_2_MPTLOCK(mpt);
	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_done(ccb);
		return;
	}
#ifdef INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
#endif
	MPTLOCK_2_CAMLOCK(mpt);

	if (sizeof (bus_addr_t) > 4) {
		cb = mpt_execute_req_a64;
	} else {
		cb = mpt_execute_req;
	}

	/*
	 * Link the ccb and the request structure so we can find
	 * the other knowing either the request or the ccb.
	 */
	req->ccb = ccb;
	ccb->ccb_h.ccb_req_ptr = req;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	if (raid_passthru) {
		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
		CAMLOCK_2_MPTLOCK(mpt);
		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
			MPTLOCK_2_CAMLOCK(mpt);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
			xpt_done(ccb);
			return;
		}
		MPTLOCK_2_CAMLOCK(mpt);
		mpt_req->Bus = 0;	/* we never set bus here */
	} else {
		tgt = ccb->ccb_h.target_id;
		mpt_req->Bus = 0;	/* XXX */
	}
	mpt_req->SenseBufferLength =
	    (csio->sense_len < MPT_SENSE_SIZE) ?
	     csio->sense_len : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when we
	 * get the command completion interrupt from the IOC.
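	 * (Descriptive note: the context is simply the request index OR'd
	 * with the scsi_io_handler_id registered in mpt_cam_attach(), which
	 * lets the reply path hand the completion to mpt_scsi_reply_handler
	 * and recover this request.)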
1820 */ 1821 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); 1822 1823 /* Which physical device to do the I/O on */ 1824 mpt_req->TargetID = tgt; 1825 1826 /* We assume a single level LUN type */ 1827 if (ccb->ccb_h.target_lun >= 256) { 1828 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f); 1829 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff; 1830 } else { 1831 mpt_req->LUN[1] = ccb->ccb_h.target_lun; 1832 } 1833 1834 /* Set the direction of the transfer */ 1835 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1836 mpt_req->Control = MPI_SCSIIO_CONTROL_READ; 1837 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1838 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; 1839 } else { 1840 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; 1841 } 1842 1843 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 1844 switch(ccb->csio.tag_action) { 1845 case MSG_HEAD_OF_Q_TAG: 1846 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; 1847 break; 1848 case MSG_ACA_TASK: 1849 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; 1850 break; 1851 case MSG_ORDERED_Q_TAG: 1852 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; 1853 break; 1854 case MSG_SIMPLE_Q_TAG: 1855 default: 1856 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 1857 break; 1858 } 1859 } else { 1860 if (mpt->is_fc || mpt->is_sas) { 1861 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 1862 } else { 1863 /* XXX No such thing for a target doing packetized. */ 1864 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; 1865 } 1866 } 1867 1868 if (mpt->is_spi) { 1869 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 1870 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; 1871 } 1872 } 1873 1874 /* Copy the scsi command block into place */ 1875 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 1876 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); 1877 } else { 1878 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); 1879 } 1880 1881 mpt_req->CDBLength = csio->cdb_len; 1882 mpt_req->DataLength = htole32(csio->dxfer_len); 1883 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); 1884 1885 /* 1886 * Do a *short* print here if we're set to MPT_PRT_DEBUG 1887 */ 1888 if (mpt->verbose == MPT_PRT_DEBUG) { 1889 U32 df; 1890 mpt_prt(mpt, "mpt_start: %s op 0x%x ", 1891 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? 1892 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); 1893 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; 1894 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { 1895 mpt_prtc(mpt, "(%s %u byte%s ", 1896 (df == MPI_SCSIIO_CONTROL_READ)? 1897 "read" : "write", csio->dxfer_len, 1898 (csio->dxfer_len == 1)? ")" : "s)"); 1899 } 1900 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt, 1901 ccb->ccb_h.target_lun, req, req->serno); 1902 } 1903 1904 /* 1905 * If we have any data to send with this command map it into bus space. 1906 */ 1907 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1908 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 1909 /* 1910 * We've been given a pointer to a single buffer. 1911 */ 1912 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 1913 /* 1914 * Virtual address that needs to translated into 1915 * one or more physical address ranges. 
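				 * (Descriptive note: bus_dmamap_load() may
				 * defer the mapping; the EINPROGRESS case
				 * below freezes the SIM queue until the
				 * callback runs, preserving command ordering.)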
1916 */ 1917 int error; 1918 int s = splsoftvm(); 1919 error = bus_dmamap_load(mpt->buffer_dmat, 1920 req->dmap, csio->data_ptr, csio->dxfer_len, 1921 cb, req, 0); 1922 splx(s); 1923 if (error == EINPROGRESS) { 1924 /* 1925 * So as to maintain ordering, 1926 * freeze the controller queue 1927 * until our mapping is 1928 * returned. 1929 */ 1930 xpt_freeze_simq(mpt->sim, 1); 1931 ccbh->status |= CAM_RELEASE_SIMQ; 1932 } 1933 } else { 1934 /* 1935 * We have been given a pointer to single 1936 * physical buffer. 1937 */ 1938 struct bus_dma_segment seg; 1939 seg.ds_addr = 1940 (bus_addr_t)(vm_offset_t)csio->data_ptr; 1941 seg.ds_len = csio->dxfer_len; 1942 (*cb)(req, &seg, 1, 0); 1943 } 1944 } else { 1945 /* 1946 * We have been given a list of addresses. 1947 * This case could be easily supported but they are not 1948 * currently generated by the CAM subsystem so there 1949 * is no point in wasting the time right now. 1950 */ 1951 struct bus_dma_segment *segs; 1952 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { 1953 (*cb)(req, NULL, 0, EFAULT); 1954 } else { 1955 /* Just use the segments provided */ 1956 segs = (struct bus_dma_segment *)csio->data_ptr; 1957 (*cb)(req, segs, csio->sglist_cnt, 0); 1958 } 1959 } 1960 } else { 1961 (*cb)(req, NULL, 0, 0); 1962 } 1963 } 1964 1965 static int 1966 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, 1967 int sleep_ok) 1968 { 1969 int error; 1970 uint16_t status; 1971 uint8_t response; 1972 1973 error = mpt_scsi_send_tmf(mpt, 1974 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? 1975 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : 1976 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 1977 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 1978 0, /* XXX How do I get the channel ID? */ 1979 tgt != CAM_TARGET_WILDCARD ? tgt : 0, 1980 lun != CAM_LUN_WILDCARD ? lun : 0, 1981 0, sleep_ok); 1982 1983 if (error != 0) { 1984 /* 1985 * mpt_scsi_send_tmf hard resets on failure, so no 1986 * need to do so here. 1987 */ 1988 mpt_prt(mpt, 1989 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); 1990 return (EIO); 1991 } 1992 1993 /* Wait for bus reset to be processed by the IOC. */ 1994 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 1995 REQ_STATE_DONE, sleep_ok, 5000); 1996 1997 status = mpt->tmf_req->IOCStatus; 1998 response = mpt->tmf_req->ResponseCode; 1999 mpt->tmf_req->state = REQ_STATE_FREE; 2000 2001 if (error) { 2002 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " 2003 "Resetting controller.\n"); 2004 mpt_reset(mpt, TRUE); 2005 return (ETIMEDOUT); 2006 } 2007 2008 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 2009 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " 2010 "Resetting controller.\n", status); 2011 mpt_reset(mpt, TRUE); 2012 return (EIO); 2013 } 2014 2015 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 2016 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 2017 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. 
" 2018 "Resetting controller.\n", response); 2019 mpt_reset(mpt, TRUE); 2020 return (EIO); 2021 } 2022 return (0); 2023 } 2024 2025 static int 2026 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) 2027 { 2028 int r = 0; 2029 request_t *req; 2030 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; 2031 2032 req = mpt_get_request(mpt, FALSE); 2033 if (req == NULL) { 2034 return (ENOMEM); 2035 } 2036 fc = req->req_vbuf; 2037 memset(fc, 0, sizeof(*fc)); 2038 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; 2039 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; 2040 fc->MsgContext = htole32(req->index | fc_els_handler_id); 2041 mpt_send_cmd(mpt, req); 2042 if (dowait) { 2043 r = mpt_wait_req(mpt, req, REQ_STATE_DONE, 2044 REQ_STATE_DONE, FALSE, 60 * 1000); 2045 if (r == 0) { 2046 mpt_free_request(mpt, req); 2047 } 2048 } 2049 return (r); 2050 } 2051 2052 static int 2053 mpt_cam_event(struct mpt_softc *mpt, request_t *req, 2054 MSG_EVENT_NOTIFY_REPLY *msg) 2055 { 2056 uint32_t data0, data1; 2057 2058 data0 = le32toh(msg->Data[0]); 2059 data1 = le32toh(msg->Data[1]); 2060 switch(msg->Event & 0xFF) { 2061 case MPI_EVENT_UNIT_ATTENTION: 2062 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", 2063 (data0 >> 8) & 0xff, data0 & 0xff); 2064 break; 2065 2066 case MPI_EVENT_IOC_BUS_RESET: 2067 /* We generated a bus reset */ 2068 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", 2069 (data0 >> 8) & 0xff); 2070 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2071 break; 2072 2073 case MPI_EVENT_EXT_BUS_RESET: 2074 /* Someone else generated a bus reset */ 2075 mpt_prt(mpt, "External Bus Reset Detected\n"); 2076 /* 2077 * These replies don't return EventData like the MPI 2078 * spec says they do 2079 */ 2080 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2081 break; 2082 2083 case MPI_EVENT_RESCAN: 2084 /* 2085 * In general this means a device has been added to the loop. 2086 */ 2087 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2088 /* xpt_async(AC_FOUND_DEVICE, path, NULL); */ 2089 break; 2090 2091 case MPI_EVENT_LINK_STATUS_CHANGE: 2092 mpt_prt(mpt, "Port %d: LinkState: %s\n", 2093 (data1 >> 8) & 0xff, 2094 ((data0 & 0xff) == 0)? 
"Failed" : "Active"); 2095 break; 2096 2097 case MPI_EVENT_LOOP_STATE_CHANGE: 2098 switch ((data0 >> 16) & 0xff) { 2099 case 0x01: 2100 mpt_prt(mpt, 2101 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " 2102 "(Loop Initialization)\n", 2103 (data1 >> 8) & 0xff, 2104 (data0 >> 8) & 0xff, 2105 (data0 ) & 0xff); 2106 switch ((data0 >> 8) & 0xff) { 2107 case 0xF7: 2108 if ((data0 & 0xff) == 0xF7) { 2109 mpt_prt(mpt, "Device needs AL_PA\n"); 2110 } else { 2111 mpt_prt(mpt, "Device %02x doesn't like " 2112 "FC performance\n", 2113 data0 & 0xFF); 2114 } 2115 break; 2116 case 0xF8: 2117 if ((data0 & 0xff) == 0xF7) { 2118 mpt_prt(mpt, "Device had loop failure " 2119 "at its receiver prior to acquiring" 2120 " AL_PA\n"); 2121 } else { 2122 mpt_prt(mpt, "Device %02x detected loop" 2123 " failure at its receiver\n", 2124 data0 & 0xFF); 2125 } 2126 break; 2127 default: 2128 mpt_prt(mpt, "Device %02x requests that device " 2129 "%02x reset itself\n", 2130 data0 & 0xFF, 2131 (data0 >> 8) & 0xFF); 2132 break; 2133 } 2134 break; 2135 case 0x02: 2136 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2137 "LPE(%02x,%02x) (Loop Port Enable)\n", 2138 (data1 >> 8) & 0xff, /* Port */ 2139 (data0 >> 8) & 0xff, /* Character 3 */ 2140 (data0 ) & 0xff /* Character 4 */); 2141 break; 2142 case 0x03: 2143 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2144 "LPB(%02x,%02x) (Loop Port Bypass)\n", 2145 (data1 >> 8) & 0xff, /* Port */ 2146 (data0 >> 8) & 0xff, /* Character 3 */ 2147 (data0 ) & 0xff /* Character 4 */); 2148 break; 2149 default: 2150 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " 2151 "FC event (%02x %02x %02x)\n", 2152 (data1 >> 8) & 0xff, /* Port */ 2153 (data0 >> 16) & 0xff, /* Event */ 2154 (data0 >> 8) & 0xff, /* Character 3 */ 2155 (data0 ) & 0xff /* Character 4 */); 2156 } 2157 break; 2158 2159 case MPI_EVENT_LOGOUT: 2160 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", 2161 (data1 >> 8) & 0xff, data0); 2162 break; 2163 case MPI_EVENT_QUEUE_FULL: 2164 { 2165 struct cam_sim *sim; 2166 struct cam_path *tmppath; 2167 struct ccb_relsim crs; 2168 PTR_EVENT_DATA_QUEUE_FULL pqf = 2169 (PTR_EVENT_DATA_QUEUE_FULL) msg->Data; 2170 lun_id_t lun_id; 2171 2172 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " 2173 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); 2174 if (mpt->phydisk_sim) { 2175 sim = mpt->phydisk_sim; 2176 } else { 2177 sim = mpt->sim; 2178 } 2179 MPTLOCK_2_CAMLOCK(mpt); 2180 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { 2181 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2182 pqf->TargetID, lun_id) != CAM_REQ_CMP) { 2183 mpt_prt(mpt, "unable to create a path to send " 2184 "XPT_REL_SIMQ"); 2185 CAMLOCK_2_MPTLOCK(mpt); 2186 break; 2187 } 2188 xpt_setup_ccb(&crs.ccb_h, tmppath, 5); 2189 crs.ccb_h.func_code = XPT_REL_SIMQ; 2190 crs.release_flags = RELSIM_ADJUST_OPENINGS; 2191 crs.openings = pqf->CurrentDepth - 1; 2192 xpt_action((union ccb *)&crs); 2193 if (crs.ccb_h.status != CAM_REQ_CMP) { 2194 mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); 2195 } 2196 xpt_free_path(tmppath); 2197 } 2198 CAMLOCK_2_MPTLOCK(mpt); 2199 break; 2200 } 2201 case MPI_EVENT_EVENT_CHANGE: 2202 case MPI_EVENT_INTEGRATED_RAID: 2203 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2204 case MPI_EVENT_SAS_SES: 2205 break; 2206 default: 2207 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", 2208 msg->Event & 0xFF); 2209 return (0); 2210 } 2211 return (1); 2212 } 2213 2214 /* 2215 * Reply path for all SCSI I/O requests, called from our 2216 * interrupt handler by extracting our handler index from 2217 * the MsgContext field 
of the reply from the IOC. 2218 * 2219 * This routine is optimized for the common case of a 2220 * completion without error. All exception handling is 2221 * offloaded to non-inlined helper routines to minimize 2222 * cache footprint. 2223 */ 2224 static int 2225 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, 2226 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2227 { 2228 MSG_SCSI_IO_REQUEST *scsi_req; 2229 union ccb *ccb; 2230 target_id_t tgt; 2231 2232 if (req->state == REQ_STATE_FREE) { 2233 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); 2234 return (TRUE); 2235 } 2236 2237 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; 2238 ccb = req->ccb; 2239 if (ccb == NULL) { 2240 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", 2241 req, req->serno); 2242 return (TRUE); 2243 } 2244 2245 tgt = scsi_req->TargetID; 2246 untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch); 2247 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2248 2249 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2250 bus_dmasync_op_t op; 2251 2252 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 2253 op = BUS_DMASYNC_POSTREAD; 2254 else 2255 op = BUS_DMASYNC_POSTWRITE; 2256 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 2257 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2258 } 2259 2260 if (reply_frame == NULL) { 2261 /* 2262 * Context only reply, completion without error status. 2263 */ 2264 ccb->csio.resid = 0; 2265 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2266 ccb->csio.scsi_status = SCSI_STATUS_OK; 2267 } else { 2268 mpt_scsi_reply_frame_handler(mpt, req, reply_frame); 2269 } 2270 2271 if (mpt->outofbeer) { 2272 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2273 mpt->outofbeer = 0; 2274 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 2275 } 2276 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { 2277 struct scsi_inquiry_data *iq = 2278 (struct scsi_inquiry_data *)ccb->csio.data_ptr; 2279 if (scsi_req->Function == 2280 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 2281 /* 2282 * Fake out the device type so that only the 2283 * pass-thru device will attach. 2284 */ 2285 iq->device &= ~0x1F; 2286 iq->device |= T_NODEVICE; 2287 } 2288 } 2289 if (mpt->verbose == MPT_PRT_DEBUG) { 2290 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", 2291 req, req->serno); 2292 } 2293 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 2294 MPTLOCK_2_CAMLOCK(mpt); 2295 xpt_done(ccb); 2296 CAMLOCK_2_MPTLOCK(mpt); 2297 if ((req->state & REQ_STATE_TIMEDOUT) == 0) { 2298 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2299 } else { 2300 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", 2301 req, req->serno); 2302 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 2303 } 2304 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, 2305 ("CCB req needed wakeup")); 2306 #ifdef INVARIANTS 2307 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); 2308 #endif 2309 mpt_free_request(mpt, req); 2310 return (TRUE); 2311 } 2312 2313 static int 2314 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, 2315 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2316 { 2317 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; 2318 2319 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); 2320 #ifdef INVARIANTS 2321 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); 2322 #endif 2323 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; 2324 /* Record IOC Status and Response Code of TMF for any waiters. 
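 * mpt_bus_reset() and mpt_recover_commands() read these fields from
 * mpt->tmf_req once mpt_wait_req() returns, then mark the request free.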
*/ 2325 req->IOCStatus = le16toh(tmf_reply->IOCStatus); 2326 req->ResponseCode = tmf_reply->ResponseCode; 2327 2328 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", 2329 req, req->serno, le16toh(tmf_reply->IOCStatus)); 2330 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2331 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 2332 req->state |= REQ_STATE_DONE; 2333 wakeup(req); 2334 } else { 2335 mpt->tmf_req->state = REQ_STATE_FREE; 2336 } 2337 return (TRUE); 2338 } 2339 2340 /* 2341 * XXX: Move to definitions file 2342 */ 2343 #define ELS 0x22 2344 #define FC4LS 0x32 2345 #define ABTS 0x81 2346 #define BA_ACC 0x84 2347 2348 #define LS_RJT 0x01 2349 #define LS_ACC 0x02 2350 #define PLOGI 0x03 2351 #define LOGO 0x05 2352 #define SRR 0x14 2353 #define PRLI 0x20 2354 #define PRLO 0x21 2355 #define ADISC 0x52 2356 #define RSCN 0x61 2357 2358 static void 2359 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, 2360 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) 2361 { 2362 uint32_t fl; 2363 MSG_LINK_SERVICE_RSP_REQUEST tmp; 2364 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; 2365 2366 /* 2367 * We are going to reuse the ELS request to send this response back. 2368 */ 2369 rsp = &tmp; 2370 memset(rsp, 0, sizeof(*rsp)); 2371 2372 #ifdef USE_IMMEDIATE_LINK_DATA 2373 /* 2374 * Apparently the IMMEDIATE stuff doesn't seem to work. 2375 */ 2376 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; 2377 #endif 2378 rsp->RspLength = length; 2379 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; 2380 rsp->MsgContext = htole32(req->index | fc_els_handler_id); 2381 2382 /* 2383 * Copy over information from the original reply frame to 2384 * it's correct place in the response. 2385 */ 2386 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); 2387 2388 /* 2389 * And now copy back the temporary area to the original frame. 2390 */ 2391 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); 2392 rsp = req->req_vbuf; 2393 2394 #ifdef USE_IMMEDIATE_LINK_DATA 2395 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); 2396 #else 2397 { 2398 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; 2399 bus_addr_t paddr = req->req_pbuf; 2400 paddr += MPT_RQSL(mpt); 2401 2402 fl = 2403 MPI_SGE_FLAGS_HOST_TO_IOC | 2404 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2405 MPI_SGE_FLAGS_LAST_ELEMENT | 2406 MPI_SGE_FLAGS_END_OF_LIST | 2407 MPI_SGE_FLAGS_END_OF_BUFFER; 2408 fl <<= MPI_SGE_FLAGS_SHIFT; 2409 fl |= (length); 2410 se->FlagsLength = htole32(fl); 2411 se->Address = htole32((uint32_t) paddr); 2412 } 2413 #endif 2414 2415 /* 2416 * Send it on... 
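 * The completion of this LINK_SRVC_RSP is handled by
 * mpt_fc_els_reply_handler(), which recycles the request into a fresh
 * ELS buffer post.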
2417 */ 2418 mpt_send_cmd(mpt, req); 2419 } 2420 2421 static int 2422 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, 2423 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2424 { 2425 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = 2426 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; 2427 U8 rctl; 2428 U8 type; 2429 U8 cmd; 2430 U16 status = le16toh(reply_frame->IOCStatus); 2431 U32 *elsbuf; 2432 int ioindex; 2433 int do_refresh = TRUE; 2434 2435 #ifdef INVARIANTS 2436 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 2437 ("fc_els_reply_handler: req %p:%u for function %x on freelist!", 2438 req, req->serno, rp->Function)); 2439 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2440 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2441 } else { 2442 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2443 } 2444 #endif 2445 mpt_lprt(mpt, MPT_PRT_DEBUG, 2446 "FC_ELS Complete: req %p:%u, reply %p function %x\n", 2447 req, req->serno, reply_frame, reply_frame->Function); 2448 2449 if (status != MPI_IOCSTATUS_SUCCESS) { 2450 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", 2451 status, reply_frame->Function); 2452 if (status == MPI_IOCSTATUS_INVALID_STATE) { 2453 /* 2454 * XXX: to get around shutdown issue 2455 */ 2456 mpt->disabled = 1; 2457 return (TRUE); 2458 } 2459 return (TRUE); 2460 } 2461 2462 /* 2463 * If the function of a link service response, we recycle the 2464 * response to be a refresh for a new link service request. 2465 * 2466 * The request pointer is bogus in this case and we have to fetch 2467 * it based upon the TransactionContext. 2468 */ 2469 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { 2470 /* Freddie Uncle Charlie Katie */ 2471 /* We don't get the IOINDEX as part of the Link Svc Rsp */ 2472 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) 2473 if (mpt->els_cmd_ptrs[ioindex] == req) { 2474 break; 2475 } 2476 2477 KASSERT(ioindex < mpt->els_cmds_allocated, 2478 ("can't find my mommie!")); 2479 2480 /* remove from active list as we're going to re-post it */ 2481 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2482 req->state &= ~REQ_STATE_QUEUED; 2483 req->state |= REQ_STATE_DONE; 2484 mpt_fc_post_els(mpt, req, ioindex); 2485 return (TRUE); 2486 } 2487 2488 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2489 /* remove from active list as we're done */ 2490 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2491 req->state &= ~REQ_STATE_QUEUED; 2492 req->state |= REQ_STATE_DONE; 2493 if (req->state & REQ_STATE_TIMEDOUT) { 2494 mpt_lprt(mpt, MPT_PRT_DEBUG, 2495 "Sync Primitive Send Completed After Timeout\n"); 2496 mpt_free_request(mpt, req); 2497 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { 2498 mpt_lprt(mpt, MPT_PRT_DEBUG, 2499 "Async Primitive Send Complete\n"); 2500 mpt_free_request(mpt, req); 2501 } else { 2502 mpt_lprt(mpt, MPT_PRT_DEBUG, 2503 "Sync Primitive Send Complete- Waking Waiter\n"); 2504 wakeup(req); 2505 } 2506 return (TRUE); 2507 } 2508 2509 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { 2510 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " 2511 "Length %d Message Flags %x\n", rp->Function, rp->Flags, 2512 rp->MsgLength, rp->MsgFlags); 2513 return (TRUE); 2514 } 2515 2516 if (rp->MsgLength <= 5) { 2517 /* 2518 * This is just a ack of an original ELS buffer post 2519 */ 2520 mpt_lprt(mpt, MPT_PRT_DEBUG, 2521 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); 2522 return (TRUE); 2523 } 2524 2525 2526 rctl = (le32toh(rp->Rctl_Did) & 
MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; 2527 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; 2528 2529 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; 2530 cmd = be32toh(elsbuf[0]) >> 24; 2531 2532 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { 2533 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); 2534 return (TRUE); 2535 } 2536 2537 ioindex = le32toh(rp->TransactionContext); 2538 req = mpt->els_cmd_ptrs[ioindex]; 2539 2540 if (rctl == ELS && type == 1) { 2541 switch (cmd) { 2542 case PRLI: 2543 /* 2544 * Send back a PRLI ACC 2545 */ 2546 mpt_prt(mpt, "PRLI from 0x%08x%08x\n", 2547 le32toh(rp->Wwn.PortNameHigh), 2548 le32toh(rp->Wwn.PortNameLow)); 2549 elsbuf[0] = htobe32(0x02100014); 2550 elsbuf[1] |= htobe32(0x00000100); 2551 elsbuf[4] = htobe32(0x00000002); 2552 if (mpt->role & MPT_ROLE_TARGET) 2553 elsbuf[4] |= htobe32(0x00000010); 2554 if (mpt->role & MPT_ROLE_INITIATOR) 2555 elsbuf[4] |= htobe32(0x00000020); 2556 /* remove from active list as we're done */ 2557 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2558 req->state &= ~REQ_STATE_QUEUED; 2559 req->state |= REQ_STATE_DONE; 2560 mpt_fc_els_send_response(mpt, req, rp, 20); 2561 do_refresh = FALSE; 2562 break; 2563 case PRLO: 2564 memset(elsbuf, 0, 5 * (sizeof (U32))); 2565 elsbuf[0] = htobe32(0x02100014); 2566 elsbuf[1] = htobe32(0x08000100); 2567 mpt_prt(mpt, "PRLO from 0x%08x%08x\n", 2568 le32toh(rp->Wwn.PortNameHigh), 2569 le32toh(rp->Wwn.PortNameLow)); 2570 /* remove from active list as we're done */ 2571 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2572 req->state &= ~REQ_STATE_QUEUED; 2573 req->state |= REQ_STATE_DONE; 2574 mpt_fc_els_send_response(mpt, req, rp, 20); 2575 do_refresh = FALSE; 2576 break; 2577 default: 2578 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); 2579 break; 2580 } 2581 } else if (rctl == ABTS && type == 0) { 2582 uint16_t rx_id = le16toh(rp->Rxid); 2583 uint16_t ox_id = le16toh(rp->Oxid); 2584 request_t *tgt_req = NULL; 2585 2586 mpt_prt(mpt, 2587 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", 2588 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), 2589 le32toh(rp->Wwn.PortNameLow)); 2590 if (rx_id >= mpt->mpt_max_tgtcmds) { 2591 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); 2592 } else if (mpt->tgt_cmd_ptrs == NULL) { 2593 mpt_prt(mpt, "No TGT CMD PTRS\n"); 2594 } else { 2595 tgt_req = mpt->tgt_cmd_ptrs[rx_id]; 2596 } 2597 if (tgt_req) { 2598 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req); 2599 uint8_t *vbuf; 2600 union ccb *ccb = tgt->ccb; 2601 uint32_t ct_id; 2602 2603 vbuf = tgt_req->req_vbuf; 2604 vbuf += MPT_RQSL(mpt); 2605 2606 /* 2607 * Check to make sure we have the correct command. 2608 * The reply descriptor in the target state should 2609 * contain an IoIndex that should match the 2610 * RX_ID. 2611 * 2612 * It'd be nice to have OX_ID to crosscheck with 2613 * as well.
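 * For now we only compare the IoIndex recorded in the target state's
 * reply descriptor against the RX_ID.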
2614 */ 2615 ct_id = GET_IO_INDEX(tgt->reply_desc); 2616 2617 if (ct_id != rx_id) { 2618 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " 2619 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n", 2620 rx_id, ct_id); 2621 goto skip; 2622 } 2623 2624 ccb = tgt->ccb; 2625 if (ccb) { 2626 mpt_prt(mpt, 2627 "CCB (%p): lun %u flags %x status %x\n", 2628 ccb, ccb->ccb_h.target_lun, 2629 ccb->ccb_h.flags, ccb->ccb_h.status); 2630 } 2631 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " 2632 "%x nxfers %x\n", tgt->state, 2633 tgt->resid, tgt->bytes_xfered, tgt->reply_desc, 2634 tgt->nxfers); 2635 skip: 2636 if (mpt_abort_target_cmd(mpt, tgt_req)) { 2637 mpt_prt(mpt, "unable to start TargetAbort\n"); 2638 } 2639 } else { 2640 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); 2641 } 2642 memset(elsbuf, 0, 5 * (sizeof (U32))); 2643 elsbuf[0] = htobe32(0); 2644 elsbuf[1] = htobe32((ox_id << 16) | rx_id); 2645 elsbuf[2] = htobe32(0x000ffff); 2646 /* 2647 * Dork with the reply frame so that the reponse to it 2648 * will be correct. 2649 */ 2650 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); 2651 /* remove from active list as we're done */ 2652 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2653 req->state &= ~REQ_STATE_QUEUED; 2654 req->state |= REQ_STATE_DONE; 2655 mpt_fc_els_send_response(mpt, req, rp, 12); 2656 do_refresh = FALSE; 2657 } else { 2658 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); 2659 } 2660 if (do_refresh == TRUE) { 2661 /* remove from active list as we're done */ 2662 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2663 req->state &= ~REQ_STATE_QUEUED; 2664 req->state |= REQ_STATE_DONE; 2665 mpt_fc_post_els(mpt, req, ioindex); 2666 } 2667 return (TRUE); 2668 } 2669 2670 /* 2671 * Clean up all SCSI Initiator personality state in response 2672 * to a controller reset. 2673 */ 2674 static void 2675 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) 2676 { 2677 /* 2678 * The pending list is already run down by 2679 * the generic handler. Perform the same 2680 * operation on the timed out request list. 2681 */ 2682 mpt_complete_request_chain(mpt, &mpt->request_timeout_list, 2683 MPI_IOCSTATUS_INVALID_STATE); 2684 2685 /* 2686 * XXX: We need to repost ELS and Target Command Buffers? 2687 */ 2688 2689 /* 2690 * Inform the XPT that a bus reset has occurred. 2691 */ 2692 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2693 } 2694 2695 /* 2696 * Parse additional completion information in the reply 2697 * frame for SCSI I/O requests. 
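 * Called from mpt_scsi_reply_handler() only when the IOC returned a
 * full reply frame instead of a context-only (successful) completion.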
2698 */ 2699 static int 2700 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, 2701 MSG_DEFAULT_REPLY *reply_frame) 2702 { 2703 union ccb *ccb; 2704 MSG_SCSI_IO_REPLY *scsi_io_reply; 2705 u_int ioc_status; 2706 u_int sstate; 2707 u_int loginfo; 2708 2709 MPT_DUMP_REPLY_FRAME(mpt, reply_frame); 2710 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST 2711 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, 2712 ("MPT SCSI I/O Handler called with incorrect reply type")); 2713 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, 2714 ("MPT SCSI I/O Handler called with continuation reply")); 2715 2716 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; 2717 ioc_status = le16toh(scsi_io_reply->IOCStatus); 2718 loginfo = ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE; 2719 ioc_status &= MPI_IOCSTATUS_MASK; 2720 sstate = scsi_io_reply->SCSIState; 2721 2722 ccb = req->ccb; 2723 ccb->csio.resid = 2724 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); 2725 2726 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 2727 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { 2728 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 2729 ccb->csio.sense_resid = 2730 ccb->csio.sense_len - scsi_io_reply->SenseCount; 2731 bcopy(req->sense_vbuf, &ccb->csio.sense_data, 2732 min(ccb->csio.sense_len, scsi_io_reply->SenseCount)); 2733 } 2734 2735 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { 2736 /* 2737 * Tag messages rejected, but non-tagged retry 2738 * was successful. 2739 XXXX 2740 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); 2741 */ 2742 } 2743 2744 switch(ioc_status) { 2745 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 2746 /* 2747 * XXX 2748 * Linux driver indicates that a zero 2749 * transfer length with this error code 2750 * indicates a CRC error. 2751 * 2752 * No need to swap the bytes for checking 2753 * against zero. 2754 */ 2755 if (scsi_io_reply->TransferCount == 0) { 2756 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 2757 break; 2758 } 2759 /* FALLTHROUGH */ 2760 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 2761 case MPI_IOCSTATUS_SUCCESS: 2762 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 2763 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { 2764 /* 2765 * Status was never returned for this transaction. 2766 */ 2767 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); 2768 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { 2769 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; 2770 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); 2771 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) 2772 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); 2773 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { 2774 2775 /* XXX Handle SPI-Packet and FCP-2 reponse info. */ 2776 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 2777 } else 2778 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2779 break; 2780 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: 2781 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); 2782 break; 2783 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: 2784 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 2785 break; 2786 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 2787 /* 2788 * Since selection timeouts and "device really not 2789 * there" are grouped into this error code, report 2790 * selection timeout. Selection timeouts are 2791 * typically retried before giving up on the device 2792 * whereas "device not there" errors are considered 2793 * unretryable. 
2794 */ 2795 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 2796 break; 2797 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: 2798 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); 2799 break; 2800 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 2801 mpt_set_ccb_status(ccb, CAM_PATH_INVALID); 2802 break; 2803 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 2804 mpt_set_ccb_status(ccb, CAM_TID_INVALID); 2805 break; 2806 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 2807 ccb->ccb_h.status = CAM_UA_TERMIO; 2808 break; 2809 case MPI_IOCSTATUS_INVALID_STATE: 2810 /* 2811 * The IOC has been reset. Emulate a bus reset. 2812 */ 2813 /* FALLTHROUGH */ 2814 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: 2815 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 2816 break; 2817 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: 2818 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: 2819 /* 2820 * Don't clobber any timeout status that has 2821 * already been set for this transaction. We 2822 * want the SCSI layer to be able to differentiate 2823 * between the command we aborted due to timeout 2824 * and any innocent bystanders. 2825 */ 2826 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) 2827 break; 2828 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); 2829 break; 2830 2831 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 2832 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); 2833 break; 2834 case MPI_IOCSTATUS_BUSY: 2835 mpt_set_ccb_status(ccb, CAM_BUSY); 2836 break; 2837 case MPI_IOCSTATUS_INVALID_FUNCTION: 2838 case MPI_IOCSTATUS_INVALID_SGL: 2839 case MPI_IOCSTATUS_INTERNAL_ERROR: 2840 case MPI_IOCSTATUS_INVALID_FIELD: 2841 default: 2842 /* XXX 2843 * Some of the above may need to kick 2844 * of a recovery action!!!! 2845 */ 2846 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; 2847 break; 2848 } 2849 2850 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2851 mpt_freeze_ccb(ccb); 2852 } 2853 2854 return (TRUE); 2855 } 2856 2857 static void 2858 mpt_action(struct cam_sim *sim, union ccb *ccb) 2859 { 2860 struct mpt_softc *mpt; 2861 struct ccb_trans_settings *cts; 2862 target_id_t tgt; 2863 lun_id_t lun; 2864 int raid_passthru; 2865 2866 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); 2867 2868 mpt = (struct mpt_softc *)cam_sim_softc(sim); 2869 KASSERT(MPT_OWNED(mpt) == 0, ("mpt owned on entrance to mpt_action")); 2870 raid_passthru = (sim == mpt->phydisk_sim); 2871 2872 tgt = ccb->ccb_h.target_id; 2873 lun = ccb->ccb_h.target_lun; 2874 if (raid_passthru && 2875 ccb->ccb_h.func_code != XPT_PATH_INQ && 2876 ccb->ccb_h.func_code != XPT_RESET_BUS && 2877 ccb->ccb_h.func_code != XPT_RESET_DEV) { 2878 CAMLOCK_2_MPTLOCK(mpt); 2879 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 2880 MPTLOCK_2_CAMLOCK(mpt); 2881 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2882 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 2883 xpt_done(ccb); 2884 return; 2885 } 2886 MPTLOCK_2_CAMLOCK(mpt); 2887 } 2888 ccb->ccb_h.ccb_mpt_ptr = mpt; 2889 2890 switch (ccb->ccb_h.func_code) { 2891 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 2892 /* 2893 * Do a couple of preliminary checks... 
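 * A CDB passed by physical pointer is not supported, and the CDB must
 * fit within the 16 byte CDB field of the SCSI I/O request message.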
2894 */ 2895 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2896 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 2897 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2898 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 2899 break; 2900 } 2901 } 2902 /* Max supported CDB length is 16 bytes */ 2903 /* XXX Unless we implement the new 32byte message type */ 2904 if (ccb->csio.cdb_len > 2905 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { 2906 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2907 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 2908 break; 2909 } 2910 ccb->csio.scsi_status = SCSI_STATUS_OK; 2911 mpt_start(sim, ccb); 2912 return; 2913 2914 case XPT_RESET_BUS: 2915 if (raid_passthru) { 2916 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2917 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2918 break; 2919 } 2920 case XPT_RESET_DEV: 2921 xpt_print(ccb->ccb_h.path, "reset %s\n", 2922 ccb->ccb_h.func_code == XPT_RESET_BUS? "bus" : "device"); 2923 CAMLOCK_2_MPTLOCK(mpt); 2924 (void) mpt_bus_reset(mpt, tgt, lun, FALSE); 2925 MPTLOCK_2_CAMLOCK(mpt); 2926 2927 /* 2928 * mpt_bus_reset is always successful in that it 2929 * will fall back to a hard reset should a bus 2930 * reset attempt fail. 2931 */ 2932 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2933 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2934 break; 2935 2936 case XPT_ABORT: 2937 { 2938 union ccb *accb = ccb->cab.abort_ccb; 2939 CAMLOCK_2_MPTLOCK(mpt); 2940 switch (accb->ccb_h.func_code) { 2941 case XPT_ACCEPT_TARGET_IO: 2942 case XPT_IMMED_NOTIFY: 2943 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); 2944 break; 2945 case XPT_CONT_TARGET_IO: 2946 mpt_prt(mpt, "cannot abort active CTIOs yet\n"); 2947 ccb->ccb_h.status = CAM_UA_ABORT; 2948 break; 2949 case XPT_SCSI_IO: 2950 ccb->ccb_h.status = CAM_UA_ABORT; 2951 break; 2952 default: 2953 ccb->ccb_h.status = CAM_REQ_INVALID; 2954 break; 2955 } 2956 MPTLOCK_2_CAMLOCK(mpt); 2957 break; 2958 } 2959 2960 #ifdef CAM_NEW_TRAN_CODE 2961 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) 2962 #else 2963 #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS) 2964 #endif 2965 #define DP_DISC_ENABLE 0x1 2966 #define DP_DISC_DISABL 0x2 2967 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) 2968 2969 #define DP_TQING_ENABLE 0x4 2970 #define DP_TQING_DISABL 0x8 2971 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) 2972 2973 #define DP_WIDE 0x10 2974 #define DP_NARROW 0x20 2975 #define DP_WIDTH (DP_WIDE|DP_NARROW) 2976 2977 #define DP_SYNC 0x40 2978 2979 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 2980 { 2981 #ifdef CAM_NEW_TRAN_CODE 2982 struct ccb_trans_settings_scsi *scsi; 2983 struct ccb_trans_settings_spi *spi; 2984 #endif 2985 uint8_t dval; 2986 u_int period; 2987 u_int offset; 2988 int i, j; 2989 2990 cts = &ccb->cts; 2991 2992 if (mpt->is_fc || mpt->is_sas) { 2993 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2994 break; 2995 } 2996 2997 #ifdef CAM_NEW_TRAN_CODE 2998 scsi = &cts->proto_specific.scsi; 2999 spi = &cts->xport_specific.spi; 3000 3001 /* 3002 * We can be called just to valid transport and proto versions 3003 */ 3004 if (scsi->valid == 0 && spi->valid == 0) { 3005 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3006 break; 3007 } 3008 #endif 3009 3010 /* 3011 * Skip attempting settings on RAID volume disks. 3012 * Other devices on the bus get the normal treatment. 
3013 */ 3014 if (mpt->phydisk_sim && raid_passthru == 0 && 3015 mpt_is_raid_volume(mpt, tgt) != 0) { 3016 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3017 "no transfer settings for RAID vols\n"); 3018 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3019 break; 3020 } 3021 3022 i = mpt->mpt_port_page2.PortSettings & 3023 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 3024 j = mpt->mpt_port_page2.PortFlags & 3025 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 3026 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && 3027 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { 3028 mpt_lprt(mpt, MPT_PRT_ALWAYS, 3029 "honoring BIOS transfer negotiations\n"); 3030 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3031 break; 3032 } 3033 3034 dval = 0; 3035 period = 0; 3036 offset = 0; 3037 3038 #ifndef CAM_NEW_TRAN_CODE 3039 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 3040 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ? 3041 DP_DISC_ENABLE : DP_DISC_DISABL; 3042 } 3043 3044 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 3045 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ? 3046 DP_TQING_ENABLE : DP_TQING_DISABL; 3047 } 3048 3049 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 3050 dval |= cts->bus_width ? DP_WIDE : DP_NARROW; 3051 } 3052 3053 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 3054 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) { 3055 dval |= DP_SYNC; 3056 period = cts->sync_period; 3057 offset = cts->sync_offset; 3058 } 3059 #else 3060 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 3061 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? 3062 DP_DISC_ENABLE : DP_DISC_DISABL; 3063 } 3064 3065 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 3066 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 3067 DP_TQING_ENABLE : DP_TQING_DISABL; 3068 } 3069 3070 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 3071 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? 
3072 DP_WIDE : DP_NARROW; 3073 } 3074 3075 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { 3076 dval |= DP_SYNC; 3077 offset = spi->sync_offset; 3078 } else { 3079 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3080 &mpt->mpt_dev_page1[tgt]; 3081 offset = ptr->RequestedParameters; 3082 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3083 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3084 } 3085 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { 3086 dval |= DP_SYNC; 3087 period = spi->sync_period; 3088 } else { 3089 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3090 &mpt->mpt_dev_page1[tgt]; 3091 period = ptr->RequestedParameters; 3092 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3093 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3094 } 3095 #endif 3096 CAMLOCK_2_MPTLOCK(mpt); 3097 if (dval & DP_DISC_ENABLE) { 3098 mpt->mpt_disc_enable |= (1 << tgt); 3099 } else if (dval & DP_DISC_DISABL) { 3100 mpt->mpt_disc_enable &= ~(1 << tgt); 3101 } 3102 if (dval & DP_TQING_ENABLE) { 3103 mpt->mpt_tag_enable |= (1 << tgt); 3104 } else if (dval & DP_TQING_DISABL) { 3105 mpt->mpt_tag_enable &= ~(1 << tgt); 3106 } 3107 if (dval & DP_WIDTH) { 3108 mpt_setwidth(mpt, tgt, 1); 3109 } 3110 if (dval & DP_SYNC) { 3111 mpt_setsync(mpt, tgt, period, offset); 3112 } 3113 if (dval == 0) { 3114 MPTLOCK_2_CAMLOCK(mpt); 3115 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3116 break; 3117 } 3118 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3119 "set [%d]: 0x%x period 0x%x offset %d\n", 3120 tgt, dval, period, offset); 3121 if (mpt_update_spi_config(mpt, tgt)) { 3122 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3123 } else { 3124 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3125 } 3126 MPTLOCK_2_CAMLOCK(mpt); 3127 break; 3128 } 3129 case XPT_GET_TRAN_SETTINGS: 3130 { 3131 #ifdef CAM_NEW_TRAN_CODE 3132 struct ccb_trans_settings_scsi *scsi; 3133 cts = &ccb->cts; 3134 cts->protocol = PROTO_SCSI; 3135 if (mpt->is_fc) { 3136 struct ccb_trans_settings_fc *fc = 3137 &cts->xport_specific.fc; 3138 cts->protocol_version = SCSI_REV_SPC; 3139 cts->transport = XPORT_FC; 3140 cts->transport_version = 0; 3141 fc->valid = CTS_FC_VALID_SPEED; 3142 fc->bitrate = 100000; 3143 } else if (mpt->is_sas) { 3144 struct ccb_trans_settings_sas *sas = 3145 &cts->xport_specific.sas; 3146 cts->protocol_version = SCSI_REV_SPC2; 3147 cts->transport = XPORT_SAS; 3148 cts->transport_version = 0; 3149 sas->valid = CTS_SAS_VALID_SPEED; 3150 sas->bitrate = 300000; 3151 } else { 3152 cts->protocol_version = SCSI_REV_2; 3153 cts->transport = XPORT_SPI; 3154 cts->transport_version = 2; 3155 if (mpt_get_spi_settings(mpt, cts) != 0) { 3156 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3157 break; 3158 } 3159 } 3160 scsi = &cts->proto_specific.scsi; 3161 scsi->valid = CTS_SCSI_VALID_TQ; 3162 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 3163 #else 3164 cts = &ccb->cts; 3165 if (mpt->is_fc) { 3166 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3167 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3168 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3169 } else if (mpt->is_sas) { 3170 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3171 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3172 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3173 } else if (mpt_get_spi_settings(mpt, cts) != 0) { 3174 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3175 break; 3176 } 3177 #endif 3178 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3179 break; 3180 } 3181 case XPT_CALC_GEOMETRY: 3182 { 3183 struct ccb_calc_geometry *ccg; 3184 3185 ccg = &ccb->ccg; 3186 if (ccg->block_size == 0) { 3187 ccb->ccb_h.status &= 
~CAM_SIM_QUEUED; 3188 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3189 break; 3190 } 3191 mpt_calc_geometry(ccg, /*extended*/1); 3192 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); 3193 break; 3194 } 3195 case XPT_PATH_INQ: /* Path routing inquiry */ 3196 { 3197 struct ccb_pathinq *cpi = &ccb->cpi; 3198 3199 cpi->version_num = 1; 3200 cpi->target_sprt = 0; 3201 cpi->hba_eng_cnt = 0; 3202 cpi->max_target = mpt->port_facts[0].MaxDevices - 1; 3203 /* 3204 * FC cards report MAX_DEVICES of 512, but 3205 * the MSG_SCSI_IO_REQUEST target id field 3206 * is only 8 bits. Until we fix the driver 3207 * to support 'channels' for bus overflow, 3208 * just limit it. 3209 */ 3210 if (cpi->max_target > 255) { 3211 cpi->max_target = 255; 3212 } 3213 3214 /* 3215 * VMware ESX reports > 16 devices and then dies when we probe. 3216 */ 3217 if (mpt->is_spi && cpi->max_target > 15) { 3218 cpi->max_target = 15; 3219 } 3220 cpi->max_lun = 7; 3221 cpi->initiator_id = mpt->mpt_ini_id; 3222 cpi->bus_id = cam_sim_bus(sim); 3223 3224 /* 3225 * The base speed is the speed of the underlying connection. 3226 */ 3227 cpi->protocol = PROTO_SCSI; 3228 if (mpt->is_fc) { 3229 cpi->hba_misc = PIM_NOBUSRESET; 3230 cpi->base_transfer_speed = 100000; 3231 cpi->hba_inquiry = PI_TAG_ABLE; 3232 cpi->transport = XPORT_FC; 3233 cpi->transport_version = 0; 3234 cpi->protocol_version = SCSI_REV_SPC; 3235 } else if (mpt->is_sas) { 3236 cpi->hba_misc = PIM_NOBUSRESET; 3237 cpi->base_transfer_speed = 300000; 3238 cpi->hba_inquiry = PI_TAG_ABLE; 3239 cpi->transport = XPORT_SAS; 3240 cpi->transport_version = 0; 3241 cpi->protocol_version = SCSI_REV_SPC2; 3242 } else { 3243 cpi->hba_misc = PIM_SEQSCAN; 3244 cpi->base_transfer_speed = 3300; 3245 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3246 cpi->transport = XPORT_SPI; 3247 cpi->transport_version = 2; 3248 cpi->protocol_version = SCSI_REV_2; 3249 } 3250 3251 /* 3252 * We give our fake RAID passthru bus a width that is MaxPhysDisks 3253 * wide and restrict it to one lun.
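 * The initiator id is set to one past the last physical disk so it can
 * never collide with a real device on that bus.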
3254 */ 3255 if (raid_passthru) { 3256 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; 3257 cpi->initiator_id = cpi->max_target + 1; 3258 cpi->max_lun = 0; 3259 } 3260 3261 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { 3262 cpi->hba_misc |= PIM_NOINITIATOR; 3263 } 3264 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 3265 cpi->target_sprt = 3266 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3267 } else { 3268 cpi->target_sprt = 0; 3269 } 3270 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3271 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); 3272 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3273 cpi->unit_number = cam_sim_unit(sim); 3274 cpi->ccb_h.status = CAM_REQ_CMP; 3275 break; 3276 } 3277 case XPT_EN_LUN: /* Enable LUN as a target */ 3278 { 3279 int result; 3280 3281 CAMLOCK_2_MPTLOCK(mpt); 3282 if (ccb->cel.enable) 3283 result = mpt_enable_lun(mpt, 3284 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3285 else 3286 result = mpt_disable_lun(mpt, 3287 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3288 MPTLOCK_2_CAMLOCK(mpt); 3289 if (result == 0) { 3290 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3291 } else { 3292 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3293 } 3294 break; 3295 } 3296 case XPT_NOTIFY_ACK: /* recycle notify ack */ 3297 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */ 3298 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 3299 { 3300 tgt_resource_t *trtp; 3301 lun_id_t lun = ccb->ccb_h.target_lun; 3302 ccb->ccb_h.sim_priv.entries[0].field = 0; 3303 ccb->ccb_h.sim_priv.entries[1].ptr = mpt; 3304 ccb->ccb_h.flags = 0; 3305 3306 if (lun == CAM_LUN_WILDCARD) { 3307 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 3308 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3309 break; 3310 } 3311 trtp = &mpt->trt_wildcard; 3312 } else if (lun >= MPT_MAX_LUNS) { 3313 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3314 break; 3315 } else { 3316 trtp = &mpt->trt[lun]; 3317 } 3318 CAMLOCK_2_MPTLOCK(mpt); 3319 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 3320 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3321 "Put FREE ATIO %p lun %d\n", ccb, lun); 3322 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, 3323 sim_links.stqe); 3324 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 3325 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3326 "Put FREE INOT lun %d\n", lun); 3327 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, 3328 sim_links.stqe); 3329 } else { 3330 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); 3331 } 3332 mpt_set_ccb_status(ccb, CAM_REQ_INPROG); 3333 MPTLOCK_2_CAMLOCK(mpt); 3334 return; 3335 } 3336 case XPT_CONT_TARGET_IO: 3337 CAMLOCK_2_MPTLOCK(mpt); 3338 mpt_target_start_io(mpt, ccb); 3339 MPTLOCK_2_CAMLOCK(mpt); 3340 return; 3341 3342 default: 3343 ccb->ccb_h.status = CAM_REQ_INVALID; 3344 break; 3345 } 3346 xpt_done(ccb); 3347 } 3348 3349 static int 3350 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) 3351 { 3352 #ifdef CAM_NEW_TRAN_CODE 3353 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 3354 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 3355 #endif 3356 target_id_t tgt; 3357 uint32_t dval, pval, oval; 3358 int rv; 3359 3360 if (IS_CURRENT_SETTINGS(cts) == 0) { 3361 tgt = cts->ccb_h.target_id; 3362 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { 3363 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { 3364 return (-1); 3365 } 3366 } else { 3367 tgt = cts->ccb_h.target_id; 3368 } 3369 3370 /* 3371 * We aren't looking at Port Page 2 BIOS settings here- 3372 * sometimes these have been known to be bogus 
XXX. 3373 * 3374 * For user settings, we pick the max from port page 0 3375 * 3376 * For current settings we read the current settings out from 3377 * device page 0 for that target. 3378 */ 3379 if (IS_CURRENT_SETTINGS(cts)) { 3380 CONFIG_PAGE_SCSI_DEVICE_0 tmp; 3381 dval = 0; 3382 3383 CAMLOCK_2_MPTLOCK(mpt); 3384 tmp = mpt->mpt_dev_page0[tgt]; 3385 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, 3386 sizeof(tmp), FALSE, 5000); 3387 if (rv) { 3388 MPTLOCK_2_CAMLOCK(mpt); 3389 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); 3390 return (rv); 3391 } 3392 MPTLOCK_2_CAMLOCK(mpt); 3393 mpt_lprt(mpt, MPT_PRT_DEBUG, 3394 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, 3395 tmp.NegotiatedParameters, tmp.Information); 3396 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? 3397 DP_WIDE : DP_NARROW; 3398 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? 3399 DP_DISC_ENABLE : DP_DISC_DISABL; 3400 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? 3401 DP_TQING_ENABLE : DP_TQING_DISABL; 3402 oval = tmp.NegotiatedParameters; 3403 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; 3404 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; 3405 pval = tmp.NegotiatedParameters; 3406 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; 3407 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; 3408 mpt->mpt_dev_page0[tgt] = tmp; 3409 } else { 3410 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; 3411 oval = mpt->mpt_port_page0.Capabilities; 3412 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); 3413 pval = mpt->mpt_port_page0.Capabilities; 3414 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); 3415 } 3416 3417 #ifndef CAM_NEW_TRAN_CODE 3418 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 3419 cts->valid = 0; 3420 cts->sync_period = pval; 3421 cts->sync_offset = oval; 3422 cts->valid |= CCB_TRANS_SYNC_RATE_VALID; 3423 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID; 3424 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID; 3425 if (dval & DP_WIDE) { 3426 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3427 } else { 3428 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3429 } 3430 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3431 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3432 if (dval & DP_DISC_ENABLE) { 3433 cts->flags |= CCB_TRANS_DISC_ENB; 3434 } 3435 if (dval & DP_TQING_ENABLE) { 3436 cts->flags |= CCB_TRANS_TAG_ENB; 3437 } 3438 } 3439 #else 3440 spi->valid = 0; 3441 scsi->valid = 0; 3442 spi->flags = 0; 3443 scsi->flags = 0; 3444 spi->sync_offset = oval; 3445 spi->sync_period = pval; 3446 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3447 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3448 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3449 if (dval & DP_WIDE) { 3450 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3451 } else { 3452 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3453 } 3454 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3455 scsi->valid = CTS_SCSI_VALID_TQ; 3456 if (dval & DP_TQING_ENABLE) { 3457 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3458 } 3459 spi->valid |= CTS_SPI_VALID_DISC; 3460 if (dval & DP_DISC_ENABLE) { 3461 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3462 } 3463 } 3464 #endif 3465 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3466 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, 3467 IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : "NVRAM ", dval, pval, oval); 3468 return (0); 3469 } 3470 3471 static void 3472 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 3473 { 3474 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3475 3476 ptr = &mpt->mpt_dev_page1[tgt]; 3477 if (onoff) { 3478 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 3479 } else { 3480 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 3481 } 3482 } 3483 3484 static void 3485 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 3486 { 3487 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3488 3489 ptr = &mpt->mpt_dev_page1[tgt]; 3490 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3491 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3492 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 3493 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 3494 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 3495 if (period == 0) { 3496 return; 3497 } 3498 ptr->RequestedParameters |= 3499 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3500 ptr->RequestedParameters |= 3501 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3502 if (period < 0xa) { 3503 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 3504 } 3505 if (period < 0x9) { 3506 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 3507 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 3508 } 3509 } 3510 3511 static int 3512 mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 3513 { 3514 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 3515 int rv; 3516 3517 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3518 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 3519 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); 3520 tmp = mpt->mpt_dev_page1[tgt]; 3521 rv = mpt_write_cur_cfg_page(mpt, tgt, 3522 &tmp.Header, sizeof(tmp), FALSE, 5000); 3523 if (rv) { 3524 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 3525 return (-1); 3526 } 3527 return (0); 3528 } 3529 3530 static void 3531 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended) 3532 { 3533 #if __FreeBSD_version >= 500000 3534 cam_calc_geometry(ccg, extended); 3535 #else 3536 uint32_t size_mb; 3537 uint32_t secs_per_cylinder; 3538 3539 if (ccg->block_size == 0) { 3540 ccg->ccb_h.status = CAM_REQ_INVALID; 3541 return; 3542 } 3543 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); 3544 if (size_mb > 1024 && extended) { 3545 ccg->heads = 255; 3546 ccg->secs_per_track = 63; 3547 } else { 3548 ccg->heads = 64; 3549 ccg->secs_per_track = 32; 3550 } 3551 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 3552 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 3553 ccg->ccb_h.status = CAM_REQ_CMP; 3554 #endif 3555 } 3556 3557 /****************************** Timeout Recovery ******************************/ 3558 static int 3559 mpt_spawn_recovery_thread(struct mpt_softc *mpt) 3560 { 3561 int error; 3562 3563 error = mpt_kthread_create(mpt_recovery_thread, mpt, 3564 &mpt->recovery_thread, /*flags*/0, 3565 /*altstack*/0, "mpt_recovery%d", mpt->unit); 3566 return (error); 3567 } 3568 3569 static void 3570 mpt_terminate_recovery_thread(struct mpt_softc *mpt) 3571 { 3572 if (mpt->recovery_thread == NULL) { 3573 return; 3574 } 3575 mpt->shutdwn_recovery = 1; 3576 wakeup(mpt); 3577 /* 3578 * Sleep on a slightly different location 3579 * for this interlock just for added safety. 
3580 */ 3581 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); 3582 } 3583 3584 static void 3585 mpt_recovery_thread(void *arg) 3586 { 3587 struct mpt_softc *mpt; 3588 3589 #if __FreeBSD_version >= 500000 3590 mtx_lock(&Giant); 3591 #endif 3592 mpt = (struct mpt_softc *)arg; 3593 MPT_LOCK(mpt); 3594 for (;;) { 3595 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3596 if (mpt->shutdwn_recovery == 0) { 3597 mpt_sleep(mpt, mpt, PUSER, "idle", 0); 3598 } 3599 } 3600 if (mpt->shutdwn_recovery != 0) { 3601 break; 3602 } 3603 mpt_recover_commands(mpt); 3604 } 3605 mpt->recovery_thread = NULL; 3606 wakeup(&mpt->recovery_thread); 3607 MPT_UNLOCK(mpt); 3608 #if __FreeBSD_version >= 500000 3609 mtx_unlock(&Giant); 3610 #endif 3611 kthread_exit(0); 3612 } 3613 3614 static int 3615 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, 3616 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) 3617 { 3618 MSG_SCSI_TASK_MGMT *tmf_req; 3619 int error; 3620 3621 /* 3622 * Wait for any current TMF request to complete. 3623 * We're only allowed to issue one TMF at a time. 3624 */ 3625 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, 3626 sleep_ok, MPT_TMF_MAX_TIMEOUT); 3627 if (error != 0) { 3628 mpt_reset(mpt, TRUE); 3629 return (ETIMEDOUT); 3630 } 3631 3632 mpt_assign_serno(mpt, mpt->tmf_req); 3633 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; 3634 3635 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; 3636 memset(tmf_req, 0, sizeof(*tmf_req)); 3637 tmf_req->TargetID = target; 3638 tmf_req->Bus = channel; 3639 tmf_req->ChainOffset = 0; 3640 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 3641 tmf_req->Reserved = 0; 3642 tmf_req->TaskType = type; 3643 tmf_req->Reserved1 = 0; 3644 tmf_req->MsgFlags = flags; 3645 tmf_req->MsgContext = 3646 htole32(mpt->tmf_req->index | scsi_tmf_handler_id); 3647 memset(&tmf_req->LUN, 0, 3648 sizeof(tmf_req->LUN) + sizeof(tmf_req->Reserved2)); 3649 if (lun >= 256) { 3650 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 3651 tmf_req->LUN[1] = lun & 0xff; 3652 } else { 3653 tmf_req->LUN[1] = lun; 3654 } 3655 tmf_req->TaskMsgContext = abort_ctx; 3656 3657 mpt_lprt(mpt, MPT_PRT_DEBUG, 3658 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, 3659 mpt->tmf_req->serno, tmf_req->MsgContext); 3660 if (mpt->verbose > MPT_PRT_DEBUG) { 3661 mpt_print_request(tmf_req); 3662 } 3663 3664 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, 3665 ("mpt_scsi_send_tmf: tmf_req already on pending list")); 3666 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); 3667 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); 3668 if (error != MPT_OK) { 3669 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); 3670 mpt->tmf_req->state = REQ_STATE_FREE; 3671 mpt_reset(mpt, TRUE); 3672 } 3673 return (error); 3674 } 3675 3676 /* 3677 * When a command times out, it is placed on the request_timeout_list 3678 * and we wake our recovery thread. The MPT-Fusion architecture supports 3679 * only a single TMF operation at a time, so we serially abort/bdr, etc, 3680 * the timedout transactions. The next TMF is issued either by the 3681 * completion handler of the current TMF waking our recovery thread, 3682 * or the TMF timeout handler causing a hard reset sequence.
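 * Only one TMF can be outstanding; mpt_scsi_send_tmf() waits for
 * mpt->tmf_req to return to REQ_STATE_FREE before issuing the next one.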
3683 */ 3684 static void 3685 mpt_recover_commands(struct mpt_softc *mpt) 3686 { 3687 request_t *req; 3688 union ccb *ccb; 3689 int error; 3690 3691 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3692 /* 3693 * No work to do- leave. 3694 */ 3695 mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); 3696 return; 3697 } 3698 3699 /* 3700 * Flush any commands whose completion coincides with their timeout. 3701 */ 3702 mpt_intr(mpt); 3703 3704 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3705 /* 3706 * The timedout commands have already 3707 * completed. This typically means 3708 * that either the timeout value was on 3709 * the hairy edge of what the device 3710 * requires or - more likely - interrupts 3711 * are not happening. 3712 */ 3713 mpt_prt(mpt, "Timedout requests already complete. " 3714 "Interrupts may not be functioning.\n"); 3715 mpt_enable_ints(mpt); 3716 return; 3717 } 3718 3719 /* 3720 * We have no visibility into the current state of the 3721 * controller, so attempt to abort the commands in the 3722 * order they timed-out. For initiator commands, we 3723 * depend on the reply handler pulling requests off 3724 * the timeout list. 3725 */ 3726 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { 3727 uint16_t status; 3728 uint8_t response; 3729 MSG_REQUEST_HEADER *hdrp = req->req_vbuf; 3730 3731 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", 3732 req, req->serno, hdrp->Function); 3733 ccb = req->ccb; 3734 if (ccb == NULL) { 3735 mpt_prt(mpt, "null ccb in timed out request. " 3736 "Resetting Controller.\n"); 3737 mpt_reset(mpt, TRUE); 3738 continue; 3739 } 3740 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); 3741 3742 /* 3743 * Check to see if this is not an initiator command and 3744 * deal with it differently if it is. 3745 */ 3746 switch (hdrp->Function) { 3747 case MPI_FUNCTION_SCSI_IO_REQUEST: 3748 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 3749 break; 3750 default: 3751 /* 3752 * XXX: FIX ME: need to abort target assists... 3753 */ 3754 mpt_prt(mpt, "just putting it back on the pend q\n"); 3755 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 3756 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, 3757 links); 3758 continue; 3759 } 3760 3761 error = mpt_scsi_send_tmf(mpt, 3762 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 3763 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 3764 htole32(req->index | scsi_io_handler_id), TRUE); 3765 3766 if (error != 0) { 3767 /* 3768 * mpt_scsi_send_tmf hard resets on failure, so no 3769 * need to do so here. Our queue should be emptied 3770 * by the hard reset. 3771 */ 3772 continue; 3773 } 3774 3775 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 3776 REQ_STATE_DONE, TRUE, 500); 3777 3778 status = mpt->tmf_req->IOCStatus; 3779 response = mpt->tmf_req->ResponseCode; 3780 mpt->tmf_req->state = REQ_STATE_FREE; 3781 3782 if (error != 0) { 3783 /* 3784 * If we've errored out,, reset the controller. 3785 */ 3786 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " 3787 "Resetting controller\n"); 3788 mpt_reset(mpt, TRUE); 3789 continue; 3790 } 3791 3792 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 3793 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " 3794 "Resetting controller.\n", status); 3795 mpt_reset(mpt, TRUE); 3796 continue; 3797 } 3798 3799 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 3800 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 3801 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. 
" 3802 "Resetting controller.\n", response); 3803 mpt_reset(mpt, TRUE); 3804 continue; 3805 } 3806 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 3807 } 3808 } 3809 3810 /************************ Target Mode Support ****************************/ 3811 static void 3812 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 3813 { 3814 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 3815 PTR_SGE_TRANSACTION32 tep; 3816 PTR_SGE_SIMPLE32 se; 3817 bus_addr_t paddr; 3818 uint32_t fl; 3819 3820 paddr = req->req_pbuf; 3821 paddr += MPT_RQSL(mpt); 3822 3823 fc = req->req_vbuf; 3824 memset(fc, 0, MPT_REQUEST_AREA); 3825 fc->BufferCount = 1; 3826 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 3827 fc->MsgContext = htole32(req->index | fc_els_handler_id); 3828 3829 /* 3830 * Okay, set up ELS buffer pointers. ELS buffer pointers 3831 * consist of a TE SGL element (with details length of zero) 3832 * followe by a SIMPLE SGL element which holds the address 3833 * of the buffer. 3834 */ 3835 3836 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 3837 3838 tep->ContextSize = 4; 3839 tep->Flags = 0; 3840 tep->TransactionContext[0] = htole32(ioindex); 3841 3842 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 3843 fl = 3844 MPI_SGE_FLAGS_HOST_TO_IOC | 3845 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 3846 MPI_SGE_FLAGS_LAST_ELEMENT | 3847 MPI_SGE_FLAGS_END_OF_LIST | 3848 MPI_SGE_FLAGS_END_OF_BUFFER; 3849 fl <<= MPI_SGE_FLAGS_SHIFT; 3850 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 3851 se->FlagsLength = htole32(fl); 3852 se->Address = htole32((uint32_t) paddr); 3853 mpt_lprt(mpt, MPT_PRT_DEBUG, 3854 "add ELS index %d ioindex %d for %p:%u\n", 3855 req->index, ioindex, req, req->serno); 3856 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 3857 ("mpt_fc_post_els: request not locked")); 3858 mpt_send_cmd(mpt, req); 3859 } 3860 3861 static void 3862 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 3863 { 3864 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 3865 PTR_CMD_BUFFER_DESCRIPTOR cb; 3866 bus_addr_t paddr; 3867 3868 paddr = req->req_pbuf; 3869 paddr += MPT_RQSL(mpt); 3870 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 3871 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 3872 3873 fc = req->req_vbuf; 3874 fc->BufferCount = 1; 3875 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 3876 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 3877 3878 cb = &fc->Buffer[0]; 3879 cb->IoIndex = htole16(ioindex); 3880 cb->u.PhysicalAddress32 = htole32((U32) paddr); 3881 3882 mpt_check_doorbell(mpt); 3883 mpt_send_cmd(mpt, req); 3884 } 3885 3886 static int 3887 mpt_add_els_buffers(struct mpt_softc *mpt) 3888 { 3889 int i; 3890 3891 if (mpt->is_fc == 0) { 3892 return (TRUE); 3893 } 3894 3895 if (mpt->els_cmds_allocated) { 3896 return (TRUE); 3897 } 3898 3899 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), 3900 M_DEVBUF, M_NOWAIT | M_ZERO); 3901 3902 if (mpt->els_cmd_ptrs == NULL) { 3903 return (FALSE); 3904 } 3905 3906 /* 3907 * Feed the chip some ELS buffer resources 3908 */ 3909 for (i = 0; i < MPT_MAX_ELS; i++) { 3910 request_t *req = mpt_get_request(mpt, FALSE); 3911 if (req == NULL) { 3912 break; 3913 } 3914 req->state |= REQ_STATE_LOCKED; 3915 mpt->els_cmd_ptrs[i] = req; 3916 mpt_fc_post_els(mpt, req, i); 3917 } 3918 3919 if (i == 0) { 3920 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 3921 free(mpt->els_cmd_ptrs, M_DEVBUF); 3922 mpt->els_cmd_ptrs = NULL; 3923 return (FALSE); 3924 } 3925 if (i != MPT_MAX_ELS) { 3926 mpt_lprt(mpt, MPT_PRT_INFO, 3927 
"only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 3928 } 3929 mpt->els_cmds_allocated = i; 3930 return(TRUE); 3931 } 3932 3933 static int 3934 mpt_add_target_commands(struct mpt_softc *mpt) 3935 { 3936 int i, max; 3937 3938 if (mpt->tgt_cmd_ptrs) { 3939 return (TRUE); 3940 } 3941 3942 max = MPT_MAX_REQUESTS(mpt) >> 1; 3943 if (max > mpt->mpt_max_tgtcmds) { 3944 max = mpt->mpt_max_tgtcmds; 3945 } 3946 mpt->tgt_cmd_ptrs = 3947 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 3948 if (mpt->tgt_cmd_ptrs == NULL) { 3949 mpt_prt(mpt, 3950 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 3951 return (FALSE); 3952 } 3953 3954 for (i = 0; i < max; i++) { 3955 request_t *req; 3956 3957 req = mpt_get_request(mpt, FALSE); 3958 if (req == NULL) { 3959 break; 3960 } 3961 req->state |= REQ_STATE_LOCKED; 3962 mpt->tgt_cmd_ptrs[i] = req; 3963 mpt_post_target_command(mpt, req, i); 3964 } 3965 3966 3967 if (i == 0) { 3968 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 3969 free(mpt->tgt_cmd_ptrs, M_DEVBUF); 3970 mpt->tgt_cmd_ptrs = NULL; 3971 return (FALSE); 3972 } 3973 3974 mpt->tgt_cmds_allocated = i; 3975 3976 if (i < max) { 3977 mpt_lprt(mpt, MPT_PRT_INFO, 3978 "added %d of %d target bufs\n", i, max); 3979 } 3980 return (i); 3981 } 3982 3983 static int 3984 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 3985 { 3986 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 3987 mpt->twildcard = 1; 3988 } else if (lun >= MPT_MAX_LUNS) { 3989 return (EINVAL); 3990 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 3991 return (EINVAL); 3992 } 3993 if (mpt->tenabled == 0) { 3994 if (mpt->is_fc) { 3995 (void) mpt_fc_reset_link(mpt, 0); 3996 } 3997 mpt->tenabled = 1; 3998 } 3999 if (lun == CAM_LUN_WILDCARD) { 4000 mpt->trt_wildcard.enabled = 1; 4001 } else { 4002 mpt->trt[lun].enabled = 1; 4003 } 4004 return (0); 4005 } 4006 4007 static int 4008 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4009 { 4010 int i; 4011 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4012 mpt->twildcard = 0; 4013 } else if (lun >= MPT_MAX_LUNS) { 4014 return (EINVAL); 4015 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4016 return (EINVAL); 4017 } 4018 if (lun == CAM_LUN_WILDCARD) { 4019 mpt->trt_wildcard.enabled = 0; 4020 } else { 4021 mpt->trt[lun].enabled = 0; 4022 } 4023 for (i = 0; i < MPT_MAX_LUNS; i++) { 4024 if (mpt->trt[lun].enabled) { 4025 break; 4026 } 4027 } 4028 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4029 if (mpt->is_fc) { 4030 (void) mpt_fc_reset_link(mpt, 0); 4031 } 4032 mpt->tenabled = 0; 4033 } 4034 return (0); 4035 } 4036 4037 /* 4038 * Called with MPT lock held 4039 */ 4040 static void 4041 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4042 { 4043 struct ccb_scsiio *csio = &ccb->csio; 4044 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4045 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4046 4047 switch (tgt->state) { 4048 case TGT_STATE_IN_CAM: 4049 break; 4050 case TGT_STATE_MOVING_DATA: 4051 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4052 xpt_freeze_simq(mpt->sim, 1); 4053 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4054 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4055 MPTLOCK_2_CAMLOCK(mpt); 4056 xpt_done(ccb); 4057 CAMLOCK_2_MPTLOCK(mpt); 4058 return; 4059 default: 4060 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4061 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4062 mpt_tgt_dump_req_state(mpt, cmd_req); 4063 mpt_set_ccb_status(ccb, 
CAM_REQ_CMP_ERR); 4064 MPTLOCK_2_CAMLOCK(mpt); 4065 xpt_done(ccb); 4066 CAMLOCK_2_MPTLOCK(mpt); 4067 return; 4068 } 4069 4070 if (csio->dxfer_len) { 4071 bus_dmamap_callback_t *cb; 4072 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4073 request_t *req; 4074 4075 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, 4076 ("dxfer_len %u but direction is NONE\n", csio->dxfer_len)); 4077 4078 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4079 if (mpt->outofbeer == 0) { 4080 mpt->outofbeer = 1; 4081 xpt_freeze_simq(mpt->sim, 1); 4082 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4083 } 4084 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4085 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4086 MPTLOCK_2_CAMLOCK(mpt); 4087 xpt_done(ccb); 4088 CAMLOCK_2_MPTLOCK(mpt); 4089 return; 4090 } 4091 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4092 if (sizeof (bus_addr_t) > 4) { 4093 cb = mpt_execute_req_a64; 4094 } else { 4095 cb = mpt_execute_req; 4096 } 4097 4098 req->ccb = ccb; 4099 ccb->ccb_h.ccb_req_ptr = req; 4100 4101 /* 4102 * Record the currently active ccb and the 4103 * request for it in our target state area. 4104 */ 4105 tgt->ccb = ccb; 4106 tgt->req = req; 4107 4108 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4109 ta = req->req_vbuf; 4110 4111 if (mpt->is_sas) { 4112 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4113 cmd_req->req_vbuf; 4114 ta->QueueTag = ssp->InitiatorTag; 4115 } else if (mpt->is_spi) { 4116 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4117 cmd_req->req_vbuf; 4118 ta->QueueTag = sp->Tag; 4119 } 4120 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4121 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4122 ta->ReplyWord = htole32(tgt->reply_desc); 4123 if (csio->ccb_h.target_lun > 256) { 4124 ta->LUN[0] = 4125 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); 4126 ta->LUN[1] = csio->ccb_h.target_lun & 0xff; 4127 } else { 4128 ta->LUN[1] = csio->ccb_h.target_lun; 4129 } 4130 4131 ta->RelativeOffset = tgt->bytes_xfered; 4132 ta->DataLength = ccb->csio.dxfer_len; 4133 if (ta->DataLength > tgt->resid) { 4134 ta->DataLength = tgt->resid; 4135 } 4136 4137 /* 4138 * XXX Should be done after data transfer completes? 4139 */ 4140 tgt->resid -= csio->dxfer_len; 4141 tgt->bytes_xfered += csio->dxfer_len; 4142 4143 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 4144 ta->TargetAssistFlags |= 4145 TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4146 } 4147 4148 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4149 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 4150 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 4151 ta->TargetAssistFlags |= 4152 TARGET_ASSIST_FLAGS_AUTO_STATUS; 4153 } 4154 #endif 4155 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; 4156 4157 mpt_lprt(mpt, MPT_PRT_DEBUG, 4158 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " 4159 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, 4160 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); 4161 4162 MPTLOCK_2_CAMLOCK(mpt); 4163 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 4164 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { 4165 int error; 4166 int s = splsoftvm(); 4167 error = bus_dmamap_load(mpt->buffer_dmat, 4168 req->dmap, csio->data_ptr, csio->dxfer_len, 4169 cb, req, 0); 4170 splx(s); 4171 if (error == EINPROGRESS) { 4172 xpt_freeze_simq(mpt->sim, 1); 4173 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4174 } 4175 } else { 4176 /* 4177 * We have been given a pointer to single 4178 * physical buffer. 
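				 * (the CAM_DATA_PHYS case); wrap it in a
				 * single bus_dma segment and invoke the dma
				 * callback directly, so no bus_dmamap_load()
				 * pass is needed.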
4179 */ 4180 struct bus_dma_segment seg; 4181 seg.ds_addr = (bus_addr_t) 4182 (vm_offset_t)csio->data_ptr; 4183 seg.ds_len = csio->dxfer_len; 4184 (*cb)(req, &seg, 1, 0); 4185 } 4186 } else { 4187 /* 4188 * We have been given a list of addresses. 4189 * This case could be easily supported but they are not 4190 * currently generated by the CAM subsystem so there 4191 * is no point in wasting the time right now. 4192 */ 4193 struct bus_dma_segment *sgs; 4194 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 4195 (*cb)(req, NULL, 0, EFAULT); 4196 } else { 4197 /* Just use the segments provided */ 4198 sgs = (struct bus_dma_segment *)csio->data_ptr; 4199 (*cb)(req, sgs, csio->sglist_cnt, 0); 4200 } 4201 } 4202 CAMLOCK_2_MPTLOCK(mpt); 4203 } else { 4204 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 4205 4206 /* 4207 * XXX: I don't know why this seems to happen, but 4208 * XXX: completing the CCB seems to make things happy. 4209 * XXX: This seems to happen if the initiator requests 4210 * XXX: enough data that we have to do multiple CTIOs. 4211 */ 4212 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4213 mpt_lprt(mpt, MPT_PRT_DEBUG, 4214 "Meaningless STATUS CCB (%p): flags %x status %x " 4215 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 4216 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 4217 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4218 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4219 MPTLOCK_2_CAMLOCK(mpt); 4220 xpt_done(ccb); 4221 CAMLOCK_2_MPTLOCK(mpt); 4222 return; 4223 } 4224 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 4225 sp = sense; 4226 memcpy(sp, &csio->sense_data, 4227 min(csio->sense_len, MPT_SENSE_SIZE)); 4228 } 4229 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp); 4230 } 4231 } 4232 4233 static void 4234 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 4235 uint32_t lun, int send, uint8_t *data, size_t length) 4236 { 4237 mpt_tgt_state_t *tgt; 4238 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4239 SGE_SIMPLE32 *se; 4240 uint32_t flags; 4241 uint8_t *dptr; 4242 bus_addr_t pptr; 4243 request_t *req; 4244 4245 /* 4246 * We enter with resid set to the data load for the command. 
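	 * A zero length or an already-exhausted residual means there is
	 * nothing left to move, so we just send status. Otherwise the data
	 * is copied into the scratch half of a freshly allocated request
	 * (req_vbuf + MPT_RQSL(mpt)) and shipped with a TARGET_ASSIST built
	 * around a single SIMPLE32 SGE.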
4247 */ 4248 tgt = MPT_TGT_STATE(mpt, cmd_req); 4249 if (length == 0 || tgt->resid == 0) { 4250 tgt->resid = 0; 4251 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL); 4252 return; 4253 } 4254 4255 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4256 mpt_prt(mpt, "out of resources- dropping local response\n"); 4257 return; 4258 } 4259 tgt->is_local = 1; 4260 4261 4262 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4263 ta = req->req_vbuf; 4264 4265 if (mpt->is_sas) { 4266 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 4267 ta->QueueTag = ssp->InitiatorTag; 4268 } else if (mpt->is_spi) { 4269 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 4270 ta->QueueTag = sp->Tag; 4271 } 4272 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4273 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4274 ta->ReplyWord = htole32(tgt->reply_desc); 4275 if (lun > 256) { 4276 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4277 ta->LUN[1] = lun & 0xff; 4278 } else { 4279 ta->LUN[1] = lun; 4280 } 4281 ta->RelativeOffset = 0; 4282 ta->DataLength = length; 4283 4284 dptr = req->req_vbuf; 4285 dptr += MPT_RQSL(mpt); 4286 pptr = req->req_pbuf; 4287 pptr += MPT_RQSL(mpt); 4288 memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 4289 4290 se = (SGE_SIMPLE32 *) &ta->SGL[0]; 4291 memset(se, 0,sizeof (*se)); 4292 4293 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 4294 if (send) { 4295 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4296 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 4297 } 4298 se->Address = pptr; 4299 MPI_pSGE_SET_LENGTH(se, length); 4300 flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 4301 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 4302 MPI_pSGE_SET_FLAGS(se, flags); 4303 4304 tgt->ccb = NULL; 4305 tgt->req = req; 4306 tgt->resid -= length; 4307 tgt->bytes_xfered = length; 4308 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4309 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 4310 #else 4311 tgt->state = TGT_STATE_MOVING_DATA; 4312 #endif 4313 mpt_send_cmd(mpt, req); 4314 } 4315 4316 /* 4317 * Abort queued up CCBs 4318 */ 4319 static cam_status 4320 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 4321 { 4322 struct mpt_hdr_stailq *lp; 4323 struct ccb_hdr *srch; 4324 int found = 0; 4325 union ccb *accb = ccb->cab.abort_ccb; 4326 tgt_resource_t *trtp; 4327 4328 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 4329 4330 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4331 trtp = &mpt->trt_wildcard; 4332 } else { 4333 trtp = &mpt->trt[ccb->ccb_h.target_lun]; 4334 } 4335 4336 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4337 lp = &trtp->atios; 4338 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) { 4339 lp = &trtp->inots; 4340 } else { 4341 return (CAM_REQ_INVALID); 4342 } 4343 4344 STAILQ_FOREACH(srch, lp, sim_links.stqe) { 4345 if (srch == &accb->ccb_h) { 4346 found = 1; 4347 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 4348 break; 4349 } 4350 } 4351 if (found) { 4352 accb->ccb_h.status = CAM_REQ_ABORTED; 4353 xpt_done(accb); 4354 return (CAM_REQ_CMP); 4355 } 4356 mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); 4357 return (CAM_PATH_INVALID); 4358 } 4359 4360 /* 4361 * Ask the MPT to abort the current target command 4362 */ 4363 static int 4364 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 4365 { 4366 int error; 4367 request_t *req; 4368 PTR_MSG_TARGET_MODE_ABORT abtp; 4369 4370 req = mpt_get_request(mpt, FALSE); 4371 if (req == NULL) { 4372 return (-1); 4373 } 4374 abtp = req->req_vbuf; 4375 memset(abtp, 0, sizeof (*abtp)); 4376 4377 
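	/*
	 * Identify the victim by the reply word of its original command
	 * buffer and request an exact-I/O abort.
	 */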
abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4378 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 4379 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 4380 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 4381 error = 0; 4382 if (mpt->is_fc || mpt->is_sas) { 4383 mpt_send_cmd(mpt, req); 4384 } else { 4385 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); 4386 } 4387 return (error); 4388 } 4389 4390 /* 4391 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 4392 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 4393 * FC929 to set bogus FC_RSP fields (nonzero residuals 4394 * but w/o RESID fields set). This causes QLogic initiators 4395 * to think maybe that a frame was lost. 4396 * 4397 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 4398 * we use allocated requests to do TARGET_ASSIST and we 4399 * need to know when to release them. 4400 */ 4401 4402 static void 4403 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 4404 uint8_t status, uint8_t const *sense_data) 4405 { 4406 uint8_t *cmd_vbuf; 4407 mpt_tgt_state_t *tgt; 4408 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 4409 request_t *req; 4410 bus_addr_t paddr; 4411 int resplen = 0; 4412 uint32_t fl; 4413 4414 cmd_vbuf = cmd_req->req_vbuf; 4415 cmd_vbuf += MPT_RQSL(mpt); 4416 tgt = MPT_TGT_STATE(mpt, cmd_req); 4417 4418 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4419 if (mpt->outofbeer == 0) { 4420 mpt->outofbeer = 1; 4421 xpt_freeze_simq(mpt->sim, 1); 4422 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4423 } 4424 if (ccb) { 4425 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4426 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4427 MPTLOCK_2_CAMLOCK(mpt); 4428 xpt_done(ccb); 4429 CAMLOCK_2_MPTLOCK(mpt); 4430 } else { 4431 mpt_prt(mpt, 4432 "could not allocate status request- dropping\n"); 4433 } 4434 return; 4435 } 4436 req->ccb = ccb; 4437 if (ccb) { 4438 ccb->ccb_h.ccb_mpt_ptr = mpt; 4439 ccb->ccb_h.ccb_req_ptr = req; 4440 } 4441 4442 /* 4443 * Record the currently active ccb, if any, and the 4444 * request for it in our target state area. 4445 */ 4446 tgt->ccb = ccb; 4447 tgt->req = req; 4448 tgt->state = TGT_STATE_SENDING_STATUS; 4449 4450 tp = req->req_vbuf; 4451 paddr = req->req_pbuf; 4452 paddr += MPT_RQSL(mpt); 4453 4454 memset(tp, 0, sizeof (*tp)); 4455 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 4456 if (mpt->is_fc) { 4457 PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 4458 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 4459 uint8_t *sts_vbuf; 4460 uint32_t *rsp; 4461 4462 sts_vbuf = req->req_vbuf; 4463 sts_vbuf += MPT_RQSL(mpt); 4464 rsp = (uint32_t *) sts_vbuf; 4465 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 4466 4467 /* 4468 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 4469 * It has to be big-endian in memory and is organized 4470 * in 32 bit words, which are much easier to deal with 4471 * as words which are swizzled as needed. 4472 * 4473 * All we're filling here is the FC_RSP payload. 4474 * We may just have the chip synthesize it if 4475 * we have no residual and an OK status. 4476 * 4477 */ 4478 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 4479 4480 rsp[2] = status; 4481 if (tgt->resid) { 4482 rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */ 4483 rsp[3] = htobe32(tgt->resid); 4484 #ifdef WE_TRUST_AUTO_GOOD_STATUS 4485 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER); 4486 #endif 4487 } 4488 if (status == SCSI_STATUS_CHECK_COND) { 4489 int i; 4490 4491 rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! 
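			 * (These appear to be FCP_FLAGS bits: 0x800 above is
			 * FCP_RESID_UNDER, 0x200 is FCP_SNS_LEN_VALID.)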
			 */
			rsp[4] = htobe32(MPT_SENSE_SIZE);
			if (sense_data) {
				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
			} else {
				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
				    "TION but no sense data?\n");
				memset(&rsp[8], 0, MPT_SENSE_SIZE);
			}
			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
				rsp[i] = htobe32(rsp[i]);
			}
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
#ifndef	WE_TRUST_AUTO_GOOD_STATUS
		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		rsp[2] = htobe32(rsp[2]);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
		tp->StatusCode = status;
		tp->QueueTag = htole16(sp->Tag);
		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
	}

	tp->ReplyWord = htole32(tgt->reply_desc);
	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

#ifdef	WE_CAN_USE_AUTO_REPOST
	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
	if (status == SCSI_STATUS_OK && resplen == 0) {
		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
	} else {
		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
		fl =
		    MPI_SGE_FLAGS_HOST_TO_IOC |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= resplen;
		tp->StatusDataSGE.FlagsLength = htole32(fl);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
	    req->serno, tgt->resid);
	if (ccb) {
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		ccb->ccb_h.timeout_ch = timeout(mpt_timeout, ccb, 60 * hz);
	}
	mpt_send_cmd(mpt, req);
}

static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
    tgt_resource_t *trtp, int init_id)
{
	struct ccb_immed_notify *inot;
	mpt_tgt_state_t *tgt;

	tgt = MPT_TGT_STATE(mpt, req);
	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
	if (inot == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);

	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
	inot->sense_len = 0;
	memset(inot->message_args, 0, sizeof (inot->message_args));
	inot->initiator_id = init_id;	/* XXX */

	/*
	 * This is a somewhat grotesque attempt to map from task management
	 * to old style SCSI messages. God help us all.
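	 * Note that MPT_TERMINATE_TASK has no direct message equivalent, so
	 * it is reported as MSG_ABORT_TAG as well; anything unrecognized
	 * becomes MSG_NOOP.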
4581 */ 4582 switch (fc) { 4583 case MPT_ABORT_TASK_SET: 4584 inot->message_args[0] = MSG_ABORT_TAG; 4585 break; 4586 case MPT_CLEAR_TASK_SET: 4587 inot->message_args[0] = MSG_CLEAR_TASK_SET; 4588 break; 4589 case MPT_TARGET_RESET: 4590 inot->message_args[0] = MSG_TARGET_RESET; 4591 break; 4592 case MPT_CLEAR_ACA: 4593 inot->message_args[0] = MSG_CLEAR_ACA; 4594 break; 4595 case MPT_TERMINATE_TASK: 4596 inot->message_args[0] = MSG_ABORT_TAG; 4597 break; 4598 default: 4599 inot->message_args[0] = MSG_NOOP; 4600 break; 4601 } 4602 tgt->ccb = (union ccb *) inot; 4603 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 4604 MPTLOCK_2_CAMLOCK(mpt); 4605 xpt_done((union ccb *)inot); 4606 CAMLOCK_2_MPTLOCK(mpt); 4607 } 4608 4609 static void 4610 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 4611 { 4612 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 4613 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 4614 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 4615 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 4616 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 4617 '0', '0', '0', '1' 4618 }; 4619 struct ccb_accept_tio *atiop; 4620 lun_id_t lun; 4621 int tag_action = 0; 4622 mpt_tgt_state_t *tgt; 4623 tgt_resource_t *trtp = NULL; 4624 U8 *lunptr; 4625 U8 *vbuf; 4626 U16 itag; 4627 U16 ioindex; 4628 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 4629 uint8_t *cdbp; 4630 4631 /* 4632 * First, DMA sync the received command- 4633 * which is in the *request* * phys area. 4634 * 4635 * XXX: We could optimize this for a range 4636 */ 4637 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 4638 BUS_DMASYNC_POSTREAD); 4639 4640 /* 4641 * Stash info for the current command where we can get at it later. 4642 */ 4643 vbuf = req->req_vbuf; 4644 vbuf += MPT_RQSL(mpt); 4645 4646 /* 4647 * Get our state pointer set up. 4648 */ 4649 tgt = MPT_TGT_STATE(mpt, req); 4650 if (tgt->state != TGT_STATE_LOADED) { 4651 mpt_tgt_dump_req_state(mpt, req); 4652 panic("bad target state in mpt_scsi_tgt_atio"); 4653 } 4654 memset(tgt, 0, sizeof (mpt_tgt_state_t)); 4655 tgt->state = TGT_STATE_IN_CAM; 4656 tgt->reply_desc = reply_desc; 4657 ioindex = GET_IO_INDEX(reply_desc); 4658 if (mpt->verbose >= MPT_PRT_DEBUG) { 4659 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 4660 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 4661 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 4662 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 4663 } 4664 if (mpt->is_fc) { 4665 PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 4666 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 4667 if (fc->FcpCntl[2]) { 4668 /* 4669 * Task Management Request 4670 */ 4671 switch (fc->FcpCntl[2]) { 4672 case 0x2: 4673 fct = MPT_ABORT_TASK_SET; 4674 break; 4675 case 0x4: 4676 fct = MPT_CLEAR_TASK_SET; 4677 break; 4678 case 0x20: 4679 fct = MPT_TARGET_RESET; 4680 break; 4681 case 0x40: 4682 fct = MPT_CLEAR_ACA; 4683 break; 4684 case 0x80: 4685 fct = MPT_TERMINATE_TASK; 4686 break; 4687 default: 4688 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 4689 fc->FcpCntl[2]); 4690 mpt_scsi_tgt_status(mpt, 0, req, 4691 SCSI_STATUS_OK, 0); 4692 return; 4693 } 4694 } else { 4695 switch (fc->FcpCntl[1]) { 4696 case 0: 4697 tag_action = MSG_SIMPLE_Q_TAG; 4698 break; 4699 case 1: 4700 tag_action = MSG_HEAD_OF_Q_TAG; 4701 break; 4702 case 2: 4703 tag_action = MSG_ORDERED_Q_TAG; 4704 break; 4705 default: 4706 /* 4707 * Bah. 
Ignore Untagged Queing and ACA 4708 */ 4709 tag_action = MSG_SIMPLE_Q_TAG; 4710 break; 4711 } 4712 } 4713 tgt->resid = be32toh(fc->FcpDl); 4714 cdbp = fc->FcpCdb; 4715 lunptr = fc->FcpLun; 4716 itag = be16toh(fc->OptionalOxid); 4717 } else if (mpt->is_sas) { 4718 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; 4719 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; 4720 cdbp = ssp->CDB; 4721 lunptr = ssp->LogicalUnitNumber; 4722 itag = ssp->InitiatorTag; 4723 } else { 4724 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; 4725 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; 4726 cdbp = sp->CDB; 4727 lunptr = sp->LogicalUnitNumber; 4728 itag = sp->Tag; 4729 } 4730 4731 /* 4732 * Generate a simple lun 4733 */ 4734 switch (lunptr[0] & 0xc0) { 4735 case 0x40: 4736 lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; 4737 break; 4738 case 0: 4739 lun = lunptr[1]; 4740 break; 4741 default: 4742 mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n"); 4743 lun = 0xffff; 4744 break; 4745 } 4746 4747 /* 4748 * Deal with non-enabled or bad luns here. 4749 */ 4750 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || 4751 mpt->trt[lun].enabled == 0) { 4752 if (mpt->twildcard) { 4753 trtp = &mpt->trt_wildcard; 4754 } else if (fct == MPT_NIL_TMT_VALUE) { 4755 /* 4756 * In this case, we haven't got an upstream listener 4757 * for either a specific lun or wildcard luns. We 4758 * have to make some sensible response. For regular 4759 * inquiry, just return some NOT HERE inquiry data. 4760 * For VPD inquiry, report illegal field in cdb. 4761 * For REQUEST SENSE, just return NO SENSE data. 4762 * REPORT LUNS gets illegal command. 4763 * All other commands get 'no such device'. 4764 */ 4765 uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; 4766 size_t len; 4767 4768 memset(buf, 0, MPT_SENSE_SIZE); 4769 cond = SCSI_STATUS_CHECK_COND; 4770 buf[0] = 0xf0; 4771 buf[2] = 0x5; 4772 buf[7] = 0x8; 4773 sp = buf; 4774 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 4775 4776 switch (cdbp[0]) { 4777 case INQUIRY: 4778 { 4779 if (cdbp[1] != 0) { 4780 buf[12] = 0x26; 4781 buf[13] = 0x01; 4782 break; 4783 } 4784 len = min(tgt->resid, cdbp[4]); 4785 len = min(len, sizeof (null_iqd)); 4786 mpt_lprt(mpt, MPT_PRT_DEBUG, 4787 "local inquiry %ld bytes\n", (long) len); 4788 mpt_scsi_tgt_local(mpt, req, lun, 1, 4789 null_iqd, len); 4790 return; 4791 } 4792 case REQUEST_SENSE: 4793 { 4794 buf[2] = 0x0; 4795 len = min(tgt->resid, cdbp[4]); 4796 len = min(len, sizeof (buf)); 4797 mpt_lprt(mpt, MPT_PRT_DEBUG, 4798 "local reqsense %ld bytes\n", (long) len); 4799 mpt_scsi_tgt_local(mpt, req, lun, 1, 4800 buf, len); 4801 return; 4802 } 4803 case REPORT_LUNS: 4804 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); 4805 buf[12] = 0x26; 4806 return; 4807 default: 4808 mpt_lprt(mpt, MPT_PRT_DEBUG, 4809 "CMD 0x%x to unmanaged lun %u\n", 4810 cdbp[0], lun); 4811 buf[12] = 0x25; 4812 break; 4813 } 4814 mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); 4815 return; 4816 } 4817 /* otherwise, leave trtp NULL */ 4818 } else { 4819 trtp = &mpt->trt[lun]; 4820 } 4821 4822 /* 4823 * Deal with any task management 4824 */ 4825 if (fct != MPT_NIL_TMT_VALUE) { 4826 if (trtp == NULL) { 4827 mpt_prt(mpt, "task mgmt function %x but no listener\n", 4828 fct); 4829 mpt_scsi_tgt_status(mpt, 0, req, 4830 SCSI_STATUS_OK, 0); 4831 } else { 4832 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, 4833 GET_INITIATOR_INDEX(reply_desc)); 4834 } 4835 return; 4836 } 4837 4838 4839 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); 4840 if (atiop == NULL) { 4841 mpt_lprt(mpt, MPT_PRT_WARN, 4842 "no ATIOs for lun %u- 
sending back %s\n", lun, 4843 mpt->tenabled? "QUEUE FULL" : "BUSY"); 4844 mpt_scsi_tgt_status(mpt, NULL, req, 4845 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 4846 NULL); 4847 return; 4848 } 4849 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 4850 mpt_lprt(mpt, MPT_PRT_DEBUG1, 4851 "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); 4852 atiop->ccb_h.ccb_mpt_ptr = mpt; 4853 atiop->ccb_h.status = CAM_CDB_RECVD; 4854 atiop->ccb_h.target_lun = lun; 4855 atiop->sense_len = 0; 4856 atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 4857 atiop->cdb_len = mpt_cdblen(cdbp[0], 16); 4858 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 4859 4860 /* 4861 * The tag we construct here allows us to find the 4862 * original request that the command came in with. 4863 * 4864 * This way we don't have to depend on anything but the 4865 * tag to find things when CCBs show back up from CAM. 4866 */ 4867 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 4868 tgt->tag_id = atiop->tag_id; 4869 if (tag_action) { 4870 atiop->tag_action = tag_action; 4871 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; 4872 } 4873 if (mpt->verbose >= MPT_PRT_DEBUG) { 4874 int i; 4875 mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, 4876 atiop->ccb_h.target_lun); 4877 for (i = 0; i < atiop->cdb_len; i++) { 4878 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 4879 (i == (atiop->cdb_len - 1))? '>' : ' '); 4880 } 4881 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 4882 itag, atiop->tag_id, tgt->reply_desc, tgt->resid); 4883 } 4884 4885 MPTLOCK_2_CAMLOCK(mpt); 4886 xpt_done((union ccb *)atiop); 4887 CAMLOCK_2_MPTLOCK(mpt); 4888 } 4889 4890 static void 4891 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 4892 { 4893 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 4894 4895 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 4896 "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, 4897 tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, 4898 tgt->tag_id, tgt->state); 4899 } 4900 4901 static void 4902 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 4903 { 4904 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 4905 req->index, req->index, req->state); 4906 mpt_tgt_dump_tgt_state(mpt, req); 4907 } 4908 4909 static int 4910 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 4911 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 4912 { 4913 int dbg; 4914 union ccb *ccb; 4915 U16 status; 4916 4917 if (reply_frame == NULL) { 4918 /* 4919 * Figure out what the state of the command is. 4920 */ 4921 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 4922 4923 #ifdef INVARIANTS 4924 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 4925 if (tgt->req) { 4926 mpt_req_not_spcl(mpt, tgt->req, 4927 "turbo scsi_tgt_reply associated req", __LINE__); 4928 } 4929 #endif 4930 switch(tgt->state) { 4931 case TGT_STATE_LOADED: 4932 /* 4933 * This is a new command starting. 
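			 * The IOC has deposited a new command into this
			 * buffer; mpt_scsi_tgt_atio() parses it and either
			 * queues an ATIO to CAM, dispatches task management,
			 * or answers unconfigured luns locally.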
4934 */ 4935 mpt_scsi_tgt_atio(mpt, req, reply_desc); 4936 break; 4937 case TGT_STATE_MOVING_DATA: 4938 { 4939 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; 4940 4941 ccb = tgt->ccb; 4942 if (tgt->req == NULL) { 4943 panic("mpt: turbo target reply with null " 4944 "associated request moving data"); 4945 /* NOTREACHED */ 4946 } 4947 if (ccb == NULL) { 4948 if (tgt->is_local == 0) { 4949 panic("mpt: turbo target reply with " 4950 "null associated ccb moving data"); 4951 /* NOTREACHED */ 4952 } 4953 mpt_lprt(mpt, MPT_PRT_DEBUG, 4954 "TARGET_ASSIST local done\n"); 4955 TAILQ_REMOVE(&mpt->request_pending_list, 4956 tgt->req, links); 4957 mpt_free_request(mpt, tgt->req); 4958 tgt->req = NULL; 4959 mpt_scsi_tgt_status(mpt, NULL, req, 4960 0, NULL); 4961 return (TRUE); 4962 } 4963 tgt->ccb = NULL; 4964 tgt->nxfers++; 4965 untimeout(mpt_timeout, ccb, ccb->ccb_h.timeout_ch); 4966 mpt_lprt(mpt, MPT_PRT_DEBUG, 4967 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 4968 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 4969 /* 4970 * Free the Target Assist Request 4971 */ 4972 KASSERT(tgt->req->ccb == ccb, 4973 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 4974 tgt->req->serno, tgt->req->ccb)); 4975 TAILQ_REMOVE(&mpt->request_pending_list, 4976 tgt->req, links); 4977 mpt_free_request(mpt, tgt->req); 4978 tgt->req = NULL; 4979 4980 /* 4981 * Do we need to send status now? That is, are 4982 * we done with all our data transfers? 4983 */ 4984 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4985 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4986 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4987 KASSERT(ccb->ccb_h.status, 4988 ("zero ccb sts at %d\n", __LINE__)); 4989 tgt->state = TGT_STATE_IN_CAM; 4990 if (mpt->outofbeer) { 4991 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4992 mpt->outofbeer = 0; 4993 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 4994 } 4995 MPTLOCK_2_CAMLOCK(mpt); 4996 xpt_done(ccb); 4997 CAMLOCK_2_MPTLOCK(mpt); 4998 break; 4999 } 5000 /* 5001 * Otherwise, send status (and sense) 5002 */ 5003 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5004 sp = sense; 5005 memcpy(sp, &ccb->csio.sense_data, 5006 min(ccb->csio.sense_len, MPT_SENSE_SIZE)); 5007 } 5008 mpt_scsi_tgt_status(mpt, ccb, req, 5009 ccb->csio.scsi_status, sp); 5010 break; 5011 } 5012 case TGT_STATE_SENDING_STATUS: 5013 case TGT_STATE_MOVING_DATA_AND_STATUS: 5014 { 5015 int ioindex; 5016 ccb = tgt->ccb; 5017 5018 if (tgt->req == NULL) { 5019 panic("mpt: turbo target reply with null " 5020 "associated request sending status"); 5021 /* NOTREACHED */ 5022 } 5023 5024 if (ccb) { 5025 tgt->ccb = NULL; 5026 if (tgt->state == 5027 TGT_STATE_MOVING_DATA_AND_STATUS) { 5028 tgt->nxfers++; 5029 } 5030 untimeout(mpt_timeout, ccb, 5031 ccb->ccb_h.timeout_ch); 5032 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5033 ccb->ccb_h.status |= CAM_SENT_SENSE; 5034 } 5035 mpt_lprt(mpt, MPT_PRT_DEBUG, 5036 "TARGET_STATUS tag %x sts %x flgs %x req " 5037 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 5038 ccb->ccb_h.flags, tgt->req); 5039 /* 5040 * Free the Target Send Status Request 5041 */ 5042 KASSERT(tgt->req->ccb == ccb, 5043 ("tgt->req %p:%u tgt->req->ccb %p", 5044 tgt->req, tgt->req->serno, tgt->req->ccb)); 5045 /* 5046 * Notify CAM that we're done 5047 */ 5048 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5049 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5050 KASSERT(ccb->ccb_h.status, 5051 ("ZERO ccb sts at %d\n", __LINE__)); 5052 tgt->ccb = NULL; 5053 } else { 5054 mpt_lprt(mpt, MPT_PRT_DEBUG, 5055 "TARGET_STATUS non-CAM for req %p:%u\n", 5056 tgt->req, tgt->req->serno); 5057 } 5058 
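			/*
			 * Retire the status-send (or data-plus-status)
			 * request now that the IOC is done with it; the
			 * command buffer itself is reposted below.
			 */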
TAILQ_REMOVE(&mpt->request_pending_list, 5059 tgt->req, links); 5060 mpt_free_request(mpt, tgt->req); 5061 tgt->req = NULL; 5062 5063 /* 5064 * And re-post the Command Buffer. 5065 * This will reset the state. 5066 */ 5067 ioindex = GET_IO_INDEX(reply_desc); 5068 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5069 tgt->is_local = 0; 5070 mpt_post_target_command(mpt, req, ioindex); 5071 5072 /* 5073 * And post a done for anyone who cares 5074 */ 5075 if (ccb) { 5076 if (mpt->outofbeer) { 5077 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5078 mpt->outofbeer = 0; 5079 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5080 } 5081 MPTLOCK_2_CAMLOCK(mpt); 5082 xpt_done(ccb); 5083 CAMLOCK_2_MPTLOCK(mpt); 5084 } 5085 break; 5086 } 5087 case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 5088 tgt->state = TGT_STATE_LOADED; 5089 break; 5090 default: 5091 mpt_prt(mpt, "Unknown Target State 0x%x in Context " 5092 "Reply Function\n", tgt->state); 5093 } 5094 return (TRUE); 5095 } 5096 5097 status = le16toh(reply_frame->IOCStatus); 5098 if (status != MPI_IOCSTATUS_SUCCESS) { 5099 dbg = MPT_PRT_ERROR; 5100 } else { 5101 dbg = MPT_PRT_DEBUG1; 5102 } 5103 5104 mpt_lprt(mpt, dbg, 5105 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 5106 req, req->serno, reply_frame, reply_frame->Function, status); 5107 5108 switch (reply_frame->Function) { 5109 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 5110 { 5111 mpt_tgt_state_t *tgt; 5112 #ifdef INVARIANTS 5113 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 5114 #endif 5115 if (status != MPI_IOCSTATUS_SUCCESS) { 5116 /* 5117 * XXX What to do? 5118 */ 5119 break; 5120 } 5121 tgt = MPT_TGT_STATE(mpt, req); 5122 KASSERT(tgt->state == TGT_STATE_LOADING, 5123 ("bad state 0x%x on reply to buffer post\n", tgt->state)); 5124 mpt_assign_serno(mpt, req); 5125 tgt->state = TGT_STATE_LOADED; 5126 break; 5127 } 5128 case MPI_FUNCTION_TARGET_ASSIST: 5129 #ifdef INVARIANTS 5130 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 5131 #endif 5132 mpt_prt(mpt, "target assist completion\n"); 5133 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5134 mpt_free_request(mpt, req); 5135 break; 5136 case MPI_FUNCTION_TARGET_STATUS_SEND: 5137 #ifdef INVARIANTS 5138 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 5139 #endif 5140 mpt_prt(mpt, "status send completion\n"); 5141 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5142 mpt_free_request(mpt, req); 5143 break; 5144 case MPI_FUNCTION_TARGET_MODE_ABORT: 5145 { 5146 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 5147 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 5148 PTR_MSG_TARGET_MODE_ABORT abtp = 5149 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 5150 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 5151 #ifdef INVARIANTS 5152 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 5153 #endif 5154 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 5155 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 5156 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5157 mpt_free_request(mpt, req); 5158 break; 5159 } 5160 default: 5161 mpt_prt(mpt, "Unknown Target Address Reply Function code: " 5162 "0x%x\n", reply_frame->Function); 5163 break; 5164 } 5165 return (TRUE); 5166 } 5167