/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#include "dev/mpt/mpilib/mpi_sas.h"
#if __FreeBSD_version >= 500000
#include <sys/sysctl.h>
#endif
#include <sys/callout.h>
#include <sys/kthread.h>
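
/*
 * FreeBSD 7.0 (700025) and later kernels are assumed to always provide
 * CAM's "new transport" negotiation code, so force the option on there.
 */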
#if __FreeBSD_version >= 700025
#ifndef	CAM_NEW_TRAN_CODE
#define	CAM_NEW_TRAN_CODE	1
#endif
#endif

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name = "mpt_cam",
	.probe = mpt_cam_probe,
	.attach = mpt_cam_attach,
	.enable = mpt_cam_enable,
	.ready = mpt_cam_ready,
	.event = mpt_cam_event,
	.reset = mpt_cam_ioc_reset,
	.detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
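
/*
 * hw.mpt.enable_sata_wc: -1 (the default) leaves each SATA disk's write
 * cache alone; 0 or 1 force-disables or force-enables the volatile write
 * cache on every attached SATA device at configuration time (see
 * mptsas_set_sata_wc() below).
 */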
int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);

static int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

static int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t handler;
	int maxq;
	int error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}
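
	/*
	 * Note that this second "bus" shares the devq allocated above with
	 * the primary SIM, so both buses draw on the same pool of openings.
	 */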
	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	MPT_LOCK(mpt);
	if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_fcport_page0.Header.PageVersion,
	    mpt->mpt_fcport_page0.Header.PageLength,
	    mpt->mpt_fcport_page0.Header.PageNumber,
	    mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);
#if __FreeBSD_version >= 500000
	MPT_UNLOCK(mpt);
	{
		struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
		struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

		snprintf(mpt->scinfo.fc.wwnn,
		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWNN.High,
		    mpt->mpt_fcport_page0.WWNN.Low);

		snprintf(mpt->scinfo.fc.wwpn,
		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
		    mpt->mpt_fcport_page0.WWPN.High,
		    mpt->mpt_fcport_page0.WWPN.Low);

		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
		    "World Wide Node Name");

		SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
		    "World Wide Port Name");
	}
	MPT_LOCK(mpt);
#endif
	return (0);
}

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
	    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}
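
	/* ExtPageLength counts 32-bit words, so the buffer size is 4x it. */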
	len = hdr.ExtPageLength * 4;
	buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    0, &hdr, buffer, len, 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		free(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	free(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
    uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
	    0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	free(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
    uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error = 0;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		free(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	free(buffer, M_DEVBUF);
out:
	return (error);
}

/*
 * Read SAS configuration information.
 */
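/*
 * Discovery here is straightforward: walk every phy listed in SAS IO Unit
 * Page 0 and, for each one, pull SAS PHY Page 0 plus SAS Device Page 0 for
 * the owning device and for whatever is attached to the phy.
 */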
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		free(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
    int enabled)
{
	SataPassthroughRequest_t *pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
	/*
	 * The command is carried as a raw ATA host-to-device register FIS:
	 * byte 0 is the FIS type (0x27, Register - Host to Device), byte 1
	 * sets the C bit (this is a command, not a control, update), byte 2
	 * is the ATA command (0xef, SET FEATURES) and byte 3 the features
	 * subcode: 0x02 enables and 0x82 disables the volatile write cache.
	 * The remaining bytes carry conventional device/control values.
	 */
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
	    10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		printf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		printf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

/*
 * Set initial SAS configuration information.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			if (bootverbose)
				device_printf(mpt->dev,
				    "%sabling SATA WC on phy %d\n",
				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
			    mpt_enable_sata_wc);
		}
	}

	return (0);
}

static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (req != NULL) {
		if (reply_frame != NULL) {
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
			/*
			 * Whew- we can free this request (late completion)
			 */
			mpt_free_request(mpt, req);
		}
	}

	return (TRUE);
}

/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int error, i, pp1val;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

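	/*
	 * SPI Port Page 1's Configuration word encodes our initiator ID in
	 * the low byte and a one-hot "port response IDs" bitmask (which IDs
	 * the chip answers selection on) in the upper half, so the expected
	 * value is our ID plus the single bit corresponding to that ID.
	 */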
	pp1val = ((1 << mpt->mpt_ini_id) <<
	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

static int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	MPT_LOCK(mpt);

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	MPT_UNLOCK(mpt);
	return (error);
}

static void
mpt_cam_ready(struct mpt_softc *mpt)
{

	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		MPT_LOCK(mpt);
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
		MPT_UNLOCK(mpt);
	}
	mpt->ready = 1;
}

static void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	MPT_LOCK(mpt);
	mpt->ready = 0;
	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		free(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim, TRUE);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim, TRUE);
		mpt->phydisk_sim = NULL;
	}
	MPT_UNLOCK(mpt);
}

/*
 * This routine is used after a system crash to dump core onto the swap
 * device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb *ccb;
	struct mpt_softc *mpt;
	request_t *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

#if __FreeBSD_version < 500000
	MPT_LOCK(mpt);
#endif
	MPT_LOCK_ASSERT(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
	/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
#if __FreeBSD_version < 500000
	MPT_UNLOCK(mpt);
#endif
}

/*
 * Callback routine from bus_dmamap_load() or, in simple cases, called
 * directly.
 *
 * Takes a list of physical segments and builds the SGL for the SCSI IO
 * command and forwards the command to the IOC after one last check that
 * CAM has not aborted the transaction.
 */
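/*
 * Note that mpt_start() picks this routine as the bus_dmamap_load()
 * callback when bus_addr_t is wider than 32 bits; mpt_execute_req()
 * further below handles the 32-bit case.
 */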
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	bus_addr_t chain_list_addr;
	int first_lim, seg, this_seg_lim;
	uint32_t addr, cur_off, flags, nxt_off, tf;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
1371 */ 1372 1373 if (mpt->verbose >= MPT_PRT_DEBUG) { 1374 int tidx = ((char *)sglp) - mpt_off; 1375 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1376 } 1377 1378 if (nseg == 0) { 1379 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1380 MPI_pSGE_SET_FLAGS(se1, 1381 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1382 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1383 se1->FlagsLength = htole32(se1->FlagsLength); 1384 goto out; 1385 } 1386 1387 1388 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1389 if (istgt == 0) { 1390 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1391 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1392 } 1393 } else { 1394 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1395 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1396 } 1397 } 1398 1399 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { 1400 bus_dmasync_op_t op; 1401 if (istgt == 0) { 1402 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1403 op = BUS_DMASYNC_PREREAD; 1404 } else { 1405 op = BUS_DMASYNC_PREWRITE; 1406 } 1407 } else { 1408 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1409 op = BUS_DMASYNC_PREWRITE; 1410 } else { 1411 op = BUS_DMASYNC_PREREAD; 1412 } 1413 } 1414 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1415 } 1416 1417 /* 1418 * Okay, fill in what we can at the end of the command frame. 1419 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1420 * the command frame. 1421 * 1422 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1423 * SIMPLE64 pointers and start doing CHAIN64 entries after 1424 * that. 1425 */ 1426 1427 if (nseg < MPT_NSGL_FIRST(mpt)) { 1428 first_lim = nseg; 1429 } else { 1430 /* 1431 * Leave room for CHAIN element 1432 */ 1433 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1434 } 1435 1436 se = (SGE_SIMPLE64 *) sglp; 1437 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1438 tf = flags; 1439 memset(se, 0, sizeof (*se)); 1440 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1441 se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); 1442 if (sizeof(bus_addr_t) > 4) { 1443 addr = ((uint64_t)dm_segs->ds_addr) >> 32; 1444 /* SAS1078 36GB limitation WAR */ 1445 if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + 1446 MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { 1447 addr |= (1 << 31); 1448 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; 1449 } 1450 se->Address.High = htole32(addr); 1451 } 1452 if (seg == first_lim - 1) { 1453 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1454 } 1455 if (seg == nseg - 1) { 1456 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1457 MPI_SGE_FLAGS_END_OF_BUFFER; 1458 } 1459 MPI_pSGE_SET_FLAGS(se, tf); 1460 se->FlagsLength = htole32(se->FlagsLength); 1461 } 1462 1463 if (seg == nseg) { 1464 goto out; 1465 } 1466 1467 /* 1468 * Tell the IOC where to find the first chain element. 1469 */ 1470 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1471 nxt_off = MPT_RQSL(mpt); 1472 trq = req; 1473 1474 /* 1475 * Make up the rest of the data segments out of a chain element 1476 * (contained in the current request frame) which points to 1477 * SIMPLE64 elements in the next request frame, possibly ending 1478 * with *another* chain element (if there's more). 1479 */ 1480 while (seg < nseg) { 1481 /* 1482 * Point to the chain descriptor. Note that the chain 1483 * descriptor is at the end of the *previous* list (whether 1484 * chain or simple). 
1485 */ 1486 ce = (SGE_CHAIN64 *) se; 1487 1488 /* 1489 * Before we change our current pointer, make sure we won't 1490 * overflow the request area with this frame. Note that we 1491 * test against 'greater than' here as it's okay in this case 1492 * to have next offset be just outside the request area. 1493 */ 1494 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1495 nxt_off = MPT_REQUEST_AREA; 1496 goto next_chain; 1497 } 1498 1499 /* 1500 * Set our SGE element pointer to the beginning of the chain 1501 * list and update our next chain list offset. 1502 */ 1503 se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; 1504 cur_off = nxt_off; 1505 nxt_off += MPT_RQSL(mpt); 1506 1507 /* 1508 * Now initialize the chain descriptor. 1509 */ 1510 memset(ce, 0, sizeof (*ce)); 1511 1512 /* 1513 * Get the physical address of the chain list. 1514 */ 1515 chain_list_addr = trq->req_pbuf; 1516 chain_list_addr += cur_off; 1517 if (sizeof (bus_addr_t) > 4) { 1518 ce->Address.High = 1519 htole32(((uint64_t)chain_list_addr) >> 32); 1520 } 1521 ce->Address.Low = htole32(chain_list_addr & 0xffffffff); 1522 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | 1523 MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1524 1525 /* 1526 * If we have more than a frame's worth of segments left, 1527 * set up the chain list to have the last element be another 1528 * chain descriptor. 1529 */ 1530 if ((nseg - seg) > MPT_NSGL(mpt)) { 1531 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1532 /* 1533 * The length of the chain is the length in bytes of the 1534 * number of segments plus the next chain element. 1535 * 1536 * The next chain descriptor offset is the length, 1537 * in words, of the number of segments. 1538 */ 1539 ce->Length = (this_seg_lim - seg) * 1540 sizeof (SGE_SIMPLE64); 1541 ce->NextChainOffset = ce->Length >> 2; 1542 ce->Length += sizeof (SGE_CHAIN64); 1543 } else { 1544 this_seg_lim = nseg; 1545 ce->Length = (this_seg_lim - seg) * 1546 sizeof (SGE_SIMPLE64); 1547 } 1548 ce->Length = htole16(ce->Length); 1549 1550 /* 1551 * Fill in the chain list SGE elements with our segment data. 1552 * 1553 * If we're the last element in this chain list, set the last 1554 * element flag. If we're the completely last element period, 1555 * set the end of list and end of buffer flags. 1556 */ 1557 while (seg < this_seg_lim) { 1558 tf = flags; 1559 memset(se, 0, sizeof (*se)); 1560 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1561 se->Address.Low = htole32(dm_segs->ds_addr & 1562 0xffffffff); 1563 if (sizeof (bus_addr_t) > 4) { 1564 addr = ((uint64_t)dm_segs->ds_addr) >> 32; 1565 /* SAS1078 36GB limitation WAR */ 1566 if (mpt->is_1078 && 1567 (((uint64_t)dm_segs->ds_addr + 1568 MPI_SGE_LENGTH(se->FlagsLength)) >> 1569 32) == 9) { 1570 addr |= (1 << 31); 1571 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; 1572 } 1573 se->Address.High = htole32(addr); 1574 } 1575 if (seg == this_seg_lim - 1) { 1576 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1577 } 1578 if (seg == nseg - 1) { 1579 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1580 MPI_SGE_FLAGS_END_OF_BUFFER; 1581 } 1582 MPI_pSGE_SET_FLAGS(se, tf); 1583 se->FlagsLength = htole32(se->FlagsLength); 1584 se++; 1585 seg++; 1586 dm_segs++; 1587 } 1588 1589 next_chain: 1590 /* 1591 * If we have more segments to do and we've used up all of 1592 * the space in a request area, go allocate another one 1593 * and chain to that. 
1594 */ 1595 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1596 request_t *nrq; 1597 1598 CAMLOCK_2_MPTLOCK(mpt); 1599 nrq = mpt_get_request(mpt, FALSE); 1600 MPTLOCK_2_CAMLOCK(mpt); 1601 1602 if (nrq == NULL) { 1603 error = ENOMEM; 1604 goto bad; 1605 } 1606 1607 /* 1608 * Append the new request area on the tail of our list. 1609 */ 1610 if ((trq = req->chain) == NULL) { 1611 req->chain = nrq; 1612 } else { 1613 while (trq->chain != NULL) { 1614 trq = trq->chain; 1615 } 1616 trq->chain = nrq; 1617 } 1618 trq = nrq; 1619 mpt_off = trq->req_vbuf; 1620 if (mpt->verbose >= MPT_PRT_DEBUG) { 1621 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 1622 } 1623 nxt_off = 0; 1624 } 1625 } 1626 out: 1627 1628 /* 1629 * Last time we need to check if this CCB needs to be aborted. 1630 */ 1631 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1632 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1633 request_t *cmd_req = 1634 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1635 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1636 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1637 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1638 } 1639 mpt_prt(mpt, 1640 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", 1641 ccb->ccb_h.status & CAM_STATUS_MASK); 1642 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1643 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 1644 } 1645 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1646 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1647 xpt_done(ccb); 1648 CAMLOCK_2_MPTLOCK(mpt); 1649 mpt_free_request(mpt, req); 1650 MPTLOCK_2_CAMLOCK(mpt); 1651 return; 1652 } 1653 1654 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1655 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 1656 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, 1657 mpt_timeout, ccb); 1658 } 1659 if (mpt->verbose > MPT_PRT_DEBUG) { 1660 int nc = 0; 1661 mpt_print_request(req->req_vbuf); 1662 for (trq = req->chain; trq; trq = trq->chain) { 1663 printf(" Additional Chain Area %d\n", nc++); 1664 mpt_dump_sgl(trq->req_vbuf, 0); 1665 } 1666 } 1667 1668 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1669 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1670 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 1671 #ifdef WE_TRUST_AUTO_GOOD_STATUS 1672 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 1673 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 1674 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 1675 } else { 1676 tgt->state = TGT_STATE_MOVING_DATA; 1677 } 1678 #else 1679 tgt->state = TGT_STATE_MOVING_DATA; 1680 #endif 1681 } 1682 CAMLOCK_2_MPTLOCK(mpt); 1683 mpt_send_cmd(mpt, req); 1684 MPTLOCK_2_CAMLOCK(mpt); 1685 } 1686 1687 static void 1688 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1689 { 1690 request_t *req, *trq; 1691 char *mpt_off; 1692 union ccb *ccb; 1693 struct mpt_softc *mpt; 1694 int seg, first_lim; 1695 uint32_t flags, nxt_off; 1696 void *sglp = NULL; 1697 MSG_REQUEST_HEADER *hdrp; 1698 SGE_SIMPLE32 *se; 1699 SGE_CHAIN32 *ce; 1700 int istgt = 0; 1701 1702 req = (request_t *)arg; 1703 ccb = req->ccb; 1704 1705 mpt = ccb->ccb_h.ccb_mpt_ptr; 1706 req = ccb->ccb_h.ccb_req_ptr; 1707 1708 hdrp = req->req_vbuf; 1709 mpt_off = req->req_vbuf; 1710 1711 1712 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1713 error = EFBIG; 1714 } 1715 1716 if (error == 0) { 1717 switch (hdrp->Function) { 1718 case MPI_FUNCTION_SCSI_IO_REQUEST: 1719 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 1720 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 1721 break; 
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE32 *se;
	SGE_CHAIN32 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		mpt_free_request(mpt, req);
		MPTLOCK_2_CAMLOCK(mpt);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE32 pointers and start doing CHAIN32 entries after
	 * that.
1836 */ 1837 1838 if (nseg < MPT_NSGL_FIRST(mpt)) { 1839 first_lim = nseg; 1840 } else { 1841 /* 1842 * Leave room for CHAIN element 1843 */ 1844 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1845 } 1846 1847 se = (SGE_SIMPLE32 *) sglp; 1848 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1849 uint32_t tf; 1850 1851 memset(se, 0,sizeof (*se)); 1852 se->Address = htole32(dm_segs->ds_addr); 1853 1854 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1855 tf = flags; 1856 if (seg == first_lim - 1) { 1857 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1858 } 1859 if (seg == nseg - 1) { 1860 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1861 MPI_SGE_FLAGS_END_OF_BUFFER; 1862 } 1863 MPI_pSGE_SET_FLAGS(se, tf); 1864 se->FlagsLength = htole32(se->FlagsLength); 1865 } 1866 1867 if (seg == nseg) { 1868 goto out; 1869 } 1870 1871 /* 1872 * Tell the IOC where to find the first chain element. 1873 */ 1874 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1875 nxt_off = MPT_RQSL(mpt); 1876 trq = req; 1877 1878 /* 1879 * Make up the rest of the data segments out of a chain element 1880 * (contained in the current request frame) which points to 1881 * SIMPLE32 elements in the next request frame, possibly ending 1882 * with *another* chain element (if there's more). 1883 */ 1884 while (seg < nseg) { 1885 int this_seg_lim; 1886 uint32_t tf, cur_off; 1887 bus_addr_t chain_list_addr; 1888 1889 /* 1890 * Point to the chain descriptor. Note that the chain 1891 * descriptor is at the end of the *previous* list (whether 1892 * chain or simple). 1893 */ 1894 ce = (SGE_CHAIN32 *) se; 1895 1896 /* 1897 * Before we change our current pointer, make sure we won't 1898 * overflow the request area with this frame. Note that we 1899 * test against 'greater than' here as it's okay in this case 1900 * to have next offset be just outside the request area. 1901 */ 1902 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1903 nxt_off = MPT_REQUEST_AREA; 1904 goto next_chain; 1905 } 1906 1907 /* 1908 * Set our SGE element pointer to the beginning of the chain 1909 * list and update our next chain list offset. 1910 */ 1911 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; 1912 cur_off = nxt_off; 1913 nxt_off += MPT_RQSL(mpt); 1914 1915 /* 1916 * Now initialize the chain descriptor. 1917 */ 1918 memset(ce, 0, sizeof (*ce)); 1919 1920 /* 1921 * Get the physical address of the chain list. 1922 */ 1923 chain_list_addr = trq->req_pbuf; 1924 chain_list_addr += cur_off; 1925 1926 1927 1928 ce->Address = htole32(chain_list_addr); 1929 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; 1930 1931 1932 /* 1933 * If we have more than a frame's worth of segments left, 1934 * set up the chain list to have the last element be another 1935 * chain descriptor. 1936 */ 1937 if ((nseg - seg) > MPT_NSGL(mpt)) { 1938 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1939 /* 1940 * The length of the chain is the length in bytes of the 1941 * number of segments plus the next chain element. 1942 * 1943 * The next chain descriptor offset is the length, 1944 * in words, of the number of segments. 1945 */ 1946 ce->Length = (this_seg_lim - seg) * 1947 sizeof (SGE_SIMPLE32); 1948 ce->NextChainOffset = ce->Length >> 2; 1949 ce->Length += sizeof (SGE_CHAIN32); 1950 } else { 1951 this_seg_lim = nseg; 1952 ce->Length = (this_seg_lim - seg) * 1953 sizeof (SGE_SIMPLE32); 1954 } 1955 ce->Length = htole16(ce->Length); 1956 1957 /* 1958 * Fill in the chain list SGE elements with our segment data. 1959 * 1960 * If we're the last element in this chain list, set the last 1961 * element flag. 
If we're the completely last element period, 1962 * set the end of list and end of buffer flags. 1963 */ 1964 while (seg < this_seg_lim) { 1965 memset(se, 0, sizeof (*se)); 1966 se->Address = htole32(dm_segs->ds_addr); 1967 1968 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1969 tf = flags; 1970 if (seg == this_seg_lim - 1) { 1971 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1972 } 1973 if (seg == nseg - 1) { 1974 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1975 MPI_SGE_FLAGS_END_OF_BUFFER; 1976 } 1977 MPI_pSGE_SET_FLAGS(se, tf); 1978 se->FlagsLength = htole32(se->FlagsLength); 1979 se++; 1980 seg++; 1981 dm_segs++; 1982 } 1983 1984 next_chain: 1985 /* 1986 * If we have more segments to do and we've used up all of 1987 * the space in a request area, go allocate another one 1988 * and chain to that. 1989 */ 1990 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1991 request_t *nrq; 1992 1993 CAMLOCK_2_MPTLOCK(mpt); 1994 nrq = mpt_get_request(mpt, FALSE); 1995 MPTLOCK_2_CAMLOCK(mpt); 1996 1997 if (nrq == NULL) { 1998 error = ENOMEM; 1999 goto bad; 2000 } 2001 2002 /* 2003 * Append the new request area on the tail of our list. 2004 */ 2005 if ((trq = req->chain) == NULL) { 2006 req->chain = nrq; 2007 } else { 2008 while (trq->chain != NULL) { 2009 trq = trq->chain; 2010 } 2011 trq->chain = nrq; 2012 } 2013 trq = nrq; 2014 mpt_off = trq->req_vbuf; 2015 if (mpt->verbose >= MPT_PRT_DEBUG) { 2016 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 2017 } 2018 nxt_off = 0; 2019 } 2020 } 2021 out: 2022 2023 /* 2024 * Last time we need to check if this CCB needs to be aborted. 2025 */ 2026 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2027 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2028 request_t *cmd_req = 2029 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2030 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 2031 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 2032 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 2033 } 2034 mpt_prt(mpt, 2035 "mpt_execute_req: I/O cancelled (status 0x%x)\n", 2036 ccb->ccb_h.status & CAM_STATUS_MASK); 2037 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2038 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2039 } 2040 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2041 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 2042 xpt_done(ccb); 2043 CAMLOCK_2_MPTLOCK(mpt); 2044 mpt_free_request(mpt, req); 2045 MPTLOCK_2_CAMLOCK(mpt); 2046 return; 2047 } 2048 2049 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2050 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2051 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, 2052 mpt_timeout, ccb); 2053 } 2054 if (mpt->verbose > MPT_PRT_DEBUG) { 2055 int nc = 0; 2056 mpt_print_request(req->req_vbuf); 2057 for (trq = req->chain; trq; trq = trq->chain) { 2058 printf(" Additional Chain Area %d\n", nc++); 2059 mpt_dump_sgl(trq->req_vbuf, 0); 2060 } 2061 } 2062 2063 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2064 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2065 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 2066 #ifdef WE_TRUST_AUTO_GOOD_STATUS 2067 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 2068 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 2069 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 2070 } else { 2071 tgt->state = TGT_STATE_MOVING_DATA; 2072 } 2073 #else 2074 tgt->state = TGT_STATE_MOVING_DATA; 2075 #endif 2076 } 2077 CAMLOCK_2_MPTLOCK(mpt); 2078 mpt_send_cmd(mpt, req); 2079 MPTLOCK_2_CAMLOCK(mpt); 2080 } 2081 2082 static void 2083 mpt_start(struct cam_sim *sim, union ccb *ccb) 2084 { 2085 request_t *req; 
2086 	struct mpt_softc *mpt;
2087 	MSG_SCSI_IO_REQUEST *mpt_req;
2088 	struct ccb_scsiio *csio = &ccb->csio;
2089 	struct ccb_hdr *ccbh = &ccb->ccb_h;
2090 	bus_dmamap_callback_t *cb;
2091 	target_id_t tgt;
2092 	int raid_passthru;
2093 
2094 	/* Get the pointer for the physical adapter */
2095 	mpt = ccb->ccb_h.ccb_mpt_ptr;
2096 	raid_passthru = (sim == mpt->phydisk_sim);
2097 
2098 	CAMLOCK_2_MPTLOCK(mpt);
2099 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2100 		if (mpt->outofbeer == 0) {
2101 			mpt->outofbeer = 1;
2102 			xpt_freeze_simq(mpt->sim, 1);
2103 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2104 		}
2105 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2106 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2107 		MPTLOCK_2_CAMLOCK(mpt);
2108 		xpt_done(ccb);
2109 		return;
2110 	}
2111 #ifdef	INVARIANTS
2112 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2113 #endif
2114 	MPTLOCK_2_CAMLOCK(mpt);
2115 
2116 	if (sizeof (bus_addr_t) > 4) {
2117 		cb = mpt_execute_req_a64;
2118 	} else {
2119 		cb = mpt_execute_req;
2120 	}
2121 
2122 	/*
2123 	 * Link the ccb and the request structure so we can find
2124 	 * the other knowing either the request or the ccb
2125 	 */
2126 	req->ccb = ccb;
2127 	ccb->ccb_h.ccb_req_ptr = req;
2128 
2129 	/* Now we build the command for the IOC */
2130 	mpt_req = req->req_vbuf;
2131 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2132 
2133 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2134 	if (raid_passthru) {
2135 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2136 		CAMLOCK_2_MPTLOCK(mpt);
2137 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2138 			MPTLOCK_2_CAMLOCK(mpt);
2139 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2140 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2141 			xpt_done(ccb);
2142 			return;
2143 		}
2144 		MPTLOCK_2_CAMLOCK(mpt);
2145 		mpt_req->Bus = 0;	/* we never set bus here */
2146 	} else {
2147 		tgt = ccb->ccb_h.target_id;
2148 		mpt_req->Bus = 0;	/* XXX */
2149 
2150 	}
2151 	mpt_req->SenseBufferLength =
2152 		(csio->sense_len < MPT_SENSE_SIZE) ?
2153 		csio->sense_len : MPT_SENSE_SIZE;
2154 
2155 	/*
2156 	 * We use the message context to find the request structure when we
2157 	 * get the command completion interrupt from the IOC.
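	 * The context packs the request index together with a handler id
	 * (req->index | scsi_io_handler_id, set just below), so the reply
	 * path can recover both the handler and the request by masking
	 * the context value.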
2158 	 */
2159 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2160 
2161 	/* Which physical device to do the I/O on */
2162 	mpt_req->TargetID = tgt;
2163 
2164 	/* We assume a single level LUN type */
2165 	if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2166 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2167 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2168 	} else {
2169 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2170 	}
2171 
2172 	/* Set the direction of the transfer */
2173 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2174 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2175 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2176 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2177 	} else {
2178 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2179 	}
2180 
2181 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2182 		switch(ccb->csio.tag_action) {
2183 		case MSG_HEAD_OF_Q_TAG:
2184 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2185 			break;
2186 		case MSG_ACA_TASK:
2187 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2188 			break;
2189 		case MSG_ORDERED_Q_TAG:
2190 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2191 			break;
2192 		case MSG_SIMPLE_Q_TAG:
2193 		default:
2194 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2195 			break;
2196 		}
2197 	} else {
2198 		if (mpt->is_fc || mpt->is_sas) {
2199 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2200 		} else {
2201 			/* XXX No such thing for a target doing packetized. */
2202 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2203 		}
2204 	}
2205 
2206 	if (mpt->is_spi) {
2207 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2208 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2209 		}
2210 	}
2211 	mpt_req->Control = htole32(mpt_req->Control);
2212 
2213 	/* Copy the scsi command block into place */
2214 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2215 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2216 	} else {
2217 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2218 	}
2219 
2220 	mpt_req->CDBLength = csio->cdb_len;
2221 	mpt_req->DataLength = htole32(csio->dxfer_len);
2222 	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2223 
2224 	/*
2225 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2226 	 */
2227 	if (mpt->verbose == MPT_PRT_DEBUG) {
2228 		U32 df;
2229 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2230 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2231 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2232 		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2233 		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2234 			mpt_prtc(mpt, "(%s %u byte%s ",
2235 			    (df == MPI_SCSIIO_CONTROL_READ)?
2236 			    "read" : "write", csio->dxfer_len,
2237 			    (csio->dxfer_len == 1)? ")" : "s)");
2238 		}
2239 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2240 		    ccb->ccb_h.target_lun, req, req->serno);
2241 	}
2242 
2243 	/*
2244 	 * If we have any data to send with this command map it into bus space.
2245 	 */
2246 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2247 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
2248 			/*
2249 			 * We've been given a pointer to a single buffer.
2250 			 */
2251 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
2252 				/*
2253 				 * Virtual address that needs to be translated
2254 				 * into one or more physical address ranges.
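				 *
				 * bus_dmamap_load() either invokes the
				 * callback immediately or returns EINPROGRESS
				 * when the mapping has been deferred; the
				 * EINPROGRESS case is handled below by
				 * freezing the SIM queue so that command
				 * ordering is preserved until the callback
				 * runs.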
2255 */ 2256 int error; 2257 int s = splsoftvm(); 2258 error = bus_dmamap_load(mpt->buffer_dmat, 2259 req->dmap, csio->data_ptr, csio->dxfer_len, 2260 cb, req, 0); 2261 splx(s); 2262 if (error == EINPROGRESS) { 2263 /* 2264 * So as to maintain ordering, 2265 * freeze the controller queue 2266 * until our mapping is 2267 * returned. 2268 */ 2269 xpt_freeze_simq(mpt->sim, 1); 2270 ccbh->status |= CAM_RELEASE_SIMQ; 2271 } 2272 } else { 2273 /* 2274 * We have been given a pointer to single 2275 * physical buffer. 2276 */ 2277 struct bus_dma_segment seg; 2278 seg.ds_addr = 2279 (bus_addr_t)(vm_offset_t)csio->data_ptr; 2280 seg.ds_len = csio->dxfer_len; 2281 (*cb)(req, &seg, 1, 0); 2282 } 2283 } else { 2284 /* 2285 * We have been given a list of addresses. 2286 * This case could be easily supported but they are not 2287 * currently generated by the CAM subsystem so there 2288 * is no point in wasting the time right now. 2289 */ 2290 struct bus_dma_segment *segs; 2291 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { 2292 (*cb)(req, NULL, 0, EFAULT); 2293 } else { 2294 /* Just use the segments provided */ 2295 segs = (struct bus_dma_segment *)csio->data_ptr; 2296 (*cb)(req, segs, csio->sglist_cnt, 0); 2297 } 2298 } 2299 } else { 2300 (*cb)(req, NULL, 0, 0); 2301 } 2302 } 2303 2304 static int 2305 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, 2306 int sleep_ok) 2307 { 2308 int error; 2309 uint16_t status; 2310 uint8_t response; 2311 2312 error = mpt_scsi_send_tmf(mpt, 2313 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? 2314 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : 2315 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 2316 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 2317 0, /* XXX How do I get the channel ID? */ 2318 tgt != CAM_TARGET_WILDCARD ? tgt : 0, 2319 lun != CAM_LUN_WILDCARD ? lun : 0, 2320 0, sleep_ok); 2321 2322 if (error != 0) { 2323 /* 2324 * mpt_scsi_send_tmf hard resets on failure, so no 2325 * need to do so here. 2326 */ 2327 mpt_prt(mpt, 2328 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); 2329 return (EIO); 2330 } 2331 2332 /* Wait for bus reset to be processed by the IOC. */ 2333 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 2334 REQ_STATE_DONE, sleep_ok, 5000); 2335 2336 status = le16toh(mpt->tmf_req->IOCStatus); 2337 response = mpt->tmf_req->ResponseCode; 2338 mpt->tmf_req->state = REQ_STATE_FREE; 2339 2340 if (error) { 2341 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " 2342 "Resetting controller.\n"); 2343 mpt_reset(mpt, TRUE); 2344 return (ETIMEDOUT); 2345 } 2346 2347 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 2348 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " 2349 "Resetting controller.\n", status); 2350 mpt_reset(mpt, TRUE); 2351 return (EIO); 2352 } 2353 2354 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 2355 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 2356 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. 
" 2357 "Resetting controller.\n", response); 2358 mpt_reset(mpt, TRUE); 2359 return (EIO); 2360 } 2361 return (0); 2362 } 2363 2364 static int 2365 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) 2366 { 2367 int r = 0; 2368 request_t *req; 2369 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; 2370 2371 req = mpt_get_request(mpt, FALSE); 2372 if (req == NULL) { 2373 return (ENOMEM); 2374 } 2375 fc = req->req_vbuf; 2376 memset(fc, 0, sizeof(*fc)); 2377 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; 2378 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; 2379 fc->MsgContext = htole32(req->index | fc_els_handler_id); 2380 mpt_send_cmd(mpt, req); 2381 if (dowait) { 2382 r = mpt_wait_req(mpt, req, REQ_STATE_DONE, 2383 REQ_STATE_DONE, FALSE, 60 * 1000); 2384 if (r == 0) { 2385 mpt_free_request(mpt, req); 2386 } 2387 } 2388 return (r); 2389 } 2390 2391 static int 2392 mpt_cam_event(struct mpt_softc *mpt, request_t *req, 2393 MSG_EVENT_NOTIFY_REPLY *msg) 2394 { 2395 uint32_t data0, data1; 2396 2397 data0 = le32toh(msg->Data[0]); 2398 data1 = le32toh(msg->Data[1]); 2399 switch(msg->Event & 0xFF) { 2400 case MPI_EVENT_UNIT_ATTENTION: 2401 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", 2402 (data0 >> 8) & 0xff, data0 & 0xff); 2403 break; 2404 2405 case MPI_EVENT_IOC_BUS_RESET: 2406 /* We generated a bus reset */ 2407 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", 2408 (data0 >> 8) & 0xff); 2409 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2410 break; 2411 2412 case MPI_EVENT_EXT_BUS_RESET: 2413 /* Someone else generated a bus reset */ 2414 mpt_prt(mpt, "External Bus Reset Detected\n"); 2415 /* 2416 * These replies don't return EventData like the MPI 2417 * spec says they do 2418 */ 2419 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2420 break; 2421 2422 case MPI_EVENT_RESCAN: 2423 #if __FreeBSD_version >= 600000 2424 { 2425 union ccb *ccb; 2426 uint32_t pathid; 2427 /* 2428 * In general this means a device has been added to the loop. 2429 */ 2430 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2431 if (mpt->ready == 0) { 2432 break; 2433 } 2434 if (mpt->phydisk_sim) { 2435 pathid = cam_sim_path(mpt->phydisk_sim); 2436 } else { 2437 pathid = cam_sim_path(mpt->sim); 2438 } 2439 MPTLOCK_2_CAMLOCK(mpt); 2440 /* 2441 * Allocate a CCB, create a wildcard path for this bus, 2442 * and schedule a rescan. 2443 */ 2444 ccb = xpt_alloc_ccb_nowait(); 2445 if (ccb == NULL) { 2446 mpt_prt(mpt, "unable to alloc CCB for rescan\n"); 2447 CAMLOCK_2_MPTLOCK(mpt); 2448 break; 2449 } 2450 2451 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, 2452 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2453 CAMLOCK_2_MPTLOCK(mpt); 2454 mpt_prt(mpt, "unable to create path for rescan\n"); 2455 xpt_free_ccb(ccb); 2456 break; 2457 } 2458 xpt_rescan(ccb); 2459 CAMLOCK_2_MPTLOCK(mpt); 2460 break; 2461 } 2462 #else 2463 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2464 break; 2465 #endif 2466 case MPI_EVENT_LINK_STATUS_CHANGE: 2467 mpt_prt(mpt, "Port %d: LinkState: %s\n", 2468 (data1 >> 8) & 0xff, 2469 ((data0 & 0xff) == 0)? 
"Failed" : "Active"); 2470 break; 2471 2472 case MPI_EVENT_LOOP_STATE_CHANGE: 2473 switch ((data0 >> 16) & 0xff) { 2474 case 0x01: 2475 mpt_prt(mpt, 2476 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " 2477 "(Loop Initialization)\n", 2478 (data1 >> 8) & 0xff, 2479 (data0 >> 8) & 0xff, 2480 (data0 ) & 0xff); 2481 switch ((data0 >> 8) & 0xff) { 2482 case 0xF7: 2483 if ((data0 & 0xff) == 0xF7) { 2484 mpt_prt(mpt, "Device needs AL_PA\n"); 2485 } else { 2486 mpt_prt(mpt, "Device %02x doesn't like " 2487 "FC performance\n", 2488 data0 & 0xFF); 2489 } 2490 break; 2491 case 0xF8: 2492 if ((data0 & 0xff) == 0xF7) { 2493 mpt_prt(mpt, "Device had loop failure " 2494 "at its receiver prior to acquiring" 2495 " AL_PA\n"); 2496 } else { 2497 mpt_prt(mpt, "Device %02x detected loop" 2498 " failure at its receiver\n", 2499 data0 & 0xFF); 2500 } 2501 break; 2502 default: 2503 mpt_prt(mpt, "Device %02x requests that device " 2504 "%02x reset itself\n", 2505 data0 & 0xFF, 2506 (data0 >> 8) & 0xFF); 2507 break; 2508 } 2509 break; 2510 case 0x02: 2511 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2512 "LPE(%02x,%02x) (Loop Port Enable)\n", 2513 (data1 >> 8) & 0xff, /* Port */ 2514 (data0 >> 8) & 0xff, /* Character 3 */ 2515 (data0 ) & 0xff /* Character 4 */); 2516 break; 2517 case 0x03: 2518 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2519 "LPB(%02x,%02x) (Loop Port Bypass)\n", 2520 (data1 >> 8) & 0xff, /* Port */ 2521 (data0 >> 8) & 0xff, /* Character 3 */ 2522 (data0 ) & 0xff /* Character 4 */); 2523 break; 2524 default: 2525 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " 2526 "FC event (%02x %02x %02x)\n", 2527 (data1 >> 8) & 0xff, /* Port */ 2528 (data0 >> 16) & 0xff, /* Event */ 2529 (data0 >> 8) & 0xff, /* Character 3 */ 2530 (data0 ) & 0xff /* Character 4 */); 2531 } 2532 break; 2533 2534 case MPI_EVENT_LOGOUT: 2535 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", 2536 (data1 >> 8) & 0xff, data0); 2537 break; 2538 case MPI_EVENT_QUEUE_FULL: 2539 { 2540 struct cam_sim *sim; 2541 struct cam_path *tmppath; 2542 struct ccb_relsim crs; 2543 PTR_EVENT_DATA_QUEUE_FULL pqf; 2544 lun_id_t lun_id; 2545 2546 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; 2547 pqf->CurrentDepth = le16toh(pqf->CurrentDepth); 2548 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " 2549 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); 2550 if (mpt->phydisk_sim && mpt_is_raid_member(mpt, 2551 pqf->TargetID) != 0) { 2552 sim = mpt->phydisk_sim; 2553 } else { 2554 sim = mpt->sim; 2555 } 2556 MPTLOCK_2_CAMLOCK(mpt); 2557 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { 2558 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2559 pqf->TargetID, lun_id) != CAM_REQ_CMP) { 2560 mpt_prt(mpt, "unable to create a path to send " 2561 "XPT_REL_SIMQ"); 2562 CAMLOCK_2_MPTLOCK(mpt); 2563 break; 2564 } 2565 xpt_setup_ccb(&crs.ccb_h, tmppath, 5); 2566 crs.ccb_h.func_code = XPT_REL_SIMQ; 2567 crs.ccb_h.flags = CAM_DEV_QFREEZE; 2568 crs.release_flags = RELSIM_ADJUST_OPENINGS; 2569 crs.openings = pqf->CurrentDepth - 1; 2570 xpt_action((union ccb *)&crs); 2571 if (crs.ccb_h.status != CAM_REQ_CMP) { 2572 mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); 2573 } 2574 xpt_free_path(tmppath); 2575 } 2576 CAMLOCK_2_MPTLOCK(mpt); 2577 break; 2578 } 2579 case MPI_EVENT_IR_RESYNC_UPDATE: 2580 mpt_prt(mpt, "IR resync update %d completed\n", 2581 (data0 >> 16) & 0xff); 2582 break; 2583 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2584 { 2585 union ccb *ccb; 2586 struct cam_sim *sim; 2587 struct cam_path *tmppath; 2588 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc; 
2589 2590 psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data; 2591 if (mpt->phydisk_sim && mpt_is_raid_member(mpt, 2592 psdsc->TargetID) != 0) 2593 sim = mpt->phydisk_sim; 2594 else 2595 sim = mpt->sim; 2596 switch(psdsc->ReasonCode) { 2597 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 2598 MPTLOCK_2_CAMLOCK(mpt); 2599 ccb = xpt_alloc_ccb_nowait(); 2600 if (ccb == NULL) { 2601 mpt_prt(mpt, 2602 "unable to alloc CCB for rescan\n"); 2603 CAMLOCK_2_MPTLOCK(mpt); 2604 break; 2605 } 2606 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, 2607 cam_sim_path(sim), psdsc->TargetID, 2608 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2609 CAMLOCK_2_MPTLOCK(mpt); 2610 mpt_prt(mpt, 2611 "unable to create path for rescan\n"); 2612 xpt_free_ccb(ccb); 2613 break; 2614 } 2615 xpt_rescan(ccb); 2616 CAMLOCK_2_MPTLOCK(mpt); 2617 break; 2618 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 2619 MPTLOCK_2_CAMLOCK(mpt); 2620 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2621 psdsc->TargetID, CAM_LUN_WILDCARD) != 2622 CAM_REQ_CMP) { 2623 mpt_prt(mpt, 2624 "unable to create path for async event"); 2625 CAMLOCK_2_MPTLOCK(mpt); 2626 break; 2627 } 2628 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 2629 xpt_free_path(tmppath); 2630 CAMLOCK_2_MPTLOCK(mpt); 2631 break; 2632 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET: 2633 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL: 2634 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 2635 break; 2636 default: 2637 mpt_lprt(mpt, MPT_PRT_WARN, 2638 "SAS device status change: Bus: 0x%02x TargetID: " 2639 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus, 2640 psdsc->TargetID, psdsc->ReasonCode); 2641 break; 2642 } 2643 break; 2644 } 2645 case MPI_EVENT_SAS_DISCOVERY_ERROR: 2646 { 2647 PTR_EVENT_DATA_DISCOVERY_ERROR pde; 2648 2649 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data; 2650 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus); 2651 mpt_lprt(mpt, MPT_PRT_WARN, 2652 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n", 2653 pde->Port, pde->DiscoveryStatus); 2654 break; 2655 } 2656 case MPI_EVENT_EVENT_CHANGE: 2657 case MPI_EVENT_INTEGRATED_RAID: 2658 case MPI_EVENT_IR2: 2659 case MPI_EVENT_LOG_ENTRY_ADDED: 2660 case MPI_EVENT_SAS_DISCOVERY: 2661 case MPI_EVENT_SAS_PHY_LINK_STATUS: 2662 case MPI_EVENT_SAS_SES: 2663 break; 2664 default: 2665 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", 2666 msg->Event & 0xFF); 2667 return (0); 2668 } 2669 return (1); 2670 } 2671 2672 /* 2673 * Reply path for all SCSI I/O requests, called from our 2674 * interrupt handler by extracting our handler index from 2675 * the MsgContext field of the reply from the IOC. 2676 * 2677 * This routine is optimized for the common case of a 2678 * completion without error. All exception handling is 2679 * offloaded to non-inlined helper routines to minimize 2680 * cache footprint. 
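 *
 * A context-only reply (reply_frame == NULL below) is that common
 * case: the IOC posts just the request context, which implies a
 * clean completion with SCSI_STATUS_OK and no residual.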
2681 */ 2682 static int 2683 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, 2684 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2685 { 2686 MSG_SCSI_IO_REQUEST *scsi_req; 2687 union ccb *ccb; 2688 2689 if (req->state == REQ_STATE_FREE) { 2690 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); 2691 return (TRUE); 2692 } 2693 2694 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; 2695 ccb = req->ccb; 2696 if (ccb == NULL) { 2697 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", 2698 req, req->serno); 2699 return (TRUE); 2700 } 2701 2702 mpt_req_untimeout(req, mpt_timeout, ccb); 2703 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2704 2705 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2706 bus_dmasync_op_t op; 2707 2708 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 2709 op = BUS_DMASYNC_POSTREAD; 2710 else 2711 op = BUS_DMASYNC_POSTWRITE; 2712 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 2713 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2714 } 2715 2716 if (reply_frame == NULL) { 2717 /* 2718 * Context only reply, completion without error status. 2719 */ 2720 ccb->csio.resid = 0; 2721 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2722 ccb->csio.scsi_status = SCSI_STATUS_OK; 2723 } else { 2724 mpt_scsi_reply_frame_handler(mpt, req, reply_frame); 2725 } 2726 2727 if (mpt->outofbeer) { 2728 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2729 mpt->outofbeer = 0; 2730 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 2731 } 2732 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { 2733 struct scsi_inquiry_data *iq = 2734 (struct scsi_inquiry_data *)ccb->csio.data_ptr; 2735 if (scsi_req->Function == 2736 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 2737 /* 2738 * Fake out the device type so that only the 2739 * pass-thru device will attach. 2740 */ 2741 iq->device &= ~0x1F; 2742 iq->device |= T_NODEVICE; 2743 } 2744 } 2745 if (mpt->verbose == MPT_PRT_DEBUG) { 2746 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", 2747 req, req->serno); 2748 } 2749 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 2750 MPTLOCK_2_CAMLOCK(mpt); 2751 xpt_done(ccb); 2752 CAMLOCK_2_MPTLOCK(mpt); 2753 if ((req->state & REQ_STATE_TIMEDOUT) == 0) { 2754 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2755 } else { 2756 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", 2757 req, req->serno); 2758 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 2759 } 2760 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, 2761 ("CCB req needed wakeup")); 2762 #ifdef INVARIANTS 2763 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); 2764 #endif 2765 mpt_free_request(mpt, req); 2766 return (TRUE); 2767 } 2768 2769 static int 2770 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, 2771 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2772 { 2773 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; 2774 2775 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); 2776 #ifdef INVARIANTS 2777 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); 2778 #endif 2779 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; 2780 /* Record IOC Status and Response Code of TMF for any waiters. 
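	 * The bus reset path sleeps on this request and examines these
	 * fields once REQ_STATE_DONE has been set.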
	 */
2781 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2782 	req->ResponseCode = tmf_reply->ResponseCode;
2783 
2784 	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2785 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2786 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2787 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2788 		req->state |= REQ_STATE_DONE;
2789 		wakeup(req);
2790 	} else {
2791 		mpt->tmf_req->state = REQ_STATE_FREE;
2792 	}
2793 	return (TRUE);
2794 }
2795 
2796 /*
2797  * XXX: Move to definitions file
2798  */
2799 #define	ELS	0x22
2800 #define	FC4LS	0x32
2801 #define	ABTS	0x81
2802 #define	BA_ACC	0x84
2803 
2804 #define	LS_RJT	0x01
2805 #define	LS_ACC	0x02
2806 #define	PLOGI	0x03
2807 #define	LOGO	0x05
2808 #define	SRR	0x14
2809 #define	PRLI	0x20
2810 #define	PRLO	0x21
2811 #define	ADISC	0x52
2812 #define	RSCN	0x61
2813 
2814 static void
2815 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2816     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2817 {
2818 	uint32_t fl;
2819 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2820 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2821 
2822 	/*
2823 	 * We are going to reuse the ELS request to send this response back.
2824 	 */
2825 	rsp = &tmp;
2826 	memset(rsp, 0, sizeof(*rsp));
2827 
2828 #ifdef	USE_IMMEDIATE_LINK_DATA
2829 	/*
2830 	 * Apparently the IMMEDIATE stuff doesn't work.
2831 	 */
2832 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2833 #endif
2834 	rsp->RspLength = length;
2835 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2836 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2837 
2838 	/*
2839 	 * Copy over information from the original reply frame to
2840 	 * its correct place in the response.
2841 	 */
2842 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2843 
2844 	/*
2845 	 * And now copy back the temporary area to the original frame.
2846 	 */
2847 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2848 	rsp = req->req_vbuf;
2849 
2850 #ifdef	USE_IMMEDIATE_LINK_DATA
2851 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2852 #else
2853 	{
2854 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2855 	bus_addr_t paddr = req->req_pbuf;
2856 	paddr += MPT_RQSL(mpt);
2857 
2858 	fl =
2859 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2860 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2861 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2862 		MPI_SGE_FLAGS_END_OF_LIST	|
2863 		MPI_SGE_FLAGS_END_OF_BUFFER;
2864 	fl <<= MPI_SGE_FLAGS_SHIFT;
2865 	fl |= (length);
2866 	se->FlagsLength = htole32(fl);
2867 	se->Address = htole32((uint32_t) paddr);
2868 	}
2869 #endif
2870 
2871 	/*
2872 	 * Send it on...
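	 * The completion of the reused request is routed back to this
	 * ELS code by the fc_els_handler_id encoded in MsgContext above.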
2873 	 */
2874 	mpt_send_cmd(mpt, req);
2875 }
2876 
2877 static int
2878 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2879     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2880 {
2881 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2882 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2883 	U8 rctl;
2884 	U8 type;
2885 	U8 cmd;
2886 	U16 status = le16toh(reply_frame->IOCStatus);
2887 	U32 *elsbuf;
2888 	int ioindex;
2889 	int do_refresh = TRUE;
2890 
2891 #ifdef	INVARIANTS
2892 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2893 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2894 	    req, req->serno, rp->Function));
2895 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2896 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2897 	} else {
2898 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2899 	}
2900 #endif
2901 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2902 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2903 	    req, req->serno, reply_frame, reply_frame->Function);
2904 
2905 	if (status != MPI_IOCSTATUS_SUCCESS) {
2906 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2907 		    status, reply_frame->Function);
2908 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2909 			/*
2910 			 * XXX: to get around shutdown issue
2911 			 */
2912 			mpt->disabled = 1;
2913 			return (TRUE);
2914 		}
2915 		return (TRUE);
2916 	}
2917 
2918 	/*
2919 	 * If this is the completion of a link service response, we
2920 	 * recycle the request into a fresh link service buffer post.
2921 	 *
2922 	 * The request pointer is bogus in this case and we have to fetch
2923 	 * it based upon the TransactionContext.
2924 	 */
2925 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2926 		/* Freddie Uncle Charlie Katie */
2927 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2928 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2929 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2930 				break;
2931 			}
2932 
2933 		KASSERT(ioindex < mpt->els_cmds_allocated,
2934 		    ("can't find my mommie!"));
2935 
2936 		/* remove from active list as we're going to re-post it */
2937 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2938 		req->state &= ~REQ_STATE_QUEUED;
2939 		req->state |= REQ_STATE_DONE;
2940 		mpt_fc_post_els(mpt, req, ioindex);
2941 		return (TRUE);
2942 	}
2943 
2944 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2945 		/* remove from active list as we're done */
2946 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2947 		req->state &= ~REQ_STATE_QUEUED;
2948 		req->state |= REQ_STATE_DONE;
2949 		if (req->state & REQ_STATE_TIMEDOUT) {
2950 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2951 			    "Sync Primitive Send Completed After Timeout\n");
2952 			mpt_free_request(mpt, req);
2953 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2954 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2955 			    "Async Primitive Send Complete\n");
2956 			mpt_free_request(mpt, req);
2957 		} else {
2958 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2959 			    "Sync Primitive Send Complete- Waking Waiter\n");
2960 			wakeup(req);
2961 		}
2962 		return (TRUE);
2963 	}
2964 
2965 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2966 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2967 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2968 		    rp->MsgLength, rp->MsgFlags);
2969 		return (TRUE);
2970 	}
2971 
2972 	if (rp->MsgLength <= 5) {
2973 		/*
2974 		 * This is just an ack of an original ELS buffer post
2975 		 */
2976 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2977 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2978 		return (TRUE);
2979 	}
2980 
2981 
2982 	rctl = (le32toh(rp->Rctl_Did) &
MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2983 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2984 
2985 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2986 	cmd = be32toh(elsbuf[0]) >> 24;
2987 
2988 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2989 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2990 		return (TRUE);
2991 	}
2992 
2993 	ioindex = le32toh(rp->TransactionContext);
2994 	req = mpt->els_cmd_ptrs[ioindex];
2995 
2996 	if (rctl == ELS && type == 1) {
2997 		switch (cmd) {
2998 		case PRLI:
2999 			/*
3000 			 * Send back a PRLI ACC
3001 			 */
3002 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
3003 			    le32toh(rp->Wwn.PortNameHigh),
3004 			    le32toh(rp->Wwn.PortNameLow));
3005 			elsbuf[0] = htobe32(0x02100014);
3006 			elsbuf[1] |= htobe32(0x00000100);
3007 			elsbuf[4] = htobe32(0x00000002);
3008 			if (mpt->role & MPT_ROLE_TARGET)
3009 				elsbuf[4] |= htobe32(0x00000010);
3010 			if (mpt->role & MPT_ROLE_INITIATOR)
3011 				elsbuf[4] |= htobe32(0x00000020);
3012 			/* remove from active list as we're done */
3013 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3014 			req->state &= ~REQ_STATE_QUEUED;
3015 			req->state |= REQ_STATE_DONE;
3016 			mpt_fc_els_send_response(mpt, req, rp, 20);
3017 			do_refresh = FALSE;
3018 			break;
3019 		case PRLO:
3020 			memset(elsbuf, 0, 5 * (sizeof (U32)));
3021 			elsbuf[0] = htobe32(0x02100014);
3022 			elsbuf[1] = htobe32(0x08000100);
3023 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
3024 			    le32toh(rp->Wwn.PortNameHigh),
3025 			    le32toh(rp->Wwn.PortNameLow));
3026 			/* remove from active list as we're done */
3027 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3028 			req->state &= ~REQ_STATE_QUEUED;
3029 			req->state |= REQ_STATE_DONE;
3030 			mpt_fc_els_send_response(mpt, req, rp, 20);
3031 			do_refresh = FALSE;
3032 			break;
3033 		default:
3034 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
3035 			break;
3036 		}
3037 	} else if (rctl == ABTS && type == 0) {
3038 		uint16_t rx_id = le16toh(rp->Rxid);
3039 		uint16_t ox_id = le16toh(rp->Oxid);
3040 		request_t *tgt_req = NULL;
3041 
3042 		mpt_prt(mpt,
3043 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
3044 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
3045 		    le32toh(rp->Wwn.PortNameLow));
3046 		if (rx_id >= mpt->mpt_max_tgtcmds) {
3047 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
3048 		} else if (mpt->tgt_cmd_ptrs == NULL) {
3049 			mpt_prt(mpt, "No TGT CMD PTRS\n");
3050 		} else {
3051 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
3052 		}
3053 		if (tgt_req) {
3054 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
3055 			union ccb *ccb;
3056 			uint32_t ct_id;
3057 
3058 			/*
3059 			 * Check to make sure we have the correct command.
3060 			 * The reply descriptor in the target state should
3061 			 * contain an IoIndex that should match the RX_ID.
3062 			 *
3064 			 * It'd be nice to have OX_ID to crosscheck with
3065 			 * as well.
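			 *
			 * GET_IO_INDEX() recovers the IoIndex from the reply
			 * descriptor saved in the target state; if it
			 * disagrees with the RX_ID named by the ABTS, the
			 * ABTS refers to a different exchange than the
			 * command we looked up.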
3066 */ 3067 ct_id = GET_IO_INDEX(tgt->reply_desc); 3068 3069 if (ct_id != rx_id) { 3070 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " 3071 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n", 3072 rx_id, ct_id); 3073 goto skip; 3074 } 3075 3076 ccb = tgt->ccb; 3077 if (ccb) { 3078 mpt_prt(mpt, 3079 "CCB (%p): lun %u flags %x status %x\n", 3080 ccb, ccb->ccb_h.target_lun, 3081 ccb->ccb_h.flags, ccb->ccb_h.status); 3082 } 3083 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " 3084 "%x nxfers %x\n", tgt->state, 3085 tgt->resid, tgt->bytes_xfered, tgt->reply_desc, 3086 tgt->nxfers); 3087 skip: 3088 if (mpt_abort_target_cmd(mpt, tgt_req)) { 3089 mpt_prt(mpt, "unable to start TargetAbort\n"); 3090 } 3091 } else { 3092 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); 3093 } 3094 memset(elsbuf, 0, 5 * (sizeof (U32))); 3095 elsbuf[0] = htobe32(0); 3096 elsbuf[1] = htobe32((ox_id << 16) | rx_id); 3097 elsbuf[2] = htobe32(0x000ffff); 3098 /* 3099 * Dork with the reply frame so that the response to it 3100 * will be correct. 3101 */ 3102 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); 3103 /* remove from active list as we're done */ 3104 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3105 req->state &= ~REQ_STATE_QUEUED; 3106 req->state |= REQ_STATE_DONE; 3107 mpt_fc_els_send_response(mpt, req, rp, 12); 3108 do_refresh = FALSE; 3109 } else { 3110 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); 3111 } 3112 if (do_refresh == TRUE) { 3113 /* remove from active list as we're done */ 3114 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3115 req->state &= ~REQ_STATE_QUEUED; 3116 req->state |= REQ_STATE_DONE; 3117 mpt_fc_post_els(mpt, req, ioindex); 3118 } 3119 return (TRUE); 3120 } 3121 3122 /* 3123 * Clean up all SCSI Initiator personality state in response 3124 * to a controller reset. 3125 */ 3126 static void 3127 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) 3128 { 3129 3130 /* 3131 * The pending list is already run down by 3132 * the generic handler. Perform the same 3133 * operation on the timed out request list. 3134 */ 3135 mpt_complete_request_chain(mpt, &mpt->request_timeout_list, 3136 MPI_IOCSTATUS_INVALID_STATE); 3137 3138 /* 3139 * XXX: We need to repost ELS and Target Command Buffers? 3140 */ 3141 3142 /* 3143 * Inform the XPT that a bus reset has occurred. 3144 */ 3145 xpt_async(AC_BUS_RESET, mpt->path, NULL); 3146 } 3147 3148 /* 3149 * Parse additional completion information in the reply 3150 * frame for SCSI I/O requests. 
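 *
 * This covers transfer residuals, autosense data, and the mapping
 * of MPI IOCStatus values onto CAM status codes.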
3151 */ 3152 static int 3153 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, 3154 MSG_DEFAULT_REPLY *reply_frame) 3155 { 3156 union ccb *ccb; 3157 MSG_SCSI_IO_REPLY *scsi_io_reply; 3158 u_int ioc_status; 3159 u_int sstate; 3160 3161 MPT_DUMP_REPLY_FRAME(mpt, reply_frame); 3162 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST 3163 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, 3164 ("MPT SCSI I/O Handler called with incorrect reply type")); 3165 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, 3166 ("MPT SCSI I/O Handler called with continuation reply")); 3167 3168 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; 3169 ioc_status = le16toh(scsi_io_reply->IOCStatus); 3170 ioc_status &= MPI_IOCSTATUS_MASK; 3171 sstate = scsi_io_reply->SCSIState; 3172 3173 ccb = req->ccb; 3174 ccb->csio.resid = 3175 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); 3176 3177 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 3178 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { 3179 uint32_t sense_returned; 3180 3181 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 3182 3183 sense_returned = le32toh(scsi_io_reply->SenseCount); 3184 if (sense_returned < ccb->csio.sense_len) 3185 ccb->csio.sense_resid = ccb->csio.sense_len - 3186 sense_returned; 3187 else 3188 ccb->csio.sense_resid = 0; 3189 3190 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); 3191 bcopy(req->sense_vbuf, &ccb->csio.sense_data, 3192 min(ccb->csio.sense_len, sense_returned)); 3193 } 3194 3195 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { 3196 /* 3197 * Tag messages rejected, but non-tagged retry 3198 * was successful. 3199 XXXX 3200 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); 3201 */ 3202 } 3203 3204 switch(ioc_status) { 3205 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3206 /* 3207 * XXX 3208 * Linux driver indicates that a zero 3209 * transfer length with this error code 3210 * indicates a CRC error. 3211 * 3212 * No need to swap the bytes for checking 3213 * against zero. 3214 */ 3215 if (scsi_io_reply->TransferCount == 0) { 3216 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3217 break; 3218 } 3219 /* FALLTHROUGH */ 3220 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 3221 case MPI_IOCSTATUS_SUCCESS: 3222 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 3223 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { 3224 /* 3225 * Status was never returned for this transaction. 3226 */ 3227 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); 3228 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { 3229 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; 3230 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); 3231 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) 3232 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); 3233 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { 3234 3235 /* XXX Handle SPI-Packet and FCP-2 response info. */ 3236 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3237 } else 3238 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3239 break; 3240 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: 3241 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); 3242 break; 3243 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: 3244 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3245 break; 3246 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3247 /* 3248 * Since selection timeouts and "device really not 3249 * there" are grouped into this error code, report 3250 * selection timeout. 
Selection timeouts are
3251 	 * typically retried before giving up on the device
3252 	 * whereas "device not there" errors are considered
3253 	 * unretryable.
3254 	 */
3255 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3256 		break;
3257 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3258 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3259 		break;
3260 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3261 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3262 		break;
3263 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3264 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3265 		break;
3266 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3267 		ccb->ccb_h.status = CAM_UA_TERMIO;
3268 		break;
3269 	case MPI_IOCSTATUS_INVALID_STATE:
3270 		/*
3271 		 * The IOC has been reset.  Emulate a bus reset.
3272 		 */
3273 		/* FALLTHROUGH */
3274 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3275 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3276 		break;
3277 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3278 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3279 		/*
3280 		 * Don't clobber any timeout status that has
3281 		 * already been set for this transaction.  We
3282 		 * want the SCSI layer to be able to differentiate
3283 		 * between the command we aborted due to timeout
3284 		 * and any innocent bystanders.
3285 		 */
3286 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3287 			break;
3288 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3289 		break;
3290 
3291 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3292 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3293 		break;
3294 	case MPI_IOCSTATUS_BUSY:
3295 		mpt_set_ccb_status(ccb, CAM_BUSY);
3296 		break;
3297 	case MPI_IOCSTATUS_INVALID_FUNCTION:
3298 	case MPI_IOCSTATUS_INVALID_SGL:
3299 	case MPI_IOCSTATUS_INTERNAL_ERROR:
3300 	case MPI_IOCSTATUS_INVALID_FIELD:
3301 	default:
3302 		/* XXX
3303 		 * Some of the above may need to kick off
3304 		 * a recovery action!!!!
3305 		 */
3306 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3307 		break;
3308 	}
3309 
3310 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3311 		mpt_freeze_ccb(ccb);
3312 	}
3313 
3314 	return (TRUE);
3315 }
3316 
3317 static void
3318 mpt_action(struct cam_sim *sim, union ccb *ccb)
3319 {
3320 	struct mpt_softc *mpt;
3321 	struct ccb_trans_settings *cts;
3322 	target_id_t tgt;
3323 	lun_id_t lun;
3324 	int raid_passthru;
3325 
3326 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3327 
3328 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
3329 	raid_passthru = (sim == mpt->phydisk_sim);
3330 	MPT_LOCK_ASSERT(mpt);
3331 
3332 	tgt = ccb->ccb_h.target_id;
3333 	lun = ccb->ccb_h.target_lun;
3334 	if (raid_passthru &&
3335 	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
3336 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
3337 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
3338 		CAMLOCK_2_MPTLOCK(mpt);
3339 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3340 			MPTLOCK_2_CAMLOCK(mpt);
3341 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3342 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3343 			xpt_done(ccb);
3344 			return;
3345 		}
3346 		MPTLOCK_2_CAMLOCK(mpt);
3347 	}
3348 	ccb->ccb_h.ccb_mpt_ptr = mpt;
3349 
3350 	switch (ccb->ccb_h.func_code) {
3351 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
3352 		/*
3353 		 * Do a couple of preliminary checks...
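		 * (i.e. that the CDB is addressable by the kernel and that
		 * it fits within the fixed 16-byte CDB field of the SCSI
		 * I/O request frame).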
3354 		 */
3355 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3356 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3357 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3358 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3359 				break;
3360 			}
3361 		}
3362 		/* Max supported CDB length is 16 bytes */
3363 		/* XXX Unless we implement the new 32byte message type */
3364 		if (ccb->csio.cdb_len >
3365 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3366 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3367 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3368 			break;
3369 		}
3370 #ifdef	MPT_TEST_MULTIPATH
3371 		if (mpt->failure_id == ccb->ccb_h.target_id) {
3372 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3373 			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3374 			break;
3375 		}
3376 #endif
3377 		ccb->csio.scsi_status = SCSI_STATUS_OK;
3378 		mpt_start(sim, ccb);
3379 		return;
3380 
3381 	case XPT_RESET_BUS:
3382 		if (raid_passthru) {
3383 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3384 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3385 			break;
3386 		}
3387 	case XPT_RESET_DEV:
3388 		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3389 			if (bootverbose) {
3390 				xpt_print(ccb->ccb_h.path, "reset bus\n");
3391 			}
3392 		} else {
3393 			xpt_print(ccb->ccb_h.path, "reset device\n");
3394 		}
3395 		CAMLOCK_2_MPTLOCK(mpt);
3396 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3397 		MPTLOCK_2_CAMLOCK(mpt);
3398 
3399 		/*
3400 		 * mpt_bus_reset is always successful in that it
3401 		 * will fall back to a hard reset should a bus
3402 		 * reset attempt fail.
3403 		 */
3404 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3405 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3406 		break;
3407 
3408 	case XPT_ABORT:
3409 	{
3410 		union ccb *accb = ccb->cab.abort_ccb;
3411 		CAMLOCK_2_MPTLOCK(mpt);
3412 		switch (accb->ccb_h.func_code) {
3413 		case XPT_ACCEPT_TARGET_IO:
3414 		case XPT_IMMEDIATE_NOTIFY:
3415 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3416 			break;
3417 		case XPT_CONT_TARGET_IO:
3418 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3419 			ccb->ccb_h.status = CAM_UA_ABORT;
3420 			break;
3421 		case XPT_SCSI_IO:
3422 			ccb->ccb_h.status = CAM_UA_ABORT;
3423 			break;
3424 		default:
3425 			ccb->ccb_h.status = CAM_REQ_INVALID;
3426 			break;
3427 		}
3428 		MPTLOCK_2_CAMLOCK(mpt);
3429 		break;
3430 	}
3431 
3432 #ifdef	CAM_NEW_TRAN_CODE
3433 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3434 #else
3435 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3436 #endif
3437 #define	DP_DISC_ENABLE	0x1
3438 #define	DP_DISC_DISABL	0x2
3439 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
3440 
3441 #define	DP_TQING_ENABLE	0x4
3442 #define	DP_TQING_DISABL	0x8
3443 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
3444 
3445 #define	DP_WIDE		0x10
3446 #define	DP_NARROW	0x20
3447 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
3448 
3449 #define	DP_SYNC		0x40
3450 
3451 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
3452 	{
3453 #ifdef	CAM_NEW_TRAN_CODE
3454 		struct ccb_trans_settings_scsi *scsi;
3455 		struct ccb_trans_settings_spi *spi;
3456 #endif
3457 		uint8_t dval;
3458 		u_int period;
3459 		u_int offset;
3460 		int i, j;
3461 
3462 		cts = &ccb->cts;
3463 
3464 		if (mpt->is_fc || mpt->is_sas) {
3465 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3466 			break;
3467 		}
3468 
3469 #ifdef	CAM_NEW_TRAN_CODE
3470 		scsi = &cts->proto_specific.scsi;
3471 		spi = &cts->xport_specific.spi;
3472 
3473 		/*
3474 		 * We can be called just to validate transport and proto
3475 		 * versions
3476 		 */
3476 		if (scsi->valid == 0 && spi->valid == 0) {
3477 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3478 			break;
3479 		}
3480 #endif
3481 
3482 		/*
3483 		 * Skip attempting settings on RAID volume disks.
3484 * Other devices on the bus get the normal treatment. 3485 */ 3486 if (mpt->phydisk_sim && raid_passthru == 0 && 3487 mpt_is_raid_volume(mpt, tgt) != 0) { 3488 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3489 "no transfer settings for RAID vols\n"); 3490 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3491 break; 3492 } 3493 3494 i = mpt->mpt_port_page2.PortSettings & 3495 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 3496 j = mpt->mpt_port_page2.PortFlags & 3497 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 3498 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && 3499 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { 3500 mpt_lprt(mpt, MPT_PRT_ALWAYS, 3501 "honoring BIOS transfer negotiations\n"); 3502 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3503 break; 3504 } 3505 3506 dval = 0; 3507 period = 0; 3508 offset = 0; 3509 3510 #ifndef CAM_NEW_TRAN_CODE 3511 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 3512 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ? 3513 DP_DISC_ENABLE : DP_DISC_DISABL; 3514 } 3515 3516 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 3517 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ? 3518 DP_TQING_ENABLE : DP_TQING_DISABL; 3519 } 3520 3521 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 3522 dval |= cts->bus_width ? DP_WIDE : DP_NARROW; 3523 } 3524 3525 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) && 3526 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) { 3527 dval |= DP_SYNC; 3528 period = cts->sync_period; 3529 offset = cts->sync_offset; 3530 } 3531 #else 3532 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 3533 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? 3534 DP_DISC_ENABLE : DP_DISC_DISABL; 3535 } 3536 3537 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 3538 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 3539 DP_TQING_ENABLE : DP_TQING_DISABL; 3540 } 3541 3542 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 3543 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? 
3544 DP_WIDE : DP_NARROW; 3545 } 3546 3547 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { 3548 dval |= DP_SYNC; 3549 offset = spi->sync_offset; 3550 } else { 3551 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3552 &mpt->mpt_dev_page1[tgt]; 3553 offset = ptr->RequestedParameters; 3554 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3555 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3556 } 3557 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { 3558 dval |= DP_SYNC; 3559 period = spi->sync_period; 3560 } else { 3561 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3562 &mpt->mpt_dev_page1[tgt]; 3563 period = ptr->RequestedParameters; 3564 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3565 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3566 } 3567 #endif 3568 CAMLOCK_2_MPTLOCK(mpt); 3569 if (dval & DP_DISC_ENABLE) { 3570 mpt->mpt_disc_enable |= (1 << tgt); 3571 } else if (dval & DP_DISC_DISABL) { 3572 mpt->mpt_disc_enable &= ~(1 << tgt); 3573 } 3574 if (dval & DP_TQING_ENABLE) { 3575 mpt->mpt_tag_enable |= (1 << tgt); 3576 } else if (dval & DP_TQING_DISABL) { 3577 mpt->mpt_tag_enable &= ~(1 << tgt); 3578 } 3579 if (dval & DP_WIDTH) { 3580 mpt_setwidth(mpt, tgt, 1); 3581 } 3582 if (dval & DP_SYNC) { 3583 mpt_setsync(mpt, tgt, period, offset); 3584 } 3585 if (dval == 0) { 3586 MPTLOCK_2_CAMLOCK(mpt); 3587 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3588 break; 3589 } 3590 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3591 "set [%d]: 0x%x period 0x%x offset %d\n", 3592 tgt, dval, period, offset); 3593 if (mpt_update_spi_config(mpt, tgt)) { 3594 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3595 } else { 3596 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3597 } 3598 MPTLOCK_2_CAMLOCK(mpt); 3599 break; 3600 } 3601 case XPT_GET_TRAN_SETTINGS: 3602 { 3603 #ifdef CAM_NEW_TRAN_CODE 3604 struct ccb_trans_settings_scsi *scsi; 3605 cts = &ccb->cts; 3606 cts->protocol = PROTO_SCSI; 3607 if (mpt->is_fc) { 3608 struct ccb_trans_settings_fc *fc = 3609 &cts->xport_specific.fc; 3610 cts->protocol_version = SCSI_REV_SPC; 3611 cts->transport = XPORT_FC; 3612 cts->transport_version = 0; 3613 fc->valid = CTS_FC_VALID_SPEED; 3614 fc->bitrate = 100000; 3615 } else if (mpt->is_sas) { 3616 struct ccb_trans_settings_sas *sas = 3617 &cts->xport_specific.sas; 3618 cts->protocol_version = SCSI_REV_SPC2; 3619 cts->transport = XPORT_SAS; 3620 cts->transport_version = 0; 3621 sas->valid = CTS_SAS_VALID_SPEED; 3622 sas->bitrate = 300000; 3623 } else { 3624 cts->protocol_version = SCSI_REV_2; 3625 cts->transport = XPORT_SPI; 3626 cts->transport_version = 2; 3627 if (mpt_get_spi_settings(mpt, cts) != 0) { 3628 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3629 break; 3630 } 3631 } 3632 scsi = &cts->proto_specific.scsi; 3633 scsi->valid = CTS_SCSI_VALID_TQ; 3634 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 3635 #else 3636 cts = &ccb->cts; 3637 if (mpt->is_fc) { 3638 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3639 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3640 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3641 } else if (mpt->is_sas) { 3642 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB; 3643 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID; 3644 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3645 } else if (mpt_get_spi_settings(mpt, cts) != 0) { 3646 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3647 break; 3648 } 3649 #endif 3650 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3651 break; 3652 } 3653 case XPT_CALC_GEOMETRY: 3654 { 3655 struct ccb_calc_geometry *ccg; 3656 3657 ccg = &ccb->ccg; 3658 if (ccg->block_size == 0) { 3659 ccb->ccb_h.status &= 
~CAM_SIM_QUEUED;
3660 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3661 			break;
3662 		}
3663 		mpt_calc_geometry(ccg, /*extended*/1);
3664 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3665 		break;
3666 	}
3667 	case XPT_PATH_INQ:		/* Path routing inquiry */
3668 	{
3669 		struct ccb_pathinq *cpi = &ccb->cpi;
3670 
3671 		cpi->version_num = 1;
3672 		cpi->target_sprt = 0;
3673 		cpi->hba_eng_cnt = 0;
3674 		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3675 		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3676 		/*
3677 		 * FC cards report MAX_DEVICES of 512, but
3678 		 * the MSG_SCSI_IO_REQUEST target id field
3679 		 * is only 8 bits. Until we fix the driver
3680 		 * to support 'channels' for bus overflow,
3681 		 * just limit it.
3682 		 */
3683 		if (cpi->max_target > 255) {
3684 			cpi->max_target = 255;
3685 		}
3686 
3687 		/*
3688 		 * VMware ESX reports > 16 devices and then dies when we probe.
3689 		 */
3690 		if (mpt->is_spi && cpi->max_target > 15) {
3691 			cpi->max_target = 15;
3692 		}
3693 		if (mpt->is_spi)
3694 			cpi->max_lun = 7;
3695 		else
3696 			cpi->max_lun = MPT_MAX_LUNS;
3697 		cpi->initiator_id = mpt->mpt_ini_id;
3698 		cpi->bus_id = cam_sim_bus(sim);
3699 
3700 		/*
3701 		 * The base speed is the speed of the underlying connection.
3702 		 */
3703 #ifdef	CAM_NEW_TRAN_CODE
3704 		cpi->protocol = PROTO_SCSI;
3705 		if (mpt->is_fc) {
3706 			cpi->hba_misc = PIM_NOBUSRESET;
3707 			cpi->base_transfer_speed = 100000;
3708 			cpi->hba_inquiry = PI_TAG_ABLE;
3709 			cpi->transport = XPORT_FC;
3710 			cpi->transport_version = 0;
3711 			cpi->protocol_version = SCSI_REV_SPC;
3712 		} else if (mpt->is_sas) {
3713 			cpi->hba_misc = PIM_NOBUSRESET;
3714 			cpi->base_transfer_speed = 300000;
3715 			cpi->hba_inquiry = PI_TAG_ABLE;
3716 			cpi->transport = XPORT_SAS;
3717 			cpi->transport_version = 0;
3718 			cpi->protocol_version = SCSI_REV_SPC2;
3719 		} else {
3720 			cpi->hba_misc = PIM_SEQSCAN;
3721 			cpi->base_transfer_speed = 3300;
3722 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3723 			cpi->transport = XPORT_SPI;
3724 			cpi->transport_version = 2;
3725 			cpi->protocol_version = SCSI_REV_2;
3726 		}
3727 #else
3728 		if (mpt->is_fc) {
3729 			cpi->hba_misc = PIM_NOBUSRESET;
3730 			cpi->base_transfer_speed = 100000;
3731 			cpi->hba_inquiry = PI_TAG_ABLE;
3732 		} else if (mpt->is_sas) {
3733 			cpi->hba_misc = PIM_NOBUSRESET;
3734 			cpi->base_transfer_speed = 300000;
3735 			cpi->hba_inquiry = PI_TAG_ABLE;
3736 		} else {
3737 			cpi->hba_misc = PIM_SEQSCAN;
3738 			cpi->base_transfer_speed = 3300;
3739 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3740 		}
3741 #endif
3742 
3743 		/*
3744 		 * We give our fake RAID passthru bus a width that is
3745 		 * MaxPhysDisks wide and restrict it to one lun.
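		 * The initiator id is set one past max_target so that it
		 * can never collide with a physical disk address.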
3746 */ 3747 if (raid_passthru) { 3748 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; 3749 cpi->initiator_id = cpi->max_target + 1; 3750 cpi->max_lun = 0; 3751 } 3752 3753 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { 3754 cpi->hba_misc |= PIM_NOINITIATOR; 3755 } 3756 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 3757 cpi->target_sprt = 3758 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3759 } else { 3760 cpi->target_sprt = 0; 3761 } 3762 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3763 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); 3764 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3765 cpi->unit_number = cam_sim_unit(sim); 3766 cpi->ccb_h.status = CAM_REQ_CMP; 3767 break; 3768 } 3769 case XPT_EN_LUN: /* Enable LUN as a target */ 3770 { 3771 int result; 3772 3773 CAMLOCK_2_MPTLOCK(mpt); 3774 if (ccb->cel.enable) 3775 result = mpt_enable_lun(mpt, 3776 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3777 else 3778 result = mpt_disable_lun(mpt, 3779 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3780 MPTLOCK_2_CAMLOCK(mpt); 3781 if (result == 0) { 3782 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3783 } else { 3784 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3785 } 3786 break; 3787 } 3788 case XPT_NOTIFY_ACKNOWLEDGE: /* recycle notify ack */ 3789 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ 3790 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 3791 { 3792 tgt_resource_t *trtp; 3793 lun_id_t lun = ccb->ccb_h.target_lun; 3794 ccb->ccb_h.sim_priv.entries[0].field = 0; 3795 ccb->ccb_h.sim_priv.entries[1].ptr = mpt; 3796 ccb->ccb_h.flags = 0; 3797 3798 if (lun == CAM_LUN_WILDCARD) { 3799 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 3800 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3801 break; 3802 } 3803 trtp = &mpt->trt_wildcard; 3804 } else if (lun >= MPT_MAX_LUNS) { 3805 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3806 break; 3807 } else { 3808 trtp = &mpt->trt[lun]; 3809 } 3810 CAMLOCK_2_MPTLOCK(mpt); 3811 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 3812 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3813 "Put FREE ATIO %p lun %d\n", ccb, lun); 3814 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, 3815 sim_links.stqe); 3816 } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { 3817 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3818 "Put FREE INOT lun %d\n", lun); 3819 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, 3820 sim_links.stqe); 3821 } else { 3822 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n"); 3823 } 3824 mpt_set_ccb_status(ccb, CAM_REQ_INPROG); 3825 MPTLOCK_2_CAMLOCK(mpt); 3826 return; 3827 } 3828 case XPT_CONT_TARGET_IO: 3829 CAMLOCK_2_MPTLOCK(mpt); 3830 mpt_target_start_io(mpt, ccb); 3831 MPTLOCK_2_CAMLOCK(mpt); 3832 return; 3833 3834 default: 3835 ccb->ccb_h.status = CAM_REQ_INVALID; 3836 break; 3837 } 3838 xpt_done(ccb); 3839 } 3840 3841 static int 3842 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) 3843 { 3844 #ifdef CAM_NEW_TRAN_CODE 3845 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 3846 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 3847 #endif 3848 target_id_t tgt; 3849 uint32_t dval, pval, oval; 3850 int rv; 3851 3852 if (IS_CURRENT_SETTINGS(cts) == 0) { 3853 tgt = cts->ccb_h.target_id; 3854 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { 3855 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { 3856 return (-1); 3857 } 3858 } else { 3859 tgt = cts->ccb_h.target_id; 3860 } 3861 3862 /* 3863 * We aren't looking at Port Page 2 BIOS settings here- 3864 * sometimes these have been known 
static int
mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
{
#ifdef	CAM_NEW_TRAN_CODE
	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
#endif
	target_id_t tgt;
	uint32_t dval, pval, oval;
	int rv;

	if (IS_CURRENT_SETTINGS(cts) == 0) {
		tgt = cts->ccb_h.target_id;
	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
			return (-1);
		}
	} else {
		tgt = cts->ccb_h.target_id;
	}

	/*
	 * We aren't looking at Port Page 2 BIOS settings here-
	 * sometimes these have been known to be bogus XXX.
	 *
	 * For user settings, we pick the max from port page 0
	 *
	 * For current settings we read the current settings out from
	 * device page 0 for that target.
	 */
	if (IS_CURRENT_SETTINGS(cts)) {
		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
		dval = 0;

		CAMLOCK_2_MPTLOCK(mpt);
		tmp = mpt->mpt_dev_page0[tgt];
		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
		    sizeof(tmp), FALSE, 5000);
		if (rv) {
			MPTLOCK_2_CAMLOCK(mpt);
			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
			return (rv);
		}
		mpt2host_config_page_scsi_device_0(&tmp);

		MPTLOCK_2_CAMLOCK(mpt);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
		    tmp.NegotiatedParameters, tmp.Information);
		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
		    DP_WIDE : DP_NARROW;
		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
		    DP_DISC_ENABLE : DP_DISC_DISABL;
		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
		    DP_TQING_ENABLE : DP_TQING_DISABL;
		oval = tmp.NegotiatedParameters;
		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
		pval = tmp.NegotiatedParameters;
		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
		mpt->mpt_dev_page0[tgt] = tmp;
	} else {
		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
		oval = mpt->mpt_port_page0.Capabilities;
		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
		pval = mpt->mpt_port_page0.Capabilities;
		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
	}

#ifndef	CAM_NEW_TRAN_CODE
	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
	cts->valid = 0;
	cts->sync_period = pval;
	cts->sync_offset = oval;
	cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
	cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
	cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
	if (dval & DP_WIDE) {
		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
	} else {
		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
	}
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
		if (dval & DP_DISC_ENABLE) {
			cts->flags |= CCB_TRANS_DISC_ENB;
		}
		if (dval & DP_TQING_ENABLE) {
			cts->flags |= CCB_TRANS_TAG_ENB;
		}
	}
#else
	spi->valid = 0;
	scsi->valid = 0;
	spi->flags = 0;
	scsi->flags = 0;
	spi->sync_offset = oval;
	spi->sync_period = pval;
	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
	if (dval & DP_WIDE) {
		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
	} else {
		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
	}
	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		if (dval & DP_TQING_ENABLE) {
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		}
		spi->valid |= CTS_SPI_VALID_DISC;
		if (dval & DP_DISC_ENABLE) {
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
		}
	}
#endif
	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
	    IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
	return (0);
}
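/*
 * Note: the two helpers below only stage negotiation goals in the softc's
 * cached copy of SCSI device page 1; nothing reaches the chip until
 * mpt_update_spi_config() writes the page out. The period thresholds in
 * mpt_setsync() appear to follow the SPI period-factor encoding: factors
 * below 0x0a (Ultra160-class and faster) request DT, and factors below
 * 0x09 (Ultra320-class) additionally request IU and QAS.
 */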
"ACTIVE" : "NVRAM ", dval, pval, oval); 3962 return (0); 3963 } 3964 3965 static void 3966 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 3967 { 3968 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3969 3970 ptr = &mpt->mpt_dev_page1[tgt]; 3971 if (onoff) { 3972 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 3973 } else { 3974 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 3975 } 3976 } 3977 3978 static void 3979 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 3980 { 3981 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3982 3983 ptr = &mpt->mpt_dev_page1[tgt]; 3984 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3985 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3986 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 3987 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 3988 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 3989 if (period == 0) { 3990 return; 3991 } 3992 ptr->RequestedParameters |= 3993 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3994 ptr->RequestedParameters |= 3995 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3996 if (period < 0xa) { 3997 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 3998 } 3999 if (period < 0x9) { 4000 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 4001 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 4002 } 4003 } 4004 4005 static int 4006 mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 4007 { 4008 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 4009 int rv; 4010 4011 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 4012 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 4013 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); 4014 tmp = mpt->mpt_dev_page1[tgt]; 4015 host2mpt_config_page_scsi_device_1(&tmp); 4016 rv = mpt_write_cur_cfg_page(mpt, tgt, 4017 &tmp.Header, sizeof(tmp), FALSE, 5000); 4018 if (rv) { 4019 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 4020 return (-1); 4021 } 4022 return (0); 4023 } 4024 4025 static void 4026 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended) 4027 { 4028 #if __FreeBSD_version >= 500000 4029 cam_calc_geometry(ccg, extended); 4030 #else 4031 uint32_t size_mb; 4032 uint32_t secs_per_cylinder; 4033 4034 if (ccg->block_size == 0) { 4035 ccg->ccb_h.status = CAM_REQ_INVALID; 4036 return; 4037 } 4038 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size); 4039 if (size_mb > 1024 && extended) { 4040 ccg->heads = 255; 4041 ccg->secs_per_track = 63; 4042 } else { 4043 ccg->heads = 64; 4044 ccg->secs_per_track = 32; 4045 } 4046 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 4047 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 4048 ccg->ccb_h.status = CAM_REQ_CMP; 4049 #endif 4050 } 4051 4052 /****************************** Timeout Recovery ******************************/ 4053 static int 4054 mpt_spawn_recovery_thread(struct mpt_softc *mpt) 4055 { 4056 int error; 4057 4058 error = mpt_kthread_create(mpt_recovery_thread, mpt, 4059 &mpt->recovery_thread, /*flags*/0, 4060 /*altstack*/0, "mpt_recovery%d", mpt->unit); 4061 return (error); 4062 } 4063 4064 static void 4065 mpt_terminate_recovery_thread(struct mpt_softc *mpt) 4066 { 4067 4068 if (mpt->recovery_thread == NULL) { 4069 return; 4070 } 4071 mpt->shutdwn_recovery = 1; 4072 wakeup(mpt); 4073 /* 4074 * Sleep on a slightly different location 4075 * for this interlock just for added safety. 
static void
mpt_recovery_thread(void *arg)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)arg;
	MPT_LOCK(mpt);
	for (;;) {
		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
			if (mpt->shutdwn_recovery == 0) {
				mpt_sleep(mpt, mpt, PUSER, "idle", 0);
			}
		}
		if (mpt->shutdwn_recovery != 0) {
			break;
		}
		mpt_recover_commands(mpt);
	}
	mpt->recovery_thread = NULL;
	wakeup(&mpt->recovery_thread);
	MPT_UNLOCK(mpt);
	mpt_kthread_exit(0);
}

static int
mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
    u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
{
	MSG_SCSI_TASK_MGMT *tmf_req;
	int error;

	/*
	 * Wait for any current TMF request to complete.
	 * We're only allowed to issue one TMF at a time.
	 */
	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
	if (error != 0) {
		mpt_reset(mpt, TRUE);
		return (ETIMEDOUT);
	}

	mpt_assign_serno(mpt, mpt->tmf_req);
	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;

	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
	memset(tmf_req, 0, sizeof(*tmf_req));
	tmf_req->TargetID = target;
	tmf_req->Bus = channel;
	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
	tmf_req->TaskType = type;
	tmf_req->MsgFlags = flags;
	tmf_req->MsgContext =
	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
	if (lun > MPT_MAX_LUNS) {
		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
		tmf_req->LUN[1] = lun & 0xff;
	} else {
		tmf_req->LUN[1] = lun;
	}
	tmf_req->TaskMsgContext = abort_ctx;

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
	    mpt->tmf_req->serno, tmf_req->MsgContext);
	if (mpt->verbose > MPT_PRT_DEBUG) {
		mpt_print_request(tmf_req);
	}

	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
	if (error != MPT_OK) {
		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
		mpt->tmf_req->state = REQ_STATE_FREE;
		mpt_reset(mpt, TRUE);
	}
	return (error);
}
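/*
 * A note on the LUN bytes built above: LUNs that do not fit in a single
 * byte are encoded with the SAM flat-addressing format, i.e. the first
 * level byte is 0x40 | (lun >> 8) followed by the low eight bits of the
 * lun; single-byte LUNs go into LUN[1] with a zero first byte. The same
 * encoding recurs in the target-mode paths further below.
 */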
/*
 * When a command times out, it is placed on the request_timeout_list
 * and we wake our recovery thread.  The MPT-Fusion architecture supports
 * only a single TMF operation at a time, so we serially abort/bdr, etc,
 * the timedout transactions.  The next TMF is issued either by the
 * completion handler of the current TMF waking our recovery thread,
 * or the TMF timeout handler causing a hard reset sequence.
 */
static void
mpt_recover_commands(struct mpt_softc *mpt)
{
	request_t *req;
	union ccb *ccb;
	int error;

	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
		/*
		 * No work to do- leave.
		 */
		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
		return;
	}

	/*
	 * Flush any commands whose completion coincides with their timeout.
	 */
	mpt_intr(mpt);

	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
		/*
		 * The timedout commands have already
		 * completed.  This typically means
		 * that either the timeout value was on
		 * the hairy edge of what the device
		 * requires or - more likely - interrupts
		 * are not happening.
		 */
		mpt_prt(mpt, "Timedout requests already complete. "
		    "Interrupts may not be functioning.\n");
		mpt_enable_ints(mpt);
		return;
	}

	/*
	 * We have no visibility into the current state of the
	 * controller, so attempt to abort the commands in the
	 * order they timed-out. For initiator commands, we
	 * depend on the reply handler pulling requests off
	 * the timeout list.
	 */
	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
		uint16_t status;
		uint8_t response;
		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;

		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
		    req, req->serno, hdrp->Function);
		ccb = req->ccb;
		if (ccb == NULL) {
			mpt_prt(mpt, "null ccb in timed out request. "
			    "Resetting Controller.\n");
			mpt_reset(mpt, TRUE);
			continue;
		}
		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);

		/*
		 * If this is not an initiator command, deal
		 * with it differently.
		 */
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			break;
		default:
			/*
			 * XXX: FIX ME: need to abort target assists...
			 */
			mpt_prt(mpt, "just putting it back on the pend q\n");
			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
			    links);
			continue;
		}

		error = mpt_scsi_send_tmf(mpt,
		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
		    htole32(req->index | scsi_io_handler_id), TRUE);

		if (error != 0) {
			/*
			 * mpt_scsi_send_tmf hard resets on failure, so no
			 * need to do so here.  Our queue should be emptied
			 * by the hard reset.
			 */
			continue;
		}

		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
		    REQ_STATE_DONE, TRUE, 500);

		status = le16toh(mpt->tmf_req->IOCStatus);
		response = mpt->tmf_req->ResponseCode;
		mpt->tmf_req->state = REQ_STATE_FREE;

		if (error != 0) {
			/*
			 * If we've errored out, reset the controller.
			 */
			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
			    "Resetting controller\n");
			mpt_reset(mpt, TRUE);
			continue;
		}

		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
			    "Resetting controller.\n", status);
			mpt_reset(mpt, TRUE);
			continue;
		}
" 4287 "Resetting controller.\n", response); 4288 mpt_reset(mpt, TRUE); 4289 continue; 4290 } 4291 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 4292 } 4293 } 4294 4295 /************************ Target Mode Support ****************************/ 4296 static void 4297 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 4298 { 4299 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 4300 PTR_SGE_TRANSACTION32 tep; 4301 PTR_SGE_SIMPLE32 se; 4302 bus_addr_t paddr; 4303 uint32_t fl; 4304 4305 paddr = req->req_pbuf; 4306 paddr += MPT_RQSL(mpt); 4307 4308 fc = req->req_vbuf; 4309 memset(fc, 0, MPT_REQUEST_AREA); 4310 fc->BufferCount = 1; 4311 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 4312 fc->MsgContext = htole32(req->index | fc_els_handler_id); 4313 4314 /* 4315 * Okay, set up ELS buffer pointers. ELS buffer pointers 4316 * consist of a TE SGL element (with details length of zero) 4317 * followed by a SIMPLE SGL element which holds the address 4318 * of the buffer. 4319 */ 4320 4321 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 4322 4323 tep->ContextSize = 4; 4324 tep->Flags = 0; 4325 tep->TransactionContext[0] = htole32(ioindex); 4326 4327 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 4328 fl = 4329 MPI_SGE_FLAGS_HOST_TO_IOC | 4330 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4331 MPI_SGE_FLAGS_LAST_ELEMENT | 4332 MPI_SGE_FLAGS_END_OF_LIST | 4333 MPI_SGE_FLAGS_END_OF_BUFFER; 4334 fl <<= MPI_SGE_FLAGS_SHIFT; 4335 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 4336 se->FlagsLength = htole32(fl); 4337 se->Address = htole32((uint32_t) paddr); 4338 mpt_lprt(mpt, MPT_PRT_DEBUG, 4339 "add ELS index %d ioindex %d for %p:%u\n", 4340 req->index, ioindex, req, req->serno); 4341 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 4342 ("mpt_fc_post_els: request not locked")); 4343 mpt_send_cmd(mpt, req); 4344 } 4345 4346 static void 4347 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 4348 { 4349 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 4350 PTR_CMD_BUFFER_DESCRIPTOR cb; 4351 bus_addr_t paddr; 4352 4353 paddr = req->req_pbuf; 4354 paddr += MPT_RQSL(mpt); 4355 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 4356 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 4357 4358 fc = req->req_vbuf; 4359 fc->BufferCount = 1; 4360 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 4361 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4362 4363 cb = &fc->Buffer[0]; 4364 cb->IoIndex = htole16(ioindex); 4365 cb->u.PhysicalAddress32 = htole32((U32) paddr); 4366 4367 mpt_check_doorbell(mpt); 4368 mpt_send_cmd(mpt, req); 4369 } 4370 4371 static int 4372 mpt_add_els_buffers(struct mpt_softc *mpt) 4373 { 4374 int i; 4375 4376 if (mpt->is_fc == 0) { 4377 return (TRUE); 4378 } 4379 4380 if (mpt->els_cmds_allocated) { 4381 return (TRUE); 4382 } 4383 4384 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), 4385 M_DEVBUF, M_NOWAIT | M_ZERO); 4386 4387 if (mpt->els_cmd_ptrs == NULL) { 4388 return (FALSE); 4389 } 4390 4391 /* 4392 * Feed the chip some ELS buffer resources 4393 */ 4394 for (i = 0; i < MPT_MAX_ELS; i++) { 4395 request_t *req = mpt_get_request(mpt, FALSE); 4396 if (req == NULL) { 4397 break; 4398 } 4399 req->state |= REQ_STATE_LOCKED; 4400 mpt->els_cmd_ptrs[i] = req; 4401 mpt_fc_post_els(mpt, req, i); 4402 } 4403 4404 if (i == 0) { 4405 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 4406 free(mpt->els_cmd_ptrs, M_DEVBUF); 4407 mpt->els_cmd_ptrs = NULL; 4408 return (FALSE); 4409 } 4410 if (i != MPT_MAX_ELS) { 4411 mpt_lprt(mpt, MPT_PRT_INFO, 4412 
"only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 4413 } 4414 mpt->els_cmds_allocated = i; 4415 return(TRUE); 4416 } 4417 4418 static int 4419 mpt_add_target_commands(struct mpt_softc *mpt) 4420 { 4421 int i, max; 4422 4423 if (mpt->tgt_cmd_ptrs) { 4424 return (TRUE); 4425 } 4426 4427 max = MPT_MAX_REQUESTS(mpt) >> 1; 4428 if (max > mpt->mpt_max_tgtcmds) { 4429 max = mpt->mpt_max_tgtcmds; 4430 } 4431 mpt->tgt_cmd_ptrs = 4432 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 4433 if (mpt->tgt_cmd_ptrs == NULL) { 4434 mpt_prt(mpt, 4435 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 4436 return (FALSE); 4437 } 4438 4439 for (i = 0; i < max; i++) { 4440 request_t *req; 4441 4442 req = mpt_get_request(mpt, FALSE); 4443 if (req == NULL) { 4444 break; 4445 } 4446 req->state |= REQ_STATE_LOCKED; 4447 mpt->tgt_cmd_ptrs[i] = req; 4448 mpt_post_target_command(mpt, req, i); 4449 } 4450 4451 4452 if (i == 0) { 4453 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 4454 free(mpt->tgt_cmd_ptrs, M_DEVBUF); 4455 mpt->tgt_cmd_ptrs = NULL; 4456 return (FALSE); 4457 } 4458 4459 mpt->tgt_cmds_allocated = i; 4460 4461 if (i < max) { 4462 mpt_lprt(mpt, MPT_PRT_INFO, 4463 "added %d of %d target bufs\n", i, max); 4464 } 4465 return (i); 4466 } 4467 4468 static int 4469 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4470 { 4471 4472 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4473 mpt->twildcard = 1; 4474 } else if (lun >= MPT_MAX_LUNS) { 4475 return (EINVAL); 4476 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4477 return (EINVAL); 4478 } 4479 if (mpt->tenabled == 0) { 4480 if (mpt->is_fc) { 4481 (void) mpt_fc_reset_link(mpt, 0); 4482 } 4483 mpt->tenabled = 1; 4484 } 4485 if (lun == CAM_LUN_WILDCARD) { 4486 mpt->trt_wildcard.enabled = 1; 4487 } else { 4488 mpt->trt[lun].enabled = 1; 4489 } 4490 return (0); 4491 } 4492 4493 static int 4494 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4495 { 4496 int i; 4497 4498 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4499 mpt->twildcard = 0; 4500 } else if (lun >= MPT_MAX_LUNS) { 4501 return (EINVAL); 4502 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4503 return (EINVAL); 4504 } 4505 if (lun == CAM_LUN_WILDCARD) { 4506 mpt->trt_wildcard.enabled = 0; 4507 } else { 4508 mpt->trt[lun].enabled = 0; 4509 } 4510 for (i = 0; i < MPT_MAX_LUNS; i++) { 4511 if (mpt->trt[lun].enabled) { 4512 break; 4513 } 4514 } 4515 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4516 if (mpt->is_fc) { 4517 (void) mpt_fc_reset_link(mpt, 0); 4518 } 4519 mpt->tenabled = 0; 4520 } 4521 return (0); 4522 } 4523 4524 /* 4525 * Called with MPT lock held 4526 */ 4527 static void 4528 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4529 { 4530 struct ccb_scsiio *csio = &ccb->csio; 4531 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4532 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4533 4534 switch (tgt->state) { 4535 case TGT_STATE_IN_CAM: 4536 break; 4537 case TGT_STATE_MOVING_DATA: 4538 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4539 xpt_freeze_simq(mpt->sim, 1); 4540 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4541 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4542 MPTLOCK_2_CAMLOCK(mpt); 4543 xpt_done(ccb); 4544 CAMLOCK_2_MPTLOCK(mpt); 4545 return; 4546 default: 4547 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4548 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4549 mpt_tgt_dump_req_state(mpt, cmd_req); 4550 
/*
 * Called with MPT lock held
 */
static void
mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
{
	struct ccb_scsiio *csio = &ccb->csio;
	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);

	switch (tgt->state) {
	case TGT_STATE_IN_CAM:
		break;
	case TGT_STATE_MOVING_DATA:
		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
		xpt_freeze_simq(mpt->sim, 1);
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		return;
	default:
		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
		mpt_tgt_dump_req_state(mpt, cmd_req);
		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_done(ccb);
		CAMLOCK_2_MPTLOCK(mpt);
		return;
	}

	if (csio->dxfer_len) {
		bus_dmamap_callback_t *cb;
		PTR_MSG_TARGET_ASSIST_REQUEST ta;
		request_t *req;

		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
		    ("dxfer_len %u but direction is NONE", csio->dxfer_len));

		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
			if (mpt->outofbeer == 0) {
				mpt->outofbeer = 1;
				xpt_freeze_simq(mpt->sim, 1);
				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
			}
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			CAMLOCK_2_MPTLOCK(mpt);
			return;
		}
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		if (sizeof (bus_addr_t) > 4) {
			cb = mpt_execute_req_a64;
		} else {
			cb = mpt_execute_req;
		}

		req->ccb = ccb;
		ccb->ccb_h.ccb_req_ptr = req;

		/*
		 * Record the currently active ccb and the
		 * request for it in our target state area.
		 */
		tgt->ccb = ccb;
		tgt->req = req;

		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
		ta = req->req_vbuf;

		if (mpt->is_sas) {
			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
			    cmd_req->req_vbuf;
			ta->QueueTag = ssp->InitiatorTag;
		} else if (mpt->is_spi) {
			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
			    cmd_req->req_vbuf;
			ta->QueueTag = sp->Tag;
		}
		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
		ta->ReplyWord = htole32(tgt->reply_desc);
		if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
			ta->LUN[0] =
			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
		} else {
			ta->LUN[1] = csio->ccb_h.target_lun;
		}

		ta->RelativeOffset = tgt->bytes_xfered;
		ta->DataLength = ccb->csio.dxfer_len;
		if (ta->DataLength > tgt->resid) {
			ta->DataLength = tgt->resid;
		}

		/*
		 * XXX Should be done after data transfer completes?
		 */
		tgt->resid -= csio->dxfer_len;
		tgt->bytes_xfered += csio->dxfer_len;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			ta->TargetAssistFlags |=
			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		}

#ifdef	WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			ta->TargetAssistFlags |=
			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
		}
#endif
		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);

		MPTLOCK_2_CAMLOCK(mpt);
		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
				int error;
				int s = splsoftvm();
				error = bus_dmamap_load(mpt->buffer_dmat,
				    req->dmap, csio->data_ptr, csio->dxfer_len,
				    cb, req, 0);
				splx(s);
				if (error == EINPROGRESS) {
					xpt_freeze_simq(mpt->sim, 1);
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				}
			} else {
				/*
				 * We have been given a pointer to a single
				 * physical buffer.
				 */
				struct bus_dma_segment seg;
				seg.ds_addr = (bus_addr_t)
				    (vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				(*cb)(req, &seg, 1, 0);
			}
		} else {
			/*
			 * We have been given a list of addresses.
			 * This case could be easily supported but they are not
			 * currently generated by the CAM subsystem so there
			 * is no point in wasting the time right now.
			 */
			struct bus_dma_segment *sgs;
			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
				(*cb)(req, NULL, 0, EFAULT);
			} else {
				/* Just use the segments provided */
				sgs = (struct bus_dma_segment *)csio->data_ptr;
				(*cb)(req, sgs, csio->sglist_cnt, 0);
			}
		}
		CAMLOCK_2_MPTLOCK(mpt);
	} else {
		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

		/*
		 * XXX: I don't know why this seems to happen, but
		 * XXX: completing the CCB seems to make things happy.
		 * XXX: This seems to happen if the initiator requests
		 * XXX: enough data that we have to do multiple CTIOs.
		 */
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "Meaningless STATUS CCB (%p): flags %x status %x "
			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			CAMLOCK_2_MPTLOCK(mpt);
			return;
		}
		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
			sp = sense;
			memcpy(sp, &csio->sense_data,
			    min(csio->sense_len, MPT_SENSE_SIZE));
		}
		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
	}
}
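/*
 * Note: mpt_target_start_io() above implements XPT_CONT_TARGET_IO by
 * building a TARGET_ASSIST request. tgt->resid counts down the initiator's
 * expected data length (FcpDl on FC) while tgt->bytes_xfered accumulates
 * the relative offset for the next CTIO. With WE_TRUST_AUTO_GOOD_STATUS
 * defined, the final data move can piggyback good status; otherwise status
 * is sent separately through mpt_scsi_tgt_status().
 */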
static void
mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
    uint32_t lun, int send, uint8_t *data, size_t length)
{
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_ASSIST_REQUEST ta;
	SGE_SIMPLE32 *se;
	uint32_t flags;
	uint8_t *dptr;
	bus_addr_t pptr;
	request_t *req;

	/*
	 * We enter with resid set to the data load for the command.
	 */
	tgt = MPT_TGT_STATE(mpt, cmd_req);
	if (length == 0 || tgt->resid == 0) {
		tgt->resid = 0;
		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
		return;
	}

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		mpt_prt(mpt, "out of resources- dropping local response\n");
		return;
	}
	tgt->is_local = 1;

	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
	ta = req->req_vbuf;

	if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
		ta->QueueTag = ssp->InitiatorTag;
	} else if (mpt->is_spi) {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
		ta->QueueTag = sp->Tag;
	}
	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	ta->ReplyWord = htole32(tgt->reply_desc);
	if (lun > MPT_MAX_LUNS) {
		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
		ta->LUN[1] = lun & 0xff;
	} else {
		ta->LUN[1] = lun;
	}
	ta->RelativeOffset = 0;
	ta->DataLength = length;

	dptr = req->req_vbuf;
	dptr += MPT_RQSL(mpt);
	pptr = req->req_pbuf;
	pptr += MPT_RQSL(mpt);
	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));

	se = (SGE_SIMPLE32 *) &ta->SGL[0];
	memset(se, 0, sizeof (*se));

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (send) {
		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
	}
	se->Address = pptr;
	MPI_pSGE_SET_LENGTH(se, length);
	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
	MPI_pSGE_SET_FLAGS(se, flags);

	tgt->ccb = NULL;
	tgt->req = req;
	tgt->resid -= length;
	tgt->bytes_xfered = length;
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
#else
	tgt->state = TGT_STATE_MOVING_DATA;
#endif
	mpt_send_cmd(mpt, req);
}

/*
 * Abort queued up CCBs
 */
static cam_status
mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
{
	struct mpt_hdr_stailq *lp;
	struct ccb_hdr *srch;
	int found = 0;
	union ccb *accb = ccb->cab.abort_ccb;
	tgt_resource_t *trtp;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);

	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
		trtp = &mpt->trt_wildcard;
	} else {
		trtp = &mpt->trt[ccb->ccb_h.target_lun];
	}

	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
		lp = &trtp->atios;
	} else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
		lp = &trtp->inots;
	} else {
		return (CAM_REQ_INVALID);
	}

	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
		if (srch == &accb->ccb_h) {
			found = 1;
			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
			break;
		}
	}
	if (found) {
		accb->ccb_h.status = CAM_REQ_ABORTED;
		xpt_done(accb);
		return (CAM_REQ_CMP);
	}
	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
	return (CAM_PATH_INVALID);
}
/*
 * Ask the MPT to abort the current target command
 */
static int
mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
{
	int error;
	request_t *req;
	PTR_MSG_TARGET_MODE_ABORT abtp;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (-1);
	}
	abtp = req->req_vbuf;
	memset(abtp, 0, sizeof (*abtp));

	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
	error = 0;
	if (mpt->is_fc || mpt->is_sas) {
		mpt_send_cmd(mpt, req);
	} else {
		/* The handshake wants the message frame, not the request. */
		error = mpt_send_handshake_cmd(mpt, sizeof (*abtp), abtp);
	}
	return (error);
}

/*
 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
 * FC929 to set bogus FC_RSP fields (nonzero residuals
 * but w/o RESID fields set). This causes QLogic initiators
 * to think maybe that a frame was lost.
 *
 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
 * we use allocated requests to do TARGET_ASSIST and we
 * need to know when to release them.
 */

static void
mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
    uint8_t status, uint8_t const *sense_data)
{
	uint8_t *cmd_vbuf;
	mpt_tgt_state_t *tgt;
	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
	request_t *req;
	bus_addr_t paddr;
	int resplen = 0;
	uint32_t fl;

	cmd_vbuf = cmd_req->req_vbuf;
	cmd_vbuf += MPT_RQSL(mpt);
	tgt = MPT_TGT_STATE(mpt, cmd_req);

	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
		if (mpt->outofbeer == 0) {
			mpt->outofbeer = 1;
			xpt_freeze_simq(mpt->sim, 1);
			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
		}
		if (ccb) {
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
			MPTLOCK_2_CAMLOCK(mpt);
			xpt_done(ccb);
			CAMLOCK_2_MPTLOCK(mpt);
		} else {
			mpt_prt(mpt,
			    "could not allocate status request- dropping\n");
		}
		return;
	}
	req->ccb = ccb;
	if (ccb) {
		ccb->ccb_h.ccb_mpt_ptr = mpt;
		ccb->ccb_h.ccb_req_ptr = req;
	}

	/*
	 * Record the currently active ccb, if any, and the
	 * request for it in our target state area.
	 */
	tgt->ccb = ccb;
	tgt->req = req;
	tgt->state = TGT_STATE_SENDING_STATUS;

	tp = req->req_vbuf;
	paddr = req->req_pbuf;
	paddr += MPT_RQSL(mpt);

	memset(tp, 0, sizeof (*tp));
	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
		uint8_t *sts_vbuf;
		uint32_t *rsp;

		sts_vbuf = req->req_vbuf;
		sts_vbuf += MPT_RQSL(mpt);
		rsp = (uint32_t *) sts_vbuf;
		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));

		/*
		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
		 * It has to be big-endian in memory and is organized
		 * in 32 bit words, which are much easier to deal with
		 * as words which are swizzled as needed.
		 *
		 * All we're filling here is the FC_RSP payload.
		 * We may just have the chip synthesize it if
		 * we have no residual and an OK status.
		 */
		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));

		rsp[2] = status;
		if (tgt->resid) {
			rsp[2] |= 0x800;	/* FCP_RESID_UNDER */
			rsp[3] = htobe32(tgt->resid);
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
		if (status == SCSI_STATUS_CHECK_COND) {
			int i;

			rsp[2] |= 0x200;	/* FCP_SNS_LEN_VALID */
			rsp[4] = htobe32(MPT_SENSE_SIZE);
			if (sense_data) {
				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
			} else {
				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK "
				    "CONDITION but no sense data?\n");
				memset(&rsp[8], 0, MPT_SENSE_SIZE);
			}
			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
				rsp[i] = htobe32(rsp[i]);
			}
#ifdef	WE_TRUST_AUTO_GOOD_STATUS
			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		}
#ifndef	WE_TRUST_AUTO_GOOD_STATUS
		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
#endif
		rsp[2] = htobe32(rsp[2]);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
		tp->StatusCode = status;
		tp->QueueTag = htole16(sp->Tag);
		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
	}

	tp->ReplyWord = htole32(tgt->reply_desc);
	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);

#ifdef	WE_CAN_USE_AUTO_REPOST
	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
#endif
	if (status == SCSI_STATUS_OK && resplen == 0) {
		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
	} else {
		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
		fl =
		    MPI_SGE_FLAGS_HOST_TO_IOC |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI_SGE_FLAGS_LAST_ELEMENT |
		    MPI_SGE_FLAGS_END_OF_LIST |
		    MPI_SGE_FLAGS_END_OF_BUFFER;
		fl <<= MPI_SGE_FLAGS_SHIFT;
		fl |= resplen;
		tp->StatusDataSGE.FlagsLength = htole32(fl);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
	    ccb, sense_data ? "h" : "hout", ccb ? ccb->csio.tag_id : -1, req,
	    req->serno, tgt->resid);
	if (ccb) {
		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
	}
	mpt_send_cmd(mpt, req);
}
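/*
 * Note: status delivery above has a fast path and a slow path. If the
 * status is GOOD and no FCP_RSP payload was built (resplen == 0), the
 * chip is asked to synthesize the response (AUTO_GOOD_STATUS); otherwise
 * a single SGE points at the explicit response payload staged in the
 * second half of the request area.
 */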
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
    tgt_resource_t *trtp, int init_id)
{
	struct ccb_immediate_notify *inot;
	mpt_tgt_state_t *tgt;

	tgt = MPT_TGT_STATE(mpt, req);
	inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots);
	if (inot == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);

	inot->initiator_id = init_id;	/* XXX */
	/*
	 * This is a somewhat grotesque attempt to map from task management
	 * to old style SCSI messages. God help us all.
	 */
	switch (fc) {
	case MPT_ABORT_TASK_SET:
		inot->arg = MSG_ABORT_TAG;
		break;
	case MPT_CLEAR_TASK_SET:
		inot->arg = MSG_CLEAR_TASK_SET;
		break;
	case MPT_TARGET_RESET:
		inot->arg = MSG_TARGET_RESET;
		break;
	case MPT_CLEAR_ACA:
		inot->arg = MSG_CLEAR_ACA;
		break;
	case MPT_TERMINATE_TASK:
		inot->arg = MSG_ABORT_TAG;
		break;
	default:
		inot->arg = MSG_NOOP;
		break;
	}
	/*
	 * XXX KDM we need the sequence/tag number for the target of the
	 * task management operation, especially if it is an abort.
	 */
	tgt->ccb = (union ccb *) inot;
	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
	MPTLOCK_2_CAMLOCK(mpt);
	xpt_done((union ccb *)inot);
	CAMLOCK_2_MPTLOCK(mpt);
}
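/*
 * Note: mpt_scsi_tgt_atio() below is the common inbound-command path. It
 * decodes the transport-specific command buffer (FCP, SSP, or SPI),
 * derives a lun, forwards task management requests to
 * mpt_scsi_tgt_tsk_mgmt(), locally services INQUIRY/REQUEST SENSE for
 * un-enabled luns, and otherwise hands a CAM ATIO upstream, tagged so the
 * originating request can be found again when the CCB returns.
 */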
static void
mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
{
	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
	     '0',  '0',  '0',  '1'
	};
	struct ccb_accept_tio *atiop;
	lun_id_t lun;
	int tag_action = 0;
	mpt_tgt_state_t *tgt;
	tgt_resource_t *trtp = NULL;
	U8 *lunptr;
	U8 *vbuf;
	U16 itag;
	U16 ioindex;
	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
	uint8_t *cdbp;

	/*
	 * Stash info for the current command where we can get at it later.
	 */
	vbuf = req->req_vbuf;
	vbuf += MPT_RQSL(mpt);

	/*
	 * Get our state pointer set up.
	 */
	tgt = MPT_TGT_STATE(mpt, req);
	if (tgt->state != TGT_STATE_LOADED) {
		mpt_tgt_dump_req_state(mpt, req);
		panic("bad target state in mpt_scsi_tgt_atio");
	}
	memset(tgt, 0, sizeof (mpt_tgt_state_t));
	tgt->state = TGT_STATE_IN_CAM;
	tgt->reply_desc = reply_desc;
	ioindex = GET_IO_INDEX(reply_desc);
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
	}
	if (mpt->is_fc) {
		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
		if (fc->FcpCntl[2]) {
			/*
			 * Task Management Request
			 */
			switch (fc->FcpCntl[2]) {
			case 0x2:
				fct = MPT_ABORT_TASK_SET;
				break;
			case 0x4:
				fct = MPT_CLEAR_TASK_SET;
				break;
			case 0x20:
				fct = MPT_TARGET_RESET;
				break;
			case 0x40:
				fct = MPT_CLEAR_ACA;
				break;
			case 0x80:
				fct = MPT_TERMINATE_TASK;
				break;
			default:
				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
				    fc->FcpCntl[2]);
				mpt_scsi_tgt_status(mpt, 0, req,
				    SCSI_STATUS_OK, 0);
				return;
			}
		} else {
			switch (fc->FcpCntl[1]) {
			case 0:
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			case 1:
				tag_action = MSG_HEAD_OF_Q_TAG;
				break;
			case 2:
				tag_action = MSG_ORDERED_Q_TAG;
				break;
			default:
				/*
				 * Bah. Ignore Untagged Queueing and ACA
				 */
				tag_action = MSG_SIMPLE_Q_TAG;
				break;
			}
		}
		tgt->resid = be32toh(fc->FcpDl);
		cdbp = fc->FcpCdb;
		lunptr = fc->FcpLun;
		itag = be16toh(fc->OptionalOxid);
	} else if (mpt->is_sas) {
		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
		cdbp = ssp->CDB;
		lunptr = ssp->LogicalUnitNumber;
		itag = ssp->InitiatorTag;
	} else {
		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
		cdbp = sp->CDB;
		lunptr = sp->LogicalUnitNumber;
		itag = sp->Tag;
	}

	/*
	 * Generate a simple lun
	 */
	switch (lunptr[0] & 0xc0) {
	case 0x40:
		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
		break;
	case 0:
		lun = lunptr[1];
		break;
	default:
		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
		lun = 0xffff;
		break;
	}

	/*
	 * Deal with non-enabled or bad luns here.
	 */
	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
	    mpt->trt[lun].enabled == 0) {
		if (mpt->twildcard) {
			trtp = &mpt->trt_wildcard;
		} else if (fct == MPT_NIL_TMT_VALUE) {
			/*
			 * In this case, we haven't got an upstream listener
			 * for either a specific lun or wildcard luns. We
			 * have to make some sensible response. For regular
			 * inquiry, just return some NOT HERE inquiry data.
			 * For VPD inquiry, report illegal field in cdb.
			 * For REQUEST SENSE, just return NO SENSE data.
			 * REPORT LUNS gets illegal command.
			 * All other commands get 'no such device'.
			 */
			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
			size_t len;

			memset(buf, 0, MPT_SENSE_SIZE);
			cond = SCSI_STATUS_CHECK_COND;
			buf[0] = 0xf0;
			buf[2] = 0x5;
			buf[7] = 0x8;
			sp = buf;
			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);

			switch (cdbp[0]) {
			case INQUIRY:
			{
				if (cdbp[1] != 0) {
					buf[12] = 0x26;
					buf[13] = 0x01;
					break;
				}
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (null_iqd));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local inquiry %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    null_iqd, len);
				return;
			}
			case REQUEST_SENSE:
			{
				buf[2] = 0x0;
				len = min(tgt->resid, cdbp[4]);
				len = min(len, sizeof (buf));
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "local reqsense %ld bytes\n", (long) len);
				mpt_scsi_tgt_local(mpt, req, lun, 1,
				    buf, len);
				return;
			}
			case REPORT_LUNS:
				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
				buf[12] = 0x26;
				return;
			default:
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "CMD 0x%x to unmanaged lun %u\n",
				    cdbp[0], lun);
				buf[12] = 0x25;
				break;
			}
			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
			return;
		}
		/* otherwise, leave trtp NULL */
	} else {
		trtp = &mpt->trt[lun];
	}

	/*
	 * Deal with any task management
	 */
	if (fct != MPT_NIL_TMT_VALUE) {
		if (trtp == NULL) {
			mpt_prt(mpt, "task mgmt function %x but no listener\n",
			    fct);
			mpt_scsi_tgt_status(mpt, 0, req,
			    SCSI_STATUS_OK, 0);
		} else {
			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
			    GET_INITIATOR_INDEX(reply_desc));
		}
		return;
	}
	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
	if (atiop == NULL) {
		mpt_lprt(mpt, MPT_PRT_WARN,
		    "no ATIOs for lun %u- sending back %s\n", lun,
		    mpt->tenabled ? "QUEUE FULL" : "BUSY");
		mpt_scsi_tgt_status(mpt, NULL, req,
		    mpt->tenabled ? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
		    NULL);
		return;
	}
	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
	mpt_lprt(mpt, MPT_PRT_DEBUG1,
	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
	atiop->ccb_h.ccb_mpt_ptr = mpt;
	atiop->ccb_h.status = CAM_CDB_RECVD;
	atiop->ccb_h.target_lun = lun;
	atiop->sense_len = 0;
	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);

	/*
	 * The tag we construct here allows us to find the
	 * original request that the command came in with.
	 *
	 * This way we don't have to depend on anything but the
	 * tag to find things when CCBs show back up from CAM.
	 */
	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
	tgt->tag_id = atiop->tag_id;
	if (tag_action) {
		atiop->tag_action = tag_action;
		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
	}
	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int i;
		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
		    atiop->ccb_h.target_lun);
		for (i = 0; i < atiop->cdb_len; i++) {
			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
			    (i == (atiop->cdb_len - 1)) ? '>' : ' ');
		}
		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
	}

	MPTLOCK_2_CAMLOCK(mpt);
	xpt_done((union ccb *)atiop);
	CAMLOCK_2_MPTLOCK(mpt);
}

static void
mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
{
	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
	    tgt->tag_id, tgt->state);
}

static void
mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
{

	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
	    req->index, req->index, req->state);
	mpt_tgt_dump_tgt_state(mpt, req);
}
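/*
 * Note: the reply handler below is entered in two forms. "Turbo"
 * context-only replies arrive with reply_frame == NULL, and the target
 * state machine advances based on tgt->state alone; full reply frames
 * carry an IOCStatus and a Function code identifying which target-mode
 * request completed.
 */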
static int
mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int dbg;
	union ccb *ccb;
	U16 status;

	if (reply_frame == NULL) {
		/*
		 * Figure out what the state of the command is.
		 */
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);

#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
		if (tgt->req) {
			mpt_req_not_spcl(mpt, tgt->req,
			    "turbo scsi_tgt_reply associated req", __LINE__);
		}
#endif
		switch (tgt->state) {
		case TGT_STATE_LOADED:
			/*
			 * This is a new command starting.
			 */
			mpt_scsi_tgt_atio(mpt, req, reply_desc);
			break;
		case TGT_STATE_MOVING_DATA:
		{
			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];

			ccb = tgt->ccb;
			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request moving data");
				/* NOTREACHED */
			}
			if (ccb == NULL) {
				if (tgt->is_local == 0) {
					panic("mpt: turbo target reply with "
					    "null associated ccb moving data");
					/* NOTREACHED */
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_ASSIST local done\n");
				TAILQ_REMOVE(&mpt->request_pending_list,
				    tgt->req, links);
				mpt_free_request(mpt, tgt->req);
				tgt->req = NULL;
				mpt_scsi_tgt_status(mpt, NULL, req,
				    0, NULL);
				return (TRUE);
			}
			tgt->ccb = NULL;
			tgt->nxfers++;
			mpt_req_untimeout(req, mpt_timeout, ccb);
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
			/*
			 * Free the Target Assist Request
			 */
			KASSERT(tgt->req->ccb == ccb,
			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
			    tgt->req->serno, tgt->req->ccb));
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * Do we need to send status now? That is, are
			 * we done with all our data transfers?
			 */
			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("zero ccb sts at %d", __LINE__));
				tgt->state = TGT_STATE_IN_CAM;
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				MPTLOCK_2_CAMLOCK(mpt);
				xpt_done(ccb);
				CAMLOCK_2_MPTLOCK(mpt);
				break;
			}
			/*
			 * Otherwise, send status (and sense)
			 */
			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
				sp = sense;
				memcpy(sp, &ccb->csio.sense_data,
				    min(ccb->csio.sense_len, MPT_SENSE_SIZE));
			}
			mpt_scsi_tgt_status(mpt, ccb, req,
			    ccb->csio.scsi_status, sp);
			break;
		}
		case TGT_STATE_SENDING_STATUS:
		case TGT_STATE_MOVING_DATA_AND_STATUS:
		{
			int ioindex;
			ccb = tgt->ccb;

			if (tgt->req == NULL) {
				panic("mpt: turbo target reply with null "
				    "associated request sending status");
				/* NOTREACHED */
			}

			if (ccb) {
				tgt->ccb = NULL;
				if (tgt->state ==
				    TGT_STATE_MOVING_DATA_AND_STATUS) {
					tgt->nxfers++;
				}
				mpt_req_untimeout(req, mpt_timeout, ccb);
				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
					ccb->ccb_h.status |= CAM_SENT_SENSE;
				}
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS tag %x sts %x flgs %x req "
				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
				    ccb->ccb_h.flags, tgt->req);
				/*
				 * Free the Target Send Status Request
				 */
				KASSERT(tgt->req->ccb == ccb,
				    ("tgt->req %p:%u tgt->req->ccb %p",
				    tgt->req, tgt->req->serno, tgt->req->ccb));
				/*
				 * Notify CAM that we're done
				 */
				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				KASSERT(ccb->ccb_h.status,
				    ("ZERO ccb sts at %d", __LINE__));
				tgt->ccb = NULL;
			} else {
				mpt_lprt(mpt, MPT_PRT_DEBUG,
				    "TARGET_STATUS non-CAM for req %p:%u\n",
				    tgt->req, tgt->req->serno);
			}
			TAILQ_REMOVE(&mpt->request_pending_list,
			    tgt->req, links);
			mpt_free_request(mpt, tgt->req);
			tgt->req = NULL;

			/*
			 * And re-post the Command Buffer.
			 * This will reset the state.
			 */
			ioindex = GET_IO_INDEX(reply_desc);
			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
			tgt->is_local = 0;
			mpt_post_target_command(mpt, req, ioindex);

			/*
			 * And post a done for anyone who cares
			 */
			if (ccb) {
				if (mpt->outofbeer) {
					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
					mpt->outofbeer = 0;
					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
				}
				MPTLOCK_2_CAMLOCK(mpt);
				xpt_done(ccb);
				CAMLOCK_2_MPTLOCK(mpt);
			}
			break;
		}
		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
			tgt->state = TGT_STATE_LOADED;
			break;
		default:
			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
			    "Reply Function\n", tgt->state);
		}
		return (TRUE);
	}

	status = le16toh(reply_frame->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		dbg = MPT_PRT_ERROR;
	} else {
		dbg = MPT_PRT_DEBUG1;
	}

	mpt_lprt(mpt, dbg,
	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
	    req, req->serno, reply_frame, reply_frame->Function, status);

	switch (reply_frame->Function) {
	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
	{
		mpt_tgt_state_t *tgt;
#ifdef	INVARIANTS
		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
#endif
		if (status != MPI_IOCSTATUS_SUCCESS) {
			/*
			 * XXX What to do?
			 */
			break;
		}
		tgt = MPT_TGT_STATE(mpt, req);
		KASSERT(tgt->state == TGT_STATE_LOADING,
		    ("bad state 0x%x on reply to buffer post", tgt->state));
		mpt_assign_serno(mpt, req);
		tgt->state = TGT_STATE_LOADED;
		break;
	}
	case MPI_FUNCTION_TARGET_ASSIST:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
#endif
		mpt_prt(mpt, "target assist completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_STATUS_SEND:
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
#endif
		mpt_prt(mpt, "status send completion\n");
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	case MPI_FUNCTION_TARGET_MODE_ABORT:
	{
		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
		PTR_MSG_TARGET_MODE_ABORT abtp =
		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
#ifdef	INVARIANTS
		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
#endif
		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		break;
	}
	default:
		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
		    "0x%x\n", reply_frame->Function);
		break;
	}
	return (TRUE);
}