1 /* 2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers 3 * 4 * Copyright (c) 1994-2001 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * Alternatively, this software may be distributed under the terms of the 17 * GNU Public License ("GPL"). 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
30 * 31 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $ 32 * 33 * $FreeBSD$ 34 */ 35 36 #include <dev/aic7xxx/aic7xxx_osm.h> 37 #include <dev/aic7xxx/aic7xxx_inline.h> 38 39 #ifndef AHC_TMODE_ENABLE 40 #define AHC_TMODE_ENABLE 0 41 #endif 42 43 #define ccb_scb_ptr spriv_ptr0 44 45 devclass_t ahc_devclass; 46 47 #if UNUSED 48 static void ahc_dump_targcmd(struct target_cmd *cmd); 49 #endif 50 static int ahc_modevent(module_t mod, int type, void *data); 51 static void ahc_action(struct cam_sim *sim, union ccb *ccb); 52 static void ahc_get_tran_settings(struct ahc_softc *ahc, 53 int our_id, char channel, 54 struct ccb_trans_settings *cts); 55 static void ahc_async(void *callback_arg, uint32_t code, 56 struct cam_path *path, void *arg); 57 static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, 58 int nsegments, int error); 59 static void ahc_poll(struct cam_sim *sim); 60 static void ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim, 61 struct ccb_scsiio *csio, struct scb *scb); 62 static void ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, 63 union ccb *ccb); 64 static int ahc_create_path(struct ahc_softc *ahc, 65 char channel, u_int target, u_int lun, 66 struct cam_path **path); 67 68 static void ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb); 69 70 static int 71 ahc_create_path(struct ahc_softc *ahc, char channel, u_int target, 72 u_int lun, struct cam_path **path) 73 { 74 path_id_t path_id; 75 76 if (channel == 'B') 77 path_id = cam_sim_path(ahc->platform_data->sim_b); 78 else 79 path_id = cam_sim_path(ahc->platform_data->sim); 80 81 return (xpt_create_path(path, /*periph*/NULL, 82 path_id, target, lun)); 83 } 84 85 int 86 ahc_map_int(struct ahc_softc *ahc) 87 { 88 int error; 89 90 /* Hook up our interrupt handler */ 91 error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq, 92 INTR_TYPE_CAM, ahc_platform_intr, ahc, 93 &ahc->platform_data->ih); 94 95 if (error != 0) 96 device_printf(ahc->dev_softc, 
"bus_setup_intr() failed: %d\n", 97 error); 98 return (error); 99 } 100 101 /* 102 * Attach all the sub-devices we can find 103 */ 104 int 105 ahc_attach(struct ahc_softc *ahc) 106 { 107 char ahc_info[256]; 108 struct ccb_setasync csa; 109 struct cam_devq *devq; 110 int bus_id; 111 int bus_id2; 112 struct cam_sim *sim; 113 struct cam_sim *sim2; 114 struct cam_path *path; 115 struct cam_path *path2; 116 long s; 117 int count; 118 119 count = 0; 120 sim = NULL; 121 sim2 = NULL; 122 123 ahc_controller_info(ahc, ahc_info); 124 printf("%s\n", ahc_info); 125 ahc_lock(ahc, &s); 126 /* 127 * Attach secondary channel first if the user has 128 * declared it the primary channel. 129 */ 130 if ((ahc->features & AHC_TWIN) != 0 131 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) { 132 bus_id = 1; 133 bus_id2 = 0; 134 } else { 135 bus_id = 0; 136 bus_id2 = 1; 137 } 138 139 /* 140 * Create the device queue for our SIM(s). 141 */ 142 devq = cam_simq_alloc(AHC_MAX_QUEUE); 143 if (devq == NULL) 144 goto fail; 145 146 /* 147 * Construct our first channel SIM entry 148 */ 149 sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc, 150 device_get_unit(ahc->dev_softc), 151 1, AHC_MAX_QUEUE, devq); 152 if (sim == NULL) { 153 cam_simq_free(devq); 154 goto fail; 155 } 156 157 if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) { 158 cam_sim_free(sim, /*free_devq*/TRUE); 159 sim = NULL; 160 goto fail; 161 } 162 163 if (xpt_create_path(&path, /*periph*/NULL, 164 cam_sim_path(sim), CAM_TARGET_WILDCARD, 165 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 166 xpt_bus_deregister(cam_sim_path(sim)); 167 cam_sim_free(sim, /*free_devq*/TRUE); 168 sim = NULL; 169 goto fail; 170 } 171 172 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); 173 csa.ccb_h.func_code = XPT_SASYNC_CB; 174 csa.event_enable = AC_LOST_DEVICE; 175 csa.callback = ahc_async; 176 csa.callback_arg = sim; 177 xpt_action((union ccb *)&csa); 178 count++; 179 180 if (ahc->features & AHC_TWIN) { 181 sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc", 182 ahc, 
device_get_unit(ahc->dev_softc), 1, 183 AHC_MAX_QUEUE, devq); 184 185 if (sim2 == NULL) { 186 printf("ahc_attach: Unable to attach second " 187 "bus due to resource shortage"); 188 goto fail; 189 } 190 191 if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) { 192 printf("ahc_attach: Unable to attach second " 193 "bus due to resource shortage"); 194 /* 195 * We do not want to destroy the device queue 196 * because the first bus is using it. 197 */ 198 cam_sim_free(sim2, /*free_devq*/FALSE); 199 goto fail; 200 } 201 202 if (xpt_create_path(&path2, /*periph*/NULL, 203 cam_sim_path(sim2), 204 CAM_TARGET_WILDCARD, 205 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 206 xpt_bus_deregister(cam_sim_path(sim2)); 207 cam_sim_free(sim2, /*free_devq*/FALSE); 208 sim2 = NULL; 209 goto fail; 210 } 211 xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5); 212 csa.ccb_h.func_code = XPT_SASYNC_CB; 213 csa.event_enable = AC_LOST_DEVICE; 214 csa.callback = ahc_async; 215 csa.callback_arg = sim2; 216 xpt_action((union ccb *)&csa); 217 count++; 218 } 219 220 fail: 221 if ((ahc->features & AHC_TWIN) != 0 222 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) { 223 ahc->platform_data->sim_b = sim; 224 ahc->platform_data->path_b = path; 225 ahc->platform_data->sim = sim2; 226 ahc->platform_data->path = path2; 227 } else { 228 ahc->platform_data->sim = sim; 229 ahc->platform_data->path = path; 230 ahc->platform_data->sim_b = sim2; 231 ahc->platform_data->path_b = path2; 232 } 233 234 if (count != 0) { 235 /* We have to wait until after any system dumps... 
*/ 236 ahc->platform_data->eh = 237 EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown, 238 ahc, SHUTDOWN_PRI_DEFAULT); 239 ahc_intr_enable(ahc, TRUE); 240 } 241 242 ahc_unlock(ahc, &s); 243 return (count); 244 } 245 246 /* 247 * Catch an interrupt from the adapter 248 */ 249 void 250 ahc_platform_intr(void *arg) 251 { 252 struct ahc_softc *ahc; 253 254 ahc = (struct ahc_softc *)arg; 255 ahc_intr(ahc); 256 } 257 258 /* 259 * We have an scb which has been processed by the 260 * adaptor, now we look to see how the operation 261 * went. 262 */ 263 void 264 ahc_done(struct ahc_softc *ahc, struct scb *scb) 265 { 266 union ccb *ccb; 267 268 CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE, 269 ("ahc_done - scb %d\n", scb->hscb->tag)); 270 271 ccb = scb->io_ctx; 272 LIST_REMOVE(scb, pending_links); 273 if ((scb->flags & SCB_UNTAGGEDQ) != 0) { 274 struct scb_tailq *untagged_q; 275 int target_offset; 276 277 target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); 278 untagged_q = &ahc->untagged_queues[target_offset]; 279 TAILQ_REMOVE(untagged_q, scb, links.tqe); 280 scb->flags &= ~SCB_UNTAGGEDQ; 281 ahc_run_untagged_queue(ahc, untagged_q); 282 } 283 284 untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch); 285 286 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 287 bus_dmasync_op_t op; 288 289 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 290 op = BUS_DMASYNC_POSTREAD; 291 else 292 op = BUS_DMASYNC_POSTWRITE; 293 bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op); 294 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); 295 } 296 297 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 298 struct cam_path *ccb_path; 299 300 /* 301 * If we have finally disconnected, clean up our 302 * pending device state. 303 * XXX - There may be error states that cause where 304 * we will remain connected. 
305 */ 306 ccb_path = ccb->ccb_h.path; 307 if (ahc->pending_device != NULL 308 && xpt_path_comp(ahc->pending_device->path, ccb_path) == 0) { 309 310 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 311 ahc->pending_device = NULL; 312 } else { 313 if (bootverbose) { 314 xpt_print_path(ccb->ccb_h.path); 315 printf("Still connected\n"); 316 } 317 ahc_freeze_ccb(ccb); 318 } 319 } 320 321 if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) 322 ccb->ccb_h.status |= CAM_REQ_CMP; 323 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 324 ahc_free_scb(ahc, scb); 325 xpt_done(ccb); 326 return; 327 } 328 329 /* 330 * If the recovery SCB completes, we have to be 331 * out of our timeout. 332 */ 333 if ((scb->flags & SCB_RECOVERY_SCB) != 0) { 334 struct scb *list_scb; 335 336 /* 337 * We were able to complete the command successfully, 338 * so reinstate the timeouts for all other pending 339 * commands. 340 */ 341 LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) { 342 union ccb *ccb; 343 uint64_t time; 344 345 ccb = list_scb->io_ctx; 346 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) 347 continue; 348 349 time = ccb->ccb_h.timeout; 350 time *= hz; 351 time /= 1000; 352 ccb->ccb_h.timeout_ch = 353 timeout(ahc_timeout, list_scb, time); 354 } 355 356 if (ahc_get_transaction_status(scb) == CAM_BDR_SENT 357 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED) 358 ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT); 359 ahc_print_path(ahc, scb); 360 printf("no longer in timeout, status = %x\n", 361 ccb->ccb_h.status); 362 } 363 364 /* Don't clobber any existing error state */ 365 if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) { 366 ccb->ccb_h.status |= CAM_REQ_CMP; 367 } else if ((scb->flags & SCB_SENSE) != 0) { 368 /* 369 * We performed autosense retrieval. 370 * 371 * Zero any sense not transferred by the 372 * device. The SCSI spec mandates that any 373 * untransfered data should be assumed to be 374 * zero. 
Complete the 'bounce' of sense information 375 * through buffers accessible via bus-space by 376 * copying it into the clients csio. 377 */ 378 memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); 379 memcpy(&ccb->csio.sense_data, 380 ahc_get_sense_buf(ahc, scb), 381 (ahc_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK) 382 - ccb->csio.sense_resid); 383 scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID; 384 } 385 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 386 ahc_free_scb(ahc, scb); 387 xpt_done(ccb); 388 } 389 390 static void 391 ahc_action(struct cam_sim *sim, union ccb *ccb) 392 { 393 struct ahc_softc *ahc; 394 struct ahc_tmode_lstate *lstate; 395 u_int target_id; 396 u_int our_id; 397 long s; 398 399 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n")); 400 401 ahc = (struct ahc_softc *)cam_sim_softc(sim); 402 403 target_id = ccb->ccb_h.target_id; 404 our_id = SIM_SCSI_ID(ahc, sim); 405 406 switch (ccb->ccb_h.func_code) { 407 /* Common cases first */ 408 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ 409 case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/ 410 { 411 struct ahc_tmode_tstate *tstate; 412 cam_status status; 413 414 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, 415 &lstate, TRUE); 416 417 if (status != CAM_REQ_CMP) { 418 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 419 /* Response from the black hole device */ 420 tstate = NULL; 421 lstate = ahc->black_hole; 422 } else { 423 ccb->ccb_h.status = status; 424 xpt_done(ccb); 425 break; 426 } 427 } 428 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 429 430 ahc_lock(ahc, &s); 431 SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h, 432 sim_links.sle); 433 ccb->ccb_h.status = CAM_REQ_INPROG; 434 if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0) 435 ahc_run_tqinfifo(ahc, /*paused*/FALSE); 436 ahc_unlock(ahc, &s); 437 break; 438 } 439 440 /* 441 * The target_id represents the target we attempt to 442 * select. 
In target mode, this is the initiator of 443 * the original command. 444 */ 445 our_id = target_id; 446 target_id = ccb->csio.init_id; 447 /* FALLTHROUGH */ 448 } 449 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 450 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 451 { 452 struct scb *scb; 453 struct hardware_scb *hscb; 454 455 if ((ahc->flags & AHC_INITIATORROLE) == 0 456 && (ccb->ccb_h.func_code == XPT_SCSI_IO 457 || ccb->ccb_h.func_code == XPT_RESET_DEV)) { 458 ccb->ccb_h.status = CAM_PROVIDE_FAIL; 459 xpt_done(ccb); 460 return; 461 } 462 463 /* 464 * get an scb to use. 465 */ 466 ahc_lock(ahc, &s); 467 if ((scb = ahc_get_scb(ahc)) == NULL) { 468 469 xpt_freeze_simq(sim, /*count*/1); 470 ahc->flags |= AHC_RESOURCE_SHORTAGE; 471 ahc_unlock(ahc, &s); 472 ccb->ccb_h.status = CAM_REQUEUE_REQ; 473 xpt_done(ccb); 474 return; 475 } 476 ahc_unlock(ahc, &s); 477 478 hscb = scb->hscb; 479 480 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE, 481 ("start scb(%p)\n", scb)); 482 scb->io_ctx = ccb; 483 /* 484 * So we can find the SCB when an abort is requested 485 */ 486 ccb->ccb_h.ccb_scb_ptr = scb; 487 488 /* 489 * Put all the arguments for the xfer in the scb 490 */ 491 hscb->control = 0; 492 hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id); 493 hscb->lun = ccb->ccb_h.target_lun; 494 if (ccb->ccb_h.func_code == XPT_RESET_DEV) { 495 hscb->cdb_len = 0; 496 scb->flags |= SCB_DEVICE_RESET; 497 hscb->control |= MK_MESSAGE; 498 ahc_execute_scb(scb, NULL, 0, 0); 499 } else { 500 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 501 struct target_data *tdata; 502 503 tdata = &hscb->shared_data.tdata; 504 if (ahc->pending_device == lstate) 505 scb->flags |= SCB_TARGET_IMMEDIATE; 506 hscb->control |= TARGET_SCB; 507 scb->flags |= SCB_TARGET_SCB; 508 tdata->target_phases = 0; 509 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 510 tdata->target_phases |= SPHASE_PENDING; 511 tdata->scsi_status = 512 ccb->csio.scsi_status; 513 } 514 if 
(ccb->ccb_h.flags & CAM_DIS_DISCONNECT) 515 tdata->target_phases |= NO_DISCONNECT; 516 517 tdata->initiator_tag = ccb->csio.tag_id; 518 } 519 if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) 520 hscb->control |= ccb->csio.tag_action; 521 522 ahc_setup_data(ahc, sim, &ccb->csio, scb); 523 } 524 break; 525 } 526 case XPT_NOTIFY_ACK: 527 case XPT_IMMED_NOTIFY: 528 { 529 struct ahc_tmode_tstate *tstate; 530 struct ahc_tmode_lstate *lstate; 531 cam_status status; 532 533 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, 534 &lstate, TRUE); 535 536 if (status != CAM_REQ_CMP) { 537 ccb->ccb_h.status = status; 538 xpt_done(ccb); 539 break; 540 } 541 SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h, 542 sim_links.sle); 543 ccb->ccb_h.status = CAM_REQ_INPROG; 544 ahc_send_lstate_events(ahc, lstate); 545 break; 546 } 547 case XPT_EN_LUN: /* Enable LUN as a target */ 548 ahc_handle_en_lun(ahc, sim, ccb); 549 xpt_done(ccb); 550 break; 551 case XPT_ABORT: /* Abort the specified CCB */ 552 { 553 ahc_abort_ccb(ahc, sim, ccb); 554 break; 555 } 556 case XPT_SET_TRAN_SETTINGS: 557 { 558 #ifdef AHC_NEW_TRAN_SETTINGS 559 struct ahc_devinfo devinfo; 560 struct ccb_trans_settings *cts; 561 struct ccb_trans_settings_scsi *scsi; 562 struct ccb_trans_settings_spi *spi; 563 struct ahc_initiator_tinfo *tinfo; 564 struct ahc_tmode_tstate *tstate; 565 uint16_t *discenable; 566 uint16_t *tagenable; 567 u_int update_type; 568 569 cts = &ccb->cts; 570 scsi = &cts->proto_specific.scsi; 571 spi = &cts->xport_specific.spi; 572 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 573 cts->ccb_h.target_id, 574 cts->ccb_h.target_lun, 575 SIM_CHANNEL(ahc, sim), 576 ROLE_UNKNOWN); 577 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 578 devinfo.our_scsiid, 579 devinfo.target, &tstate); 580 update_type = 0; 581 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 582 update_type |= AHC_TRANS_GOAL; 583 discenable = &tstate->discenable; 584 tagenable = &tstate->tagenable; 585 tinfo->curr.protocol_version = 
586 cts->protocol_version; 587 tinfo->curr.transport_version = 588 cts->transport_version; 589 tinfo->goal.protocol_version = 590 cts->protocol_version; 591 tinfo->goal.transport_version = 592 cts->transport_version; 593 } else if (cts->type == CTS_TYPE_USER_SETTINGS) { 594 update_type |= AHC_TRANS_USER; 595 discenable = &ahc->user_discenable; 596 tagenable = &ahc->user_tagenable; 597 tinfo->user.protocol_version = 598 cts->protocol_version; 599 tinfo->user.transport_version = 600 cts->transport_version; 601 } else { 602 ccb->ccb_h.status = CAM_REQ_INVALID; 603 xpt_done(ccb); 604 break; 605 } 606 607 ahc_lock(ahc, &s); 608 609 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 610 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 611 *discenable |= devinfo.target_mask; 612 else 613 *discenable &= ~devinfo.target_mask; 614 } 615 616 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 617 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 618 *tagenable |= devinfo.target_mask; 619 else 620 *tagenable &= ~devinfo.target_mask; 621 } 622 623 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 624 ahc_validate_width(ahc, /*tinfo limit*/NULL, 625 &spi->bus_width, ROLE_UNKNOWN); 626 ahc_set_width(ahc, &devinfo, spi->bus_width, 627 update_type, /*paused*/FALSE); 628 } 629 630 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) { 631 if (update_type == AHC_TRANS_USER) 632 spi->ppr_options = tinfo->user.ppr_options; 633 else 634 spi->ppr_options = tinfo->goal.ppr_options; 635 } 636 637 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) { 638 if (update_type == AHC_TRANS_USER) 639 spi->sync_offset = tinfo->user.offset; 640 else 641 spi->sync_offset = tinfo->goal.offset; 642 } 643 644 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) { 645 if (update_type == AHC_TRANS_USER) 646 spi->sync_period = tinfo->user.period; 647 else 648 spi->sync_period = tinfo->goal.period; 649 } 650 651 if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) 652 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { 653 
struct ahc_syncrate *syncrate; 654 u_int maxsync; 655 656 if ((ahc->features & AHC_ULTRA2) != 0) 657 maxsync = AHC_SYNCRATE_DT; 658 else if ((ahc->features & AHC_ULTRA) != 0) 659 maxsync = AHC_SYNCRATE_ULTRA; 660 else 661 maxsync = AHC_SYNCRATE_FAST; 662 663 if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT) 664 spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ; 665 666 syncrate = ahc_find_syncrate(ahc, &spi->sync_period, 667 &spi->ppr_options, 668 maxsync); 669 ahc_validate_offset(ahc, /*tinfo limit*/NULL, 670 syncrate, &spi->sync_offset, 671 spi->bus_width, ROLE_UNKNOWN); 672 673 /* We use a period of 0 to represent async */ 674 if (spi->sync_offset == 0) { 675 spi->sync_period = 0; 676 spi->ppr_options = 0; 677 } 678 679 ahc_set_syncrate(ahc, &devinfo, syncrate, 680 spi->sync_period, spi->sync_offset, 681 spi->ppr_options, update_type, 682 /*paused*/FALSE); 683 } 684 ahc_unlock(ahc, &s); 685 ccb->ccb_h.status = CAM_REQ_CMP; 686 xpt_done(ccb); 687 #else 688 struct ahc_devinfo devinfo; 689 struct ccb_trans_settings *cts; 690 struct ahc_initiator_tinfo *tinfo; 691 struct ahc_tmode_tstate *tstate; 692 uint16_t *discenable; 693 uint16_t *tagenable; 694 u_int update_type; 695 long s; 696 697 cts = &ccb->cts; 698 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 699 cts->ccb_h.target_id, 700 cts->ccb_h.target_lun, 701 SIM_CHANNEL(ahc, sim), 702 ROLE_UNKNOWN); 703 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 704 devinfo.our_scsiid, 705 devinfo.target, &tstate); 706 update_type = 0; 707 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { 708 update_type |= AHC_TRANS_GOAL; 709 discenable = &tstate->discenable; 710 tagenable = &tstate->tagenable; 711 } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 712 update_type |= AHC_TRANS_USER; 713 discenable = &ahc->user_discenable; 714 tagenable = &ahc->user_tagenable; 715 } else { 716 ccb->ccb_h.status = CAM_REQ_INVALID; 717 xpt_done(ccb); 718 break; 719 } 720 721 ahc_lock(ahc, &s); 722 723 if ((cts->valid & 
CCB_TRANS_DISC_VALID) != 0) { 724 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) 725 *discenable |= devinfo.target_mask; 726 else 727 *discenable &= ~devinfo.target_mask; 728 } 729 730 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 731 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) 732 *tagenable |= devinfo.target_mask; 733 else 734 *tagenable &= ~devinfo.target_mask; 735 } 736 737 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 738 ahc_validate_width(ahc, /*tinfo limit*/NULL, 739 &cts->bus_width, ROLE_UNKNOWN); 740 ahc_set_width(ahc, &devinfo, cts->bus_width, 741 update_type, /*paused*/FALSE); 742 } 743 744 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) { 745 if (update_type == AHC_TRANS_USER) 746 cts->sync_offset = tinfo->user.offset; 747 else 748 cts->sync_offset = tinfo->goal.offset; 749 } 750 751 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) { 752 if (update_type == AHC_TRANS_USER) 753 cts->sync_period = tinfo->user.period; 754 else 755 cts->sync_period = tinfo->goal.period; 756 } 757 758 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) 759 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) { 760 struct ahc_syncrate *syncrate; 761 u_int ppr_options; 762 u_int maxsync; 763 764 if ((ahc->features & AHC_ULTRA2) != 0) 765 maxsync = AHC_SYNCRATE_DT; 766 else if ((ahc->features & AHC_ULTRA) != 0) 767 maxsync = AHC_SYNCRATE_ULTRA; 768 else 769 maxsync = AHC_SYNCRATE_FAST; 770 771 ppr_options = 0; 772 if (cts->sync_period <= 9 773 && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT) 774 ppr_options = MSG_EXT_PPR_DT_REQ; 775 776 syncrate = ahc_find_syncrate(ahc, &cts->sync_period, 777 &ppr_options, 778 maxsync); 779 ahc_validate_offset(ahc, /*tinfo limit*/NULL, 780 syncrate, &cts->sync_offset, 781 MSG_EXT_WDTR_BUS_8_BIT, 782 ROLE_UNKNOWN); 783 784 /* We use a period of 0 to represent async */ 785 if (cts->sync_offset == 0) { 786 cts->sync_period = 0; 787 ppr_options = 0; 788 } 789 790 if (ppr_options == MSG_EXT_PPR_DT_REQ 791 && tinfo->user.transport_version >= 
3) { 792 tinfo->goal.transport_version = 793 tinfo->user.transport_version; 794 tinfo->curr.transport_version = 795 tinfo->user.transport_version; 796 } 797 798 ahc_set_syncrate(ahc, &devinfo, syncrate, 799 cts->sync_period, cts->sync_offset, 800 ppr_options, update_type, 801 /*paused*/FALSE); 802 } 803 ahc_unlock(ahc, &s); 804 ccb->ccb_h.status = CAM_REQ_CMP; 805 xpt_done(ccb); 806 #endif 807 break; 808 } 809 case XPT_GET_TRAN_SETTINGS: 810 /* Get default/user set transfer settings for the target */ 811 { 812 813 ahc_lock(ahc, &s); 814 ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim), 815 SIM_CHANNEL(ahc, sim), &ccb->cts); 816 ahc_unlock(ahc, &s); 817 xpt_done(ccb); 818 break; 819 } 820 case XPT_CALC_GEOMETRY: 821 { 822 struct ccb_calc_geometry *ccg; 823 uint32_t size_mb; 824 uint32_t secs_per_cylinder; 825 int extended; 826 827 ccg = &ccb->ccg; 828 size_mb = ccg->volume_size 829 / ((1024L * 1024L) / ccg->block_size); 830 extended = SIM_IS_SCSIBUS_B(ahc, sim) 831 ? ahc->flags & AHC_EXTENDED_TRANS_B 832 : ahc->flags & AHC_EXTENDED_TRANS_A; 833 834 if (size_mb > 1024 && extended) { 835 ccg->heads = 255; 836 ccg->secs_per_track = 63; 837 } else { 838 ccg->heads = 64; 839 ccg->secs_per_track = 32; 840 } 841 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 842 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 843 ccb->ccb_h.status = CAM_REQ_CMP; 844 xpt_done(ccb); 845 break; 846 } 847 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 848 { 849 int found; 850 851 ahc_lock(ahc, &s); 852 found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim), 853 /*initiate reset*/TRUE); 854 ahc_unlock(ahc, &s); 855 if (bootverbose) { 856 xpt_print_path(SIM_PATH(ahc, sim)); 857 printf("SCSI bus reset delivered. 
" 858 "%d SCBs aborted.\n", found); 859 } 860 ccb->ccb_h.status = CAM_REQ_CMP; 861 xpt_done(ccb); 862 break; 863 } 864 case XPT_TERM_IO: /* Terminate the I/O process */ 865 /* XXX Implement */ 866 ccb->ccb_h.status = CAM_REQ_INVALID; 867 xpt_done(ccb); 868 break; 869 case XPT_PATH_INQ: /* Path routing inquiry */ 870 { 871 struct ccb_pathinq *cpi = &ccb->cpi; 872 873 cpi->version_num = 1; /* XXX??? */ 874 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; 875 if ((ahc->features & AHC_WIDE) != 0) 876 cpi->hba_inquiry |= PI_WIDE_16; 877 if ((ahc->features & AHC_TARGETMODE) != 0) { 878 cpi->target_sprt = PIT_PROCESSOR 879 | PIT_DISCONNECT 880 | PIT_TERM_IO; 881 } else { 882 cpi->target_sprt = 0; 883 } 884 cpi->hba_misc = 0; 885 cpi->hba_eng_cnt = 0; 886 cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7; 887 cpi->max_lun = AHC_NUM_LUNS - 1; 888 if (SIM_IS_SCSIBUS_B(ahc, sim)) { 889 cpi->initiator_id = ahc->our_id_b; 890 if ((ahc->flags & AHC_RESET_BUS_B) == 0) 891 cpi->hba_misc |= PIM_NOBUSRESET; 892 } else { 893 cpi->initiator_id = ahc->our_id; 894 if ((ahc->flags & AHC_RESET_BUS_A) == 0) 895 cpi->hba_misc |= PIM_NOBUSRESET; 896 } 897 cpi->bus_id = cam_sim_bus(sim); 898 cpi->base_transfer_speed = 3300; 899 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 900 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); 901 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 902 cpi->unit_number = cam_sim_unit(sim); 903 #ifdef AHC_NEW_TRAN_SETTINGS 904 cpi->protocol = PROTO_SCSI; 905 cpi->protocol_version = SCSI_REV_2; 906 cpi->transport = XPORT_SPI; 907 cpi->transport_version = 2; 908 cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST; 909 if ((ahc->features & AHC_DT) != 0) { 910 cpi->transport_version = 3; 911 cpi->xport_specific.spi.ppr_options = 912 SID_SPI_CLOCK_DT_ST; 913 } 914 #endif 915 cpi->ccb_h.status = CAM_REQ_CMP; 916 xpt_done(ccb); 917 break; 918 } 919 default: 920 ccb->ccb_h.status = CAM_PROVIDE_FAIL; 921 xpt_done(ccb); 922 break; 923 } 924 } 925 926 static void 927 
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel, 928 struct ccb_trans_settings *cts) 929 { 930 #ifdef AHC_NEW_TRAN_SETTINGS 931 struct ahc_devinfo devinfo; 932 struct ccb_trans_settings_scsi *scsi; 933 struct ccb_trans_settings_spi *spi; 934 struct ahc_initiator_tinfo *targ_info; 935 struct ahc_tmode_tstate *tstate; 936 struct ahc_transinfo *tinfo; 937 938 scsi = &cts->proto_specific.scsi; 939 spi = &cts->xport_specific.spi; 940 ahc_compile_devinfo(&devinfo, our_id, 941 cts->ccb_h.target_id, 942 cts->ccb_h.target_lun, 943 channel, ROLE_UNKNOWN); 944 targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, 945 devinfo.our_scsiid, 946 devinfo.target, &tstate); 947 948 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 949 tinfo = &targ_info->curr; 950 else 951 tinfo = &targ_info->user; 952 953 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 954 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 955 if (cts->type == CTS_TYPE_USER_SETTINGS) { 956 if ((ahc->user_discenable & devinfo.target_mask) != 0) 957 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 958 959 if ((ahc->user_tagenable & devinfo.target_mask) != 0) 960 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 961 } else { 962 if ((tstate->discenable & devinfo.target_mask) != 0) 963 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 964 965 if ((tstate->tagenable & devinfo.target_mask) != 0) 966 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 967 } 968 cts->protocol_version = tinfo->protocol_version; 969 cts->transport_version = tinfo->transport_version; 970 971 spi->sync_period = tinfo->period; 972 spi->sync_offset = tinfo->offset; 973 spi->bus_width = tinfo->width; 974 spi->ppr_options = tinfo->ppr_options; 975 976 cts->protocol = PROTO_SCSI; 977 cts->transport = XPORT_SPI; 978 spi->valid = CTS_SPI_VALID_SYNC_RATE 979 | CTS_SPI_VALID_SYNC_OFFSET 980 | CTS_SPI_VALID_BUS_WIDTH 981 | CTS_SPI_VALID_PPR_OPTIONS; 982 983 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 984 scsi->valid = CTS_SCSI_VALID_TQ; 985 spi->valid |= CTS_SPI_VALID_DISC; 986 } else { 987 
scsi->valid = 0; 988 } 989 990 cts->ccb_h.status = CAM_REQ_CMP; 991 #else 992 struct ahc_devinfo devinfo; 993 struct ahc_initiator_tinfo *targ_info; 994 struct ahc_tmode_tstate *tstate; 995 struct ahc_transinfo *tinfo; 996 997 ahc_compile_devinfo(&devinfo, our_id, 998 cts->ccb_h.target_id, 999 cts->ccb_h.target_lun, 1000 channel, ROLE_UNKNOWN); 1001 targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, 1002 devinfo.our_scsiid, 1003 devinfo.target, &tstate); 1004 1005 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) 1006 tinfo = &targ_info->curr; 1007 else 1008 tinfo = &targ_info->user; 1009 1010 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 1011 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) { 1012 if ((ahc->user_discenable & devinfo.target_mask) != 0) 1013 cts->flags |= CCB_TRANS_DISC_ENB; 1014 1015 if ((ahc->user_tagenable & devinfo.target_mask) != 0) 1016 cts->flags |= CCB_TRANS_TAG_ENB; 1017 } else { 1018 if ((tstate->discenable & devinfo.target_mask) != 0) 1019 cts->flags |= CCB_TRANS_DISC_ENB; 1020 1021 if ((tstate->tagenable & devinfo.target_mask) != 0) 1022 cts->flags |= CCB_TRANS_TAG_ENB; 1023 } 1024 cts->sync_period = tinfo->period; 1025 cts->sync_offset = tinfo->offset; 1026 cts->bus_width = tinfo->width; 1027 1028 cts->valid = CCB_TRANS_SYNC_RATE_VALID 1029 | CCB_TRANS_SYNC_OFFSET_VALID 1030 | CCB_TRANS_BUS_WIDTH_VALID; 1031 1032 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) 1033 cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID; 1034 1035 cts->ccb_h.status = CAM_REQ_CMP; 1036 #endif 1037 } 1038 1039 static void 1040 ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) 1041 { 1042 struct ahc_softc *ahc; 1043 struct cam_sim *sim; 1044 1045 sim = (struct cam_sim *)callback_arg; 1046 ahc = (struct ahc_softc *)cam_sim_softc(sim); 1047 switch (code) { 1048 case AC_LOST_DEVICE: 1049 { 1050 struct ahc_devinfo devinfo; 1051 long s; 1052 1053 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 1054 
xpt_path_target_id(path), 1055 xpt_path_lun_id(path), 1056 SIM_CHANNEL(ahc, sim), 1057 ROLE_UNKNOWN); 1058 1059 /* 1060 * Revert to async/narrow transfers 1061 * for the next device. 1062 */ 1063 ahc_lock(ahc, &s); 1064 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 1065 AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE); 1066 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, 1067 /*period*/0, /*offset*/0, /*ppr_options*/0, 1068 AHC_TRANS_GOAL|AHC_TRANS_CUR, 1069 /*paused*/FALSE); 1070 ahc_unlock(ahc, &s); 1071 break; 1072 } 1073 default: 1074 break; 1075 } 1076 } 1077 1078 static void 1079 ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, 1080 int error) 1081 { 1082 struct scb *scb; 1083 union ccb *ccb; 1084 struct ahc_softc *ahc; 1085 struct ahc_initiator_tinfo *tinfo; 1086 struct ahc_tmode_tstate *tstate; 1087 u_int mask; 1088 long s; 1089 1090 scb = (struct scb *)arg; 1091 ccb = scb->io_ctx; 1092 ahc = scb->ahc_softc; 1093 1094 if (error != 0) { 1095 if (error == EFBIG) 1096 ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG); 1097 else 1098 ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR); 1099 if (nsegments != 0) 1100 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); 1101 ahc_lock(ahc, &s); 1102 ahc_free_scb(ahc, scb); 1103 ahc_unlock(ahc, &s); 1104 xpt_done(ccb); 1105 return; 1106 } 1107 if (nsegments != 0) { 1108 struct ahc_dma_seg *sg; 1109 bus_dma_segment_t *end_seg; 1110 bus_dmasync_op_t op; 1111 1112 end_seg = dm_segs + nsegments; 1113 1114 /* Copy the segments into our SG list */ 1115 sg = scb->sg_list; 1116 while (dm_segs < end_seg) { 1117 uint32_t len; 1118 1119 sg->addr = ahc_htole32(dm_segs->ds_addr); 1120 len = dm_segs->ds_len 1121 | ((dm_segs->ds_addr >> 8) & 0x7F000000); 1122 sg->len = ahc_htole32(len); 1123 sg++; 1124 dm_segs++; 1125 } 1126 1127 /* 1128 * Note where to find the SG entries in bus space. 1129 * We also set the full residual flag which the 1130 * sequencer will clear as soon as a data transfer 1131 * occurs. 
		 */
		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);

		/* Transfer direction selects the pre-DMA cache sync op. */
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;

			/*
			 * If the transfer is of an odd length and in the
			 * "in" direction (scsi->HostBus), then it may
			 * trigger a bug in the 'WideODD' feature of
			 * non-Ultra2 chips.  Force the total data-length
			 * to be even by adding an extra, 1 byte, SG,
			 * element.  We do this even if we are not currently
			 * negotiated wide as negotiation could occur before
			 * this command is executed.
			 */
			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
			 && (ccb->csio.dxfer_len & 0x1) != 0
			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {

				nsegments++;
				if (nsegments > AHC_NSEG) {

					ahc_set_transaction_status(scb,
					    CAM_REQ_TOO_BIG);
					bus_dmamap_unload(ahc->buffer_dmat,
							  scb->dmamap);
					ahc_lock(ahc, &s);
					ahc_free_scb(ahc, scb);
					ahc_unlock(ahc, &s);
					xpt_done(ccb);
					return;
				}
				/* Pad with a 1-byte SG to a scratch buffer. */
				sg->addr = ahc_htole32(ahc->dma_bug_buf);
				sg->len = ahc_htole32(1);
				sg++;
			}
		}
		/* Mark the last SG element so the sequencer stops there. */
		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}

	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	/* Fill in the negotiated transfer parameters for this target. */
	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;
	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	/*
	 * Request a negotiation message-out if the caller asked for
	 * renegotiation or if our goal settings differ from async/narrow.
	 */
	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->goal.width != 0
	  || tinfo->goal.offset != 0
	  || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		uint64_t time;

		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
			ccb->ccb_h.timeout = 5 * 1000;

		/* Convert the CCB timeout from milliseconds to ticks. */
		time = ccb->ccb_h.timeout;
		time *= hz;
		time /= 1000;
		ccb->ccb_h.timeout_ch =
		    timeout(ahc_timeout, (caddr_t)scb, time);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	 && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		/* Not at the head: an earlier untagged command must finish. */
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	ahc_unlock(ahc, &s);
}

/* Polled-mode entry point: service controller interrupts by hand. */
static void
ahc_poll(struct cam_sim *sim)
{
	struct ahc_softc *ahc;

	ahc = (struct ahc_softc *)cam_sim_softc(sim);
	ahc_intr(ahc);
}

/*
 * Copy the CDB from the CCB into the hardware SCB and start DMA
 * mapping of the data phase.  ahc_execute_scb() completes SCB setup
 * and queues the command once the mapping callback fires.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	csio->resid = 0;
	csio->sense_resid = 0;
	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			/* Oversized or physical-address CDBs are rejected. */
			if (hscb->cdb_len > sizeof(hscb->cdb32)
			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
				u_long s;

				ahc_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahc_lock(ahc, &s);
				ahc_free_scb(ahc, scb);
				ahc_unlock(ahc, &s);
				xpt_done((union ccb *)csio);
				return;
			}
			/* CDBs longer than 12 bytes use the cdb32 area. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
			}
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than can device max");

				seg.ds_addr = (bus_addr_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		/* No data phase; queue immediately with an empty SG list. */
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}

/*
 * Mark an SCB as a recovery SCB: freeze the SIM queue so no new
 * commands arrive while error recovery is in progress, and cancel the
 * timeouts of all pending SCBs (they are rescheduled once recovery
 * completes).  Idempotent for SCBs already flagged SCB_RECOVERY_SCB.
 */
static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {

	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
		struct scb *list_scb;

		scb->flags |= SCB_RECOVERY_SCB;

		/*
		 * Take all queued, but not sent SCBs out of the equation.
		 * Also ensure that no new CCBs are queued to us while we
		 * try to fix this problem.
		 */
		if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
			xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
			scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
		}

		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them.  We will reschedule
		 * them after we've successfully fixed this problem.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			union ccb *ccb;

			ccb = list_scb->io_ctx;
			untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
		}
	}
}

/*
 * Timeout handler for an active SCB (scheduled via timeout(9)).
 * Recovery escalates in stages: defer to another SCB's longer timeout,
 * queue a bus device reset (BDR) to the offending target, and — if the
 * SCB has already been through recovery (SCB_DEVICE_RESET/SCB_ABORT
 * set) — fall back to a full channel reset.
 */
void
ahc_timeout(void *arg)
{
	struct scb *scb;
	struct ahc_softc *ahc;
	long s;
	int found;
	u_int last_phase;
	int target;
	int lun;
	int i;
	char channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	/* Pause the sequencer and drain any posted completions. */
	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	/* Log diagnostics (card state, SG list) before attempting recovery. */
	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if ((ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) == 0
		  && (active_scb_index < ahc->scb_data->numscbs)) {
			struct scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				struct ccb_hdr *ccbh;
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
				       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				/* Reschedule using the longer of the two
				 * CCB timeouts, converted from ms to ticks. */
				newtimeout =
				    MAX(active_scb->io_ctx->ccb_h.timeout,
					scb->io_ctx->ccb_h.timeout);
				newtimeout *= hz;
				newtimeout /= 1000;
				ccbh = &scb->io_ctx->ccb_h;
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, scb, newtimeout);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->flags & SCB_TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
					       SCB_GET_CHANNEL(ahc, scb),
					       SCB_GET_LUN(scb),
					       scb->hscb->tag,
					       ROLE_TARGET,
					       CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* Ask the target to release the bus for a BDR. */
			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			active_scb->io_ctx->ccb_h.timeout_ch =
			    timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
			ahc_unpause(ahc);
		} else {
			int disconnected;

			/* XXX Shouldn't panic.  Just punt instead? */
			if ((scb->flags & SCB_TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* Still in the qinfifo means it never left us. */
			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
				ahc_unpause(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
				       "Flags = 0x%x\n", scb->hscb->tag,
				       scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}

/*
 * Handle an XPT_ABORT request.  Queued target-mode CCBs (accept-tio /
 * immediate-notify) are unlinked from their lists and completed with
 * CAM_REQ_ABORTED; aborting in-flight SCSI I/O is not implemented.
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct ahc_tmode_tstate *tstate;
		struct ahc_tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr
*curelm; 1733 int found; 1734 1735 curelm = SLIST_FIRST(list); 1736 found = 0; 1737 if (curelm == &abort_ccb->ccb_h) { 1738 found = 1; 1739 SLIST_REMOVE_HEAD(list, sim_links.sle); 1740 } else { 1741 while(curelm != NULL) { 1742 struct ccb_hdr *nextelm; 1743 1744 nextelm = 1745 SLIST_NEXT(curelm, sim_links.sle); 1746 1747 if (nextelm == &abort_ccb->ccb_h) { 1748 found = 1; 1749 SLIST_NEXT(curelm, 1750 sim_links.sle) = 1751 SLIST_NEXT(nextelm, 1752 sim_links.sle); 1753 break; 1754 } 1755 curelm = nextelm; 1756 } 1757 } 1758 1759 if (found) { 1760 abort_ccb->ccb_h.status = CAM_REQ_ABORTED; 1761 xpt_done(abort_ccb); 1762 ccb->ccb_h.status = CAM_REQ_CMP; 1763 } else { 1764 xpt_print_path(abort_ccb->ccb_h.path); 1765 printf("Not found\n"); 1766 ccb->ccb_h.status = CAM_PATH_INVALID; 1767 } 1768 break; 1769 } 1770 /* FALLTHROUGH */ 1771 } 1772 case XPT_SCSI_IO: 1773 /* XXX Fully implement the hard ones */ 1774 ccb->ccb_h.status = CAM_UA_ABORT; 1775 break; 1776 default: 1777 ccb->ccb_h.status = CAM_REQ_INVALID; 1778 break; 1779 } 1780 xpt_done(ccb); 1781 } 1782 1783 void 1784 ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, 1785 u_int lun, ac_code code, void *opt_arg) 1786 { 1787 struct ccb_trans_settings cts; 1788 struct cam_path *path; 1789 void *arg; 1790 int error; 1791 1792 arg = NULL; 1793 error = ahc_create_path(ahc, channel, target, lun, &path); 1794 1795 if (error != CAM_REQ_CMP) 1796 return; 1797 1798 switch (code) { 1799 case AC_TRANSFER_NEG: 1800 { 1801 #ifdef AHC_NEW_TRAN_SETTINGS 1802 struct ccb_trans_settings_scsi *scsi; 1803 1804 cts.type = CTS_TYPE_CURRENT_SETTINGS; 1805 scsi = &cts.proto_specific.scsi; 1806 #else 1807 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 1808 #endif 1809 cts.ccb_h.path = path; 1810 cts.ccb_h.target_id = target; 1811 cts.ccb_h.target_lun = lun; 1812 ahc_get_tran_settings(ahc, channel == 'A' ? 
ahc->our_id 1813 : ahc->our_id_b, 1814 channel, &cts); 1815 arg = &cts; 1816 #ifdef AHC_NEW_TRAN_SETTINGS 1817 scsi->valid &= ~CTS_SCSI_VALID_TQ; 1818 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 1819 #else 1820 cts.valid &= ~CCB_TRANS_TQ_VALID; 1821 cts.flags &= ~CCB_TRANS_TAG_ENB; 1822 #endif 1823 if (opt_arg == NULL) 1824 break; 1825 if (*((ahc_queue_alg *)opt_arg) == AHC_QUEUE_TAGGED) 1826 #ifdef AHC_NEW_TRAN_SETTINGS 1827 scsi->flags |= ~CTS_SCSI_FLAGS_TAG_ENB; 1828 scsi->valid |= CTS_SCSI_VALID_TQ; 1829 #else 1830 cts.flags |= CCB_TRANS_TAG_ENB; 1831 cts.valid |= CCB_TRANS_TQ_VALID; 1832 #endif 1833 break; 1834 } 1835 case AC_SENT_BDR: 1836 case AC_BUS_RESET: 1837 break; 1838 default: 1839 panic("ahc_send_async: Unexpected async event"); 1840 } 1841 xpt_async(code, path, arg); 1842 xpt_free_path(path); 1843 } 1844 1845 void 1846 ahc_platform_set_tags(struct ahc_softc *ahc, 1847 struct ahc_devinfo *devinfo, int enable) 1848 { 1849 } 1850 1851 int 1852 ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg) 1853 { 1854 ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF, 1855 M_NOWAIT | M_ZERO); 1856 if (ahc->platform_data == NULL) 1857 return (ENOMEM); 1858 return (0); 1859 } 1860 1861 void 1862 ahc_platform_free(struct ahc_softc *ahc) 1863 { 1864 struct ahc_platform_data *pdata; 1865 1866 pdata = ahc->platform_data; 1867 if (pdata != NULL) { 1868 if (pdata->regs != NULL) 1869 bus_release_resource(ahc->dev_softc, 1870 pdata->regs_res_type, 1871 pdata->regs_res_id, 1872 pdata->regs); 1873 1874 if (pdata->irq != NULL) 1875 bus_release_resource(ahc->dev_softc, 1876 pdata->irq_res_type, 1877 0, pdata->irq); 1878 1879 if (pdata->sim_b != NULL) { 1880 xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL); 1881 xpt_free_path(pdata->path_b); 1882 xpt_bus_deregister(cam_sim_path(pdata->sim_b)); 1883 cam_sim_free(pdata->sim_b, /*free_devq*/TRUE); 1884 } 1885 if (pdata->sim != NULL) { 1886 xpt_async(AC_LOST_DEVICE, pdata->path, NULL); 1887 
xpt_free_path(pdata->path); 1888 xpt_bus_deregister(cam_sim_path(pdata->sim)); 1889 cam_sim_free(pdata->sim, /*free_devq*/TRUE); 1890 } 1891 if (pdata->eh != NULL) 1892 EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh); 1893 free(ahc->platform_data, M_DEVBUF); 1894 } 1895 } 1896 1897 int 1898 ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc) 1899 { 1900 /* We don't sort softcs under FreeBSD so report equal always */ 1901 return (0); 1902 } 1903 1904 int 1905 ahc_detach(device_t dev) 1906 { 1907 struct ahc_softc *ahc; 1908 u_long l; 1909 u_long s; 1910 1911 ahc_list_lock(&l); 1912 device_printf(dev, "detaching device\n"); 1913 ahc = device_get_softc(dev); 1914 ahc = ahc_find_softc(ahc); 1915 if (ahc == NULL) { 1916 device_printf(dev, "aic7xxx already detached\n"); 1917 ahc_list_unlock(&l); 1918 return (ENOENT); 1919 } 1920 ahc_lock(ahc, &s); 1921 ahc_intr_enable(ahc, FALSE); 1922 bus_teardown_intr(dev, ahc->platform_data->irq, ahc->platform_data->ih); 1923 ahc_unlock(ahc, &s); 1924 ahc_free(ahc); 1925 ahc_list_unlock(&l); 1926 return (0); 1927 } 1928 1929 #if UNUSED 1930 static void 1931 ahc_dump_targcmd(struct target_cmd *cmd) 1932 { 1933 uint8_t *byte; 1934 uint8_t *last_byte; 1935 int i; 1936 1937 byte = &cmd->initiator_channel; 1938 /* Debugging info for received commands */ 1939 last_byte = &cmd[1].initiator_channel; 1940 1941 i = 0; 1942 while (byte < last_byte) { 1943 if (i == 0) 1944 printf("\t"); 1945 printf("%#x", *byte++); 1946 i++; 1947 if (i == 8) { 1948 printf("\n"); 1949 i = 0; 1950 } else { 1951 printf(", "); 1952 } 1953 } 1954 } 1955 #endif 1956 1957 static int 1958 ahc_modevent(module_t mod, int type, void *data) 1959 { 1960 /* XXX Deal with busy status on unload. */ 1961 return 0; 1962 } 1963 1964 static moduledata_t ahc_mod = { 1965 "ahc", 1966 ahc_modevent, 1967 NULL 1968 }; 1969 1970 DECLARE_MODULE(ahc, ahc_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); 1971 MODULE_DEPEND(ahc, cam, 1, 1, 1); 1972 MODULE_VERSION(ahc, 1); 1973