1 /* 2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers 3 * 4 * Copyright (c) 1994-2001 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * Alternatively, this software may be distributed under the terms of the 17 * GNU Public License ("GPL"). 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
 *
 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#11 $
 *
 * $FreeBSD$
 */

#include <dev/aic7xxx/aic7xxx_osm.h>
#include <dev/aic7xxx/aic7xxx_inline.h>

#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif

/* Alias the CCB's first SIM-private pointer slot for stashing our SCB. */
#define ccb_scb_ptr spriv_ptr0

devclass_t ahc_devclass;

#if UNUSED
static void	ahc_dump_targcmd(struct target_cmd *cmd);
#endif
static int	ahc_modevent(module_t mod, int type, void *data);
static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
static void	ahc_get_tran_settings(struct ahc_softc *ahc,
				      int our_id, char channel,
				      struct ccb_trans_settings *cts);
static void	ahc_async(void *callback_arg, uint32_t code,
			  struct cam_path *path, void *arg);
static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	ahc_poll(struct cam_sim *sim);
static void	ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
			       struct ccb_scsiio *csio, struct scb *scb);
static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
			      union ccb *ccb);
static int	ahc_create_path(struct ahc_softc *ahc,
				char channel, u_int target, u_int lun,
				struct cam_path **path);

static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);

/*
 * Construct a CAM path to the given target/lun.  Channel 'B' selects
 * the secondary (twin-channel) SIM; anything else selects the primary.
 */
static int
ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
	        u_int lun, struct cam_path **path)
{
	path_id_t path_id;

	if (channel == 'B')
		path_id = cam_sim_path(ahc->platform_data->sim_b);
	else
		path_id = cam_sim_path(ahc->platform_data->sim);

	return (xpt_create_path(path, /*periph*/NULL,
				path_id, target, lun));
}

/*
 * Hook ahc_platform_intr() up to the controller's IRQ resource.
 * Returns 0 on success or the bus_setup_intr() error code.
 */
int
ahc_map_int(struct ahc_softc *ahc)
{
	int error;

	/* Hook up our interrupt handler */
	error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
			       INTR_TYPE_CAM, ahc_platform_intr, ahc,
			       &ahc->platform_data->ih);

	if (error != 0)
		device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
			      error);
	return (error);
}

/*
 * Attach all the sub-devices we can find.
 *
 * Allocates a shared device queue, builds one CAM SIM (two for twin
 * channel controllers), registers the bus(es) and wildcard path(s),
 * and arms an AC_LOST_DEVICE async callback per bus.  On success a
 * shutdown_final eventhandler is registered and interrupts are enabled.
 * Returns the number of buses successfully attached (0 on failure).
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	char   ahc_info[256];
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int bus_id;
	int bus_id2;
	struct cam_sim *sim;
	struct cam_sim *sim2;
	struct cam_path *path;
	struct cam_path *path2;
	long s;			/* saved state cookie for ahc_lock() */
	int count;

	count = 0;
	sim = NULL;
	sim2 = NULL;

	ahc_controller_info(ahc, ahc_info);
	printf("%s\n", ahc_info);
	ahc_lock(ahc, &s);
	/*
	 * Attach secondary channel first if the user has
	 * declared it the primary channel.
	 */
	if ((ahc->features & AHC_TWIN) != 0
	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
		bus_id = 1;
		bus_id2 = 0;
	} else {
		bus_id = 0;
		bus_id2 = 1;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(AHC_MAX_QUEUE);
	if (devq == NULL)
		goto fail;

	/*
	 * Construct our first channel SIM entry
	 */
	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
			    device_get_unit(ahc->dev_softc),
			    1, AHC_MAX_QUEUE, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		goto fail;
	}

	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
		/* cam_sim_free with free_devq TRUE also releases devq */
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	if (xpt_create_path(&path, /*periph*/NULL,
			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	/* Ask CAM to notify us of lost devices on this bus. */
	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = ahc_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	count++;

	if (ahc->features & AHC_TWIN) {
		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
				    ahc, device_get_unit(ahc->dev_softc), 1,
				    AHC_MAX_QUEUE, devq);

		if (sim2 == NULL) {
			printf("ahc_attach: Unable to attach second "
			       "bus due to resource shortage");
			goto fail;
		}

		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
			printf("ahc_attach: Unable to attach second "
			       "bus due to resource shortage");
			/*
			 * We do not want to destroy the device queue
			 * because the first bus is using it.
			 */
			cam_sim_free(sim2, /*free_devq*/FALSE);
			goto fail;
		}

		if (xpt_create_path(&path2, /*periph*/NULL,
				    cam_sim_path(sim2),
				    CAM_TARGET_WILDCARD,
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(sim2));
			cam_sim_free(sim2, /*free_devq*/FALSE);
			sim2 = NULL;
			goto fail;
		}
		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = ahc_async;
		csa.callback_arg = sim2;
		xpt_action((union ccb *)&csa);
		count++;
	}

fail:
	/*
	 * Record whatever we managed to create.  If the primary channel
	 * was declared to be 'B', the roles of sim/sim2 are swapped.
	 * NOTE(review): path/path2 may be uninitialized here on early
	 * failure paths — confirm against the rest of the driver.
	 */
	if ((ahc->features & AHC_TWIN) != 0
	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
		ahc->platform_data->sim_b = sim;
		ahc->platform_data->path_b = path;
		ahc->platform_data->sim = sim2;
		ahc->platform_data->path = path2;
	} else {
		ahc->platform_data->sim = sim;
		ahc->platform_data->path = path;
		ahc->platform_data->sim_b = sim2;
		ahc->platform_data->path_b = path2;
	}

	if (count != 0) {
		/* We have to wait until after any system dumps... */
		ahc->platform_data->eh =
		    EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
					  ahc, SHUTDOWN_PRI_DEFAULT);
		ahc_intr_enable(ahc, TRUE);
	}

	ahc_unlock(ahc, &s);
	return (count);
}

/*
 * Catch an interrupt from the adapter
 */
void
ahc_platform_intr(void *arg)
{
	struct	ahc_softc *ahc;

	ahc = (struct ahc_softc *)arg;
	ahc_intr(ahc);
}

/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahc_done - scb %d\n", scb->hscb->tag));

	ccb = scb->io_ctx;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		/* Pull the SCB off its per-target untagged queue and
		 * kick the next untagged command for that target. */
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	/* Cancel the pending ahc_timeout() for this command. */
	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
	}

	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		struct cam_path *ccb_path;

		/*
		 * If we have finally disconnected, clean up our
		 * pending device state.
		 * XXX - There may be error states that cause where
		 * we will remain connected.
		 */
		ccb_path = ccb->ccb_h.path;
		if (ahc->pending_device != NULL
		 && xpt_path_comp(ahc->pending_device->path, ccb_path) == 0) {

			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				ahc->pending_device = NULL;
			} else {
				if (bootverbose) {
					xpt_print_path(ccb->ccb_h.path);
					printf("Still connected\n");
				}
				ahc_freeze_ccb(ccb);
			}
		}

		if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahc_free_scb(ahc, scb);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the recovery SCB completes, we have to be
	 * out of our timeout.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			union ccb *ccb;
			uint64_t time;

			ccb = list_scb->io_ctx;
			if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
				continue;

			/* Convert the CCB timeout (milliseconds) to ticks. */
			time = ccb->ccb_h.timeout;
			time *= hz;
			time /= 1000;
			ccb->ccb_h.timeout_ch =
			    timeout(ahc_timeout, list_scb, time);
		}

		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		ahc_print_path(ahc, scb);
		printf("no longer in timeout, status = %x\n",
		       ccb->ccb_h.status);
	}

	/* Don't clobber any existing error state */
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransfered data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahc_get_sense_buf(ahc, scb),
		       (ahc_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahc_free_scb(ahc, scb);
	xpt_done(ccb);
}

/*
 * CAM action entry point for this SIM: dispatch on the CCB function
 * code.  Handles initiator I/O, target mode CCBs, transfer-setting
 * get/set, geometry calculation, bus reset, and path inquiry.
 */
static void
ahc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	ahc_softc *ahc;
	struct	ahc_tmode_lstate *lstate;
	u_int	target_id;
	u_int	our_id;
	long	s;		/* saved state cookie for ahc_lock() */

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));

	ahc = (struct ahc_softc *)cam_sim_softc(sim);

	target_id = ccb->ccb_h.target_id;
	our_id = SIM_SCSI_ID(ahc, sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	{
		struct	ahc_tmode_tstate *tstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				/* Response from the black hole device */
				tstate = NULL;
				lstate = ahc->black_hole;
			} else {
				ccb->ccb_h.status = status;
				xpt_done(ccb);
				break;
			}
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
			/* Queue the ATIO for a future incoming command. */
			ahc_lock(ahc, &s);
			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
					  sim_links.sle);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
			ahc_unlock(ahc, &s);
			break;
		}

		/*
		 * The target_id represents the target we attempt to
		 * select.  In target mode, this is the initiator of
		 * the original command.
		 */
		our_id = target_id;
		target_id = ccb->csio.init_id;
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		struct	scb *scb;
		struct	hardware_scb *hscb;

		/* Initiator-role requests require the initiator role. */
		if ((ahc->flags & AHC_INITIATORROLE) == 0
		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &s);
		if ((scb = ahc_get_scb(ahc)) == NULL) {
			/* Out of SCBs: freeze the SIM queue and ask CAM
			 * to requeue this request later. */
			xpt_freeze_simq(sim, /*count*/1);
			ahc->flags |= AHC_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &s);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}
		ahc_unlock(ahc, &s);

		hscb = scb->hscb;

		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("start scb(%p)\n", scb));
		scb->io_ctx = ccb;
		/*
		 * So we can find the SCB when an abort is requested
		 */
		ccb->ccb_h.ccb_scb_ptr = scb;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
		hscb->lun = ccb->ccb_h.target_lun;
		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
			/* No CDB; MK_MESSAGE requests the BDR message. */
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0, 0);
		} else {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				struct target_data *tdata;

				tdata = &hscb->shared_data.tdata;
				if (ahc->pending_device == lstate)
					scb->flags |= SCB_TARGET_IMMEDIATE;
				hscb->control |= TARGET_SCB;
				tdata->target_phases = IDENTIFY_SEEN;
				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
					tdata->target_phases |= SPHASE_PENDING;
					tdata->scsi_status =
					    ccb->csio.scsi_status;
				}
				if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
					tdata->target_phases |= NO_DISCONNECT;

				tdata->initiator_tag = ccb->csio.tag_id;
			}
			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
				hscb->control |= ccb->csio.tag_action;

			ahc_setup_data(ahc, sim, &ccb->csio, scb);
		}
		break;
	}
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	{
		struct	ahc_tmode_tstate *tstate;
		struct	ahc_tmode_lstate *lstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
				  sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ahc_send_lstate_events(ahc, lstate);
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
		ahc_handle_en_lun(ahc, sim, ccb);
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		ahc_abort_ccb(ahc, sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
#ifdef AHC_NEW_TRAN_SETTINGS
		struct	ahc_devinfo devinfo;
		struct	ccb_trans_settings *cts;
		struct	ccb_trans_settings_scsi *scsi;
		struct	ccb_trans_settings_spi *spi;
		struct	ahc_initiator_tinfo *tinfo;
		struct	ahc_tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	update_type;

		cts = &ccb->cts;
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		/*
		 * Select which settings bank (goal/current vs. user)
		 * this request updates.
		 */
		update_type = 0;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
			tinfo->curr.protocol_version =
			    cts->protocol_version;
			tinfo->curr.transport_version =
			    cts->transport_version;
			tinfo->goal.protocol_version =
			    cts->protocol_version;
			tinfo->goal.transport_version =
			    cts->transport_version;
		} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
			tinfo->user.protocol_version =
			    cts->protocol_version;
			tinfo->user.transport_version =
			    cts->transport_version;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			ahc_validate_width(ahc, /*tinfo limit*/NULL,
					   &spi->bus_width, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, spi->bus_width,
				      update_type, /*paused*/FALSE);
		}

		/* Fill in any fields the caller left unspecified. */
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->ppr_options = tinfo->user.ppr_options;
			else
				spi->ppr_options = tinfo->goal.ppr_options;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_offset = tinfo->user.offset;
			else
				spi->sync_offset = tinfo->goal.offset;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_period = tinfo->user.period;
			else
				spi->sync_period = tinfo->goal.period;
		}

		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			/* DT transfers require a 16 bit bus. */
			if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
				spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
						     &spi->ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
					    syncrate, &spi->sync_offset,
					    spi->bus_width, ROLE_UNKNOWN);

			/* We use a period of 0 to represent async */
			if (spi->sync_offset == 0) {
				spi->sync_period = 0;
				spi->ppr_options = 0;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 spi->sync_period, spi->sync_offset,
					 spi->ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#else
		/* Legacy (pre-CTS_TYPE) transfer settings interface. */
		struct	  ahc_devinfo devinfo;
		struct	  ccb_trans_settings *cts;
		struct	  ahc_initiator_tinfo *tinfo;
		struct	  ahc_tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	  update_type;
		long	  s;

		cts = &ccb->cts;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
			ahc_validate_width(ahc, /*tinfo limit*/NULL,
					   &cts->bus_width, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, cts->bus_width,
				      update_type, /*paused*/FALSE);
		}

		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_offset = tinfo->user.offset;
			else
				cts->sync_offset = tinfo->goal.offset;
		}

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_period = tinfo->user.period;
			else
				cts->sync_period = tinfo->goal.period;
		}

		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int ppr_options;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			/* Periods <= 9 (DT speeds) imply PPR DT on wide. */
			ppr_options = 0;
			if (cts->sync_period <= 9
			 && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
				ppr_options = MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
						     &ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
					    syncrate, &cts->sync_offset,
					    MSG_EXT_WDTR_BUS_8_BIT,
					    ROLE_UNKNOWN);

			/* We use a period of 0 to represent async */
			if (cts->sync_offset == 0) {
				cts->sync_period = 0;
				ppr_options = 0;
			}

			if (ppr_options == MSG_EXT_PPR_DT_REQ
			 && tinfo->user.transport_version >= 3) {
				tinfo->goal.transport_version =
				    tinfo->user.transport_version;
				tinfo->curr.transport_version =
				    tinfo->user.transport_version;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 cts->sync_period, cts->sync_offset,
					 ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#endif
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{

		ahc_lock(ahc, &s);
		ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
				      SIM_CHANNEL(ahc, sim), &ccb->cts);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		uint32_t  size_mb;
		uint32_t  secs_per_cylinder;
		int	  extended;

		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = SIM_IS_SCSIBUS_B(ahc, sim)
			? ahc->flags & AHC_EXTENDED_TRANS_B
			: ahc->flags & AHC_EXTENDED_TRANS_A;

		/* Use extended geometry for volumes over 1GB. */
		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int  found;

		ahc_lock(ahc, &s);
		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
					  /*initiate reset*/TRUE);
		ahc_unlock(ahc, &s);
		if (bootverbose) {
			xpt_print_path(SIM_PATH(ahc, sim));
			printf("SCSI bus reset delivered. "
			       "%d SCBs aborted.\n", found);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((ahc->features & AHC_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		if ((ahc->features & AHC_TARGETMODE) != 0) {
			cpi->target_sprt = PIT_PROCESSOR
					 | PIT_DISCONNECT
					 | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
		cpi->max_lun = AHC_NUM_LUNS - 1;
		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
			cpi->initiator_id = ahc->our_id_b;
			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		} else {
			cpi->initiator_id = ahc->our_id;
			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		}
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
#ifdef AHC_NEW_TRAN_SETTINGS
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
		if ((ahc->features & AHC_DT) != 0) {
			cpi->transport_version = 3;
			cpi->xport_specific.spi.ppr_options =
			    SID_SPI_CLOCK_DT_ST;
		}
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		break;
	}
}

/*
 * Report current or user transfer settings for our_id/channel
 * into the supplied CTS.  Caller holds the controller lock.
 */
static void
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel, 927 struct ccb_trans_settings *cts) 928 { 929 #ifdef AHC_NEW_TRAN_SETTINGS 930 struct ahc_devinfo devinfo; 931 struct ccb_trans_settings_scsi *scsi; 932 struct ccb_trans_settings_spi *spi; 933 struct ahc_initiator_tinfo *targ_info; 934 struct ahc_tmode_tstate *tstate; 935 struct ahc_transinfo *tinfo; 936 937 scsi = &cts->proto_specific.scsi; 938 spi = &cts->xport_specific.spi; 939 ahc_compile_devinfo(&devinfo, our_id, 940 cts->ccb_h.target_id, 941 cts->ccb_h.target_lun, 942 channel, ROLE_UNKNOWN); 943 targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, 944 devinfo.our_scsiid, 945 devinfo.target, &tstate); 946 947 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 948 tinfo = &targ_info->curr; 949 else 950 tinfo = &targ_info->user; 951 952 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 953 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 954 if (cts->type == CTS_TYPE_USER_SETTINGS) { 955 if ((ahc->user_discenable & devinfo.target_mask) != 0) 956 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 957 958 if ((ahc->user_tagenable & devinfo.target_mask) != 0) 959 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 960 } else { 961 if ((tstate->discenable & devinfo.target_mask) != 0) 962 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 963 964 if ((tstate->tagenable & devinfo.target_mask) != 0) 965 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 966 } 967 cts->protocol_version = tinfo->protocol_version; 968 cts->transport_version = tinfo->transport_version; 969 970 spi->sync_period = tinfo->period; 971 spi->sync_offset = tinfo->offset; 972 spi->bus_width = tinfo->width; 973 spi->ppr_options = tinfo->ppr_options; 974 975 cts->protocol = PROTO_SCSI; 976 cts->transport = XPORT_SPI; 977 spi->valid = CTS_SPI_VALID_SYNC_RATE 978 | CTS_SPI_VALID_SYNC_OFFSET 979 | CTS_SPI_VALID_BUS_WIDTH 980 | CTS_SPI_VALID_PPR_OPTIONS; 981 982 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 983 scsi->valid = CTS_SCSI_VALID_TQ; 984 spi->valid |= CTS_SPI_VALID_DISC; 985 } else { 986 
scsi->valid = 0; 987 } 988 989 cts->ccb_h.status = CAM_REQ_CMP; 990 #else 991 struct ahc_devinfo devinfo; 992 struct ahc_initiator_tinfo *targ_info; 993 struct ahc_tmode_tstate *tstate; 994 struct ahc_transinfo *tinfo; 995 996 ahc_compile_devinfo(&devinfo, our_id, 997 cts->ccb_h.target_id, 998 cts->ccb_h.target_lun, 999 channel, ROLE_UNKNOWN); 1000 targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, 1001 devinfo.our_scsiid, 1002 devinfo.target, &tstate); 1003 1004 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) 1005 tinfo = &targ_info->curr; 1006 else 1007 tinfo = &targ_info->user; 1008 1009 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 1010 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) { 1011 if ((ahc->user_discenable & devinfo.target_mask) != 0) 1012 cts->flags |= CCB_TRANS_DISC_ENB; 1013 1014 if ((ahc->user_tagenable & devinfo.target_mask) != 0) 1015 cts->flags |= CCB_TRANS_TAG_ENB; 1016 } else { 1017 if ((tstate->discenable & devinfo.target_mask) != 0) 1018 cts->flags |= CCB_TRANS_DISC_ENB; 1019 1020 if ((tstate->tagenable & devinfo.target_mask) != 0) 1021 cts->flags |= CCB_TRANS_TAG_ENB; 1022 } 1023 cts->sync_period = tinfo->period; 1024 cts->sync_offset = tinfo->offset; 1025 cts->bus_width = tinfo->width; 1026 1027 cts->valid = CCB_TRANS_SYNC_RATE_VALID 1028 | CCB_TRANS_SYNC_OFFSET_VALID 1029 | CCB_TRANS_BUS_WIDTH_VALID; 1030 1031 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) 1032 cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID; 1033 1034 cts->ccb_h.status = CAM_REQ_CMP; 1035 #endif 1036 } 1037 1038 static void 1039 ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) 1040 { 1041 struct ahc_softc *ahc; 1042 struct cam_sim *sim; 1043 1044 sim = (struct cam_sim *)callback_arg; 1045 ahc = (struct ahc_softc *)cam_sim_softc(sim); 1046 switch (code) { 1047 case AC_LOST_DEVICE: 1048 { 1049 struct ahc_devinfo devinfo; 1050 long s; 1051 1052 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 1053 
xpt_path_target_id(path), 1054 xpt_path_lun_id(path), 1055 SIM_CHANNEL(ahc, sim), 1056 ROLE_UNKNOWN); 1057 1058 /* 1059 * Revert to async/narrow transfers 1060 * for the next device. 1061 */ 1062 ahc_lock(ahc, &s); 1063 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, 1064 AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE); 1065 ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL, 1066 /*period*/0, /*offset*/0, /*ppr_options*/0, 1067 AHC_TRANS_GOAL|AHC_TRANS_CUR, 1068 /*paused*/FALSE); 1069 ahc_unlock(ahc, &s); 1070 break; 1071 } 1072 default: 1073 break; 1074 } 1075 } 1076 1077 static void 1078 ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, 1079 int error) 1080 { 1081 struct scb *scb; 1082 union ccb *ccb; 1083 struct ahc_softc *ahc; 1084 struct ahc_initiator_tinfo *tinfo; 1085 struct ahc_tmode_tstate *tstate; 1086 u_int mask; 1087 long s; 1088 1089 scb = (struct scb *)arg; 1090 ccb = scb->io_ctx; 1091 ahc = scb->ahc_softc; 1092 1093 if (error != 0) { 1094 if (error == EFBIG) 1095 ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG); 1096 else 1097 ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR); 1098 if (nsegments != 0) 1099 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); 1100 ahc_lock(ahc, &s); 1101 ahc_free_scb(ahc, scb); 1102 ahc_unlock(ahc, &s); 1103 xpt_done(ccb); 1104 return; 1105 } 1106 if (nsegments != 0) { 1107 struct ahc_dma_seg *sg; 1108 bus_dma_segment_t *end_seg; 1109 bus_dmasync_op_t op; 1110 1111 end_seg = dm_segs + nsegments; 1112 1113 /* Copy the segments into our SG list */ 1114 sg = scb->sg_list; 1115 while (dm_segs < end_seg) { 1116 uint32_t len; 1117 1118 sg->addr = ahc_htole32(dm_segs->ds_addr); 1119 len = dm_segs->ds_len 1120 | ((dm_segs->ds_addr >> 8) & 0x7F000000); 1121 sg->len = ahc_htole32(len); 1122 sg++; 1123 dm_segs++; 1124 } 1125 1126 /* 1127 * Note where to find the SG entries in bus space. 1128 * We also set the full residual flag which the 1129 * sequencer will clear as soon as a data transfer 1130 * occurs. 
		 */
		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;

			/*
			 * If the transfer is of an odd length and in the
			 * "in" direction (scsi->HostBus), then it may
			 * trigger a bug in the 'WideODD' feature of
			 * non-Ultra2 chips.  Force the total data-length
			 * to be even by adding an extra, 1 byte, SG,
			 * element.  We do this even if we are not currently
			 * negotiated wide as negotiation could occur before
			 * this command is executed.
			 */
			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
			 && (ccb->csio.dxfer_len & 0x1) != 0
			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {

				nsegments++;
				if (nsegments > AHC_NSEG) {

					/* No room for the pad segment. */
					ahc_set_transaction_status(scb,
					    CAM_REQ_TOO_BIG);
					bus_dmamap_unload(ahc->buffer_dmat,
							  scb->dmamap);
					ahc_lock(ahc, &s);
					ahc_free_scb(ahc, scb);
					ahc_unlock(ahc, &s);
					xpt_done(ccb);
					return;
				}
				/* 1-byte pad segment from the scratch buffer. */
				sg->addr = ahc_htole32(ahc->dma_bug_buf);
				sg->len = ahc_htole32(1);
				sg++;
			}
		}
		/* Mark the final SG element so the sequencer stops there. */
		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		/* No data phase. */
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}

	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	/* Seed the hardware SCB with the negotiated transfer parameters. */
	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;
	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->goal.width != 0
	  || tinfo->goal.offset != 0
	  || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		uint64_t time;

		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
			ccb->ccb_h.timeout = 5 * 1000;

		/* Convert from milliseconds to ticks (64-bit to avoid overflow). */
		time = ccb->ccb_h.timeout;
		time *= hz;
		time /= 1000;
		ccb->ccb_h.timeout_ch =
		    timeout(ahc_timeout, (caddr_t)scb, time);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	 && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		/* Only the queue head may run; later SCBs wait their turn. */
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	ahc_unlock(ahc, &s);
}

/*
 * CAM polling entry point: service the controller's interrupt
 * condition synchronously (used when interrupts are unavailable,
 * e.g. dumping or kernel debugger).
 */
static void
ahc_poll(struct cam_sim *sim)
{
	struct ahc_softc *ahc;

	ahc = (struct ahc_softc *)cam_sim_softc(sim);
	ahc_intr(ahc);
}

/*
 * Copy the CDB into the hardware SCB and kick off mapping of the data
 * buffer (if any).  ahc_execute_scb() runs as the busdma callback and
 * completes SCB setup; invalid CDB layouts fail the CCB immediately.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	csio->resid = 0;
	csio->sense_resid = 0;
	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			/* Oversized or physical-address CDBs are rejected. */
			if (hscb->cdb_len > sizeof(hscb->cdb32)
			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
				u_long s;

				ahc_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahc_lock(ahc, &s);
				ahc_free_scb(ahc, scb);
				ahc_unlock(ahc, &s);
				xpt_done((union ccb *)csio);
				return;
			}
			if (hscb->cdb_len > 12) {
				/* Long CDBs live in the cdb32 area. */
				memcpy(hscb->cdb32,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			/* CDB bytes are embedded in the CCB itself. */
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
			}
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than can device max");

				seg.ds_addr = (bus_addr_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		/* No data phase: complete SCB setup immediately. */
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}

/*
 * Mark an SCB as the one driving error recovery: freeze the SIM queue
 * so no new work arrives, and cancel the timeouts of all other pending
 * SCBs (they are rescheduled once recovery succeeds).  Idempotent via
 * the SCB_RECOVERY_SCB flag.
 */
static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {

	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
		struct scb *list_scb;

		scb->flags |= SCB_RECOVERY_SCB;

		/*
		 * Take all queued, but not sent SCBs out of the equation.
		 * Also ensure that no new CCBs are queued to us while we
		 * try to fix this problem.
		 */
		if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
			xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
			scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
		}

		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them.  We will reschedule
		 * them after we've successfully fixed this problem.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			union ccb *ccb;

			ccb = list_scb->io_ctx;
			untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
		}
	}
}

/*
 * Per-SCB command timeout handler.  Escalates through: waiting on a
 * longer-lived active SCB, queuing a bus device reset (BDR) to the
 * offending target, and finally a full channel reset.  Runs from the
 * timeout(9) callout with the controller paused.
 */
void
ahc_timeout(void *arg)
{
	struct	scb *scb;
	struct	ahc_softc *ahc;
	long	s;
	int	found;
	u_int	last_phase;
	int	target;
	int	lun;
	int	i;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	ahc_pause_and_flushwork(ahc);

	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	/* Dump diagnostic state before attempting recovery. */
	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
1503 * 1504 * The target/initiator that is holding up the bus may not 1505 * be the same as the one that triggered this timeout 1506 * (different commands have different timeout lengths). 1507 * If the bus is idle and we are actiing as the initiator 1508 * for this request, queue a BDR message to the timed out 1509 * target. Otherwise, if the timed out transaction is 1510 * active: 1511 * Initiator transaction: 1512 * Stuff the message buffer with a BDR message and assert 1513 * ATN in the hopes that the target will let go of the bus 1514 * and go to the mesgout phase. If this fails, we'll 1515 * get another timeout 2 seconds later which will attempt 1516 * a bus reset. 1517 * 1518 * Target transaction: 1519 * Transition to BUS FREE and report the error. 1520 * It's good to be the target! 1521 */ 1522 u_int active_scb_index; 1523 u_int saved_scbptr; 1524 1525 saved_scbptr = ahc_inb(ahc, SCBPTR); 1526 active_scb_index = ahc_inb(ahc, SCB_TAG); 1527 1528 if ((ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) != 0 1529 && (active_scb_index < ahc->scb_data->numscbs)) { 1530 struct scb *active_scb; 1531 1532 /* 1533 * If the active SCB is not us, assume that 1534 * the active SCB has a longer timeout than 1535 * the timedout SCB, and wait for the active 1536 * SCB to timeout. 1537 */ 1538 active_scb = ahc_lookup_scb(ahc, active_scb_index); 1539 if (active_scb != scb) { 1540 struct ccb_hdr *ccbh; 1541 uint64_t newtimeout; 1542 1543 ahc_print_path(ahc, scb); 1544 printf("Other SCB Timeout%s", 1545 (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0 1546 ? 
" again\n" : "\n"); 1547 scb->flags |= SCB_OTHERTCL_TIMEOUT; 1548 newtimeout = 1549 MAX(active_scb->io_ctx->ccb_h.timeout, 1550 scb->io_ctx->ccb_h.timeout); 1551 newtimeout *= hz; 1552 newtimeout /= 1000; 1553 ccbh = &scb->io_ctx->ccb_h; 1554 scb->io_ctx->ccb_h.timeout_ch = 1555 timeout(ahc_timeout, scb, newtimeout); 1556 ahc_unpause(ahc); 1557 ahc_unlock(ahc, &s); 1558 return; 1559 } 1560 1561 /* It's us */ 1562 if ((scb->hscb->control & TARGET_SCB) != 0) { 1563 1564 /* 1565 * Send back any queued up transactions 1566 * and properly record the error condition. 1567 */ 1568 ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb), 1569 SCB_GET_CHANNEL(ahc, scb), 1570 SCB_GET_LUN(scb), 1571 scb->hscb->tag, 1572 ROLE_TARGET, 1573 CAM_CMD_TIMEOUT); 1574 1575 /* Will clear us from the bus */ 1576 ahc_restart(ahc); 1577 ahc_unlock(ahc, &s); 1578 return; 1579 } 1580 1581 ahc_set_recoveryscb(ahc, active_scb); 1582 ahc_outb(ahc, MSG_OUT, HOST_MSG); 1583 ahc_outb(ahc, SCSISIGO, last_phase|ATNO); 1584 ahc_print_path(ahc, active_scb); 1585 printf("BDR message in message buffer\n"); 1586 active_scb->flags |= SCB_DEVICE_RESET; 1587 active_scb->io_ctx->ccb_h.timeout_ch = 1588 timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz); 1589 ahc_unpause(ahc); 1590 } else { 1591 int disconnected; 1592 1593 /* XXX Shouldn't panic. Just punt instead? */ 1594 if ((scb->hscb->control & TARGET_SCB) != 0) 1595 panic("Timed-out target SCB but bus idle"); 1596 1597 if (last_phase != P_BUSFREE 1598 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) { 1599 /* XXX What happened to the SCB? */ 1600 /* Hung target selection. 
Goto busfree */ 1601 printf("%s: Hung target selection\n", 1602 ahc_name(ahc)); 1603 ahc_restart(ahc); 1604 ahc_unlock(ahc, &s); 1605 return; 1606 } 1607 1608 if (ahc_search_qinfifo(ahc, target, channel, lun, 1609 scb->hscb->tag, ROLE_INITIATOR, 1610 /*status*/0, SEARCH_COUNT) > 0) { 1611 disconnected = FALSE; 1612 } else { 1613 disconnected = TRUE; 1614 } 1615 1616 if (disconnected) { 1617 1618 ahc_set_recoveryscb(ahc, scb); 1619 /* 1620 * Actually re-queue this SCB in an attempt 1621 * to select the device before it reconnects. 1622 * In either case (selection or reselection), 1623 * we will now issue a target reset to the 1624 * timed-out device. 1625 * 1626 * Set the MK_MESSAGE control bit indicating 1627 * that we desire to send a message. We 1628 * also set the disconnected flag since 1629 * in the paging case there is no guarantee 1630 * that our SCB control byte matches the 1631 * version on the card. We don't want the 1632 * sequencer to abort the command thinking 1633 * an unsolicited reselection occurred. 1634 */ 1635 scb->hscb->control |= MK_MESSAGE|DISCONNECTED; 1636 scb->flags |= SCB_DEVICE_RESET; 1637 1638 /* 1639 * Remove any cached copy of this SCB in the 1640 * disconnected list in preparation for the 1641 * queuing of our abort SCB. We use the 1642 * same element in the SCB, SCB_NEXT, for 1643 * both the qinfifo and the disconnected list. 1644 */ 1645 ahc_search_disc_list(ahc, target, channel, 1646 lun, scb->hscb->tag, 1647 /*stop_on_first*/TRUE, 1648 /*remove*/TRUE, 1649 /*save_state*/FALSE); 1650 1651 /* 1652 * In the non-paging case, the sequencer will 1653 * never re-reference the in-core SCB. 1654 * To make sure we are notified during 1655 * reslection, set the MK_MESSAGE flag in 1656 * the card's copy of the SCB. 
1657 */ 1658 if ((ahc->flags & AHC_PAGESCBS) == 0) { 1659 ahc_outb(ahc, SCBPTR, scb->hscb->tag); 1660 ahc_outb(ahc, SCB_CONTROL, 1661 ahc_inb(ahc, SCB_CONTROL) 1662 | MK_MESSAGE); 1663 } 1664 1665 /* 1666 * Clear out any entries in the QINFIFO first 1667 * so we are the next SCB for this target 1668 * to run. 1669 */ 1670 ahc_search_qinfifo(ahc, 1671 SCB_GET_TARGET(ahc, scb), 1672 channel, SCB_GET_LUN(scb), 1673 SCB_LIST_NULL, 1674 ROLE_INITIATOR, 1675 CAM_REQUEUE_REQ, 1676 SEARCH_COMPLETE); 1677 ahc_print_path(ahc, scb); 1678 printf("Queuing a BDR SCB\n"); 1679 ahc_qinfifo_requeue_tail(ahc, scb); 1680 ahc_outb(ahc, SCBPTR, saved_scbptr); 1681 scb->io_ctx->ccb_h.timeout_ch = 1682 timeout(ahc_timeout, (caddr_t)scb, 2 * hz); 1683 ahc_unpause(ahc); 1684 } else { 1685 /* Go "immediatly" to the bus reset */ 1686 /* This shouldn't happen */ 1687 ahc_set_recoveryscb(ahc, scb); 1688 ahc_print_path(ahc, scb); 1689 printf("SCB %d: Immediate reset. " 1690 "Flags = 0x%x\n", scb->hscb->tag, 1691 scb->flags); 1692 goto bus_reset; 1693 } 1694 } 1695 } 1696 ahc_unlock(ahc, &s); 1697 } 1698 1699 static void 1700 ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 1701 { 1702 union ccb *abort_ccb; 1703 1704 abort_ccb = ccb->cab.abort_ccb; 1705 switch (abort_ccb->ccb_h.func_code) { 1706 case XPT_ACCEPT_TARGET_IO: 1707 case XPT_IMMED_NOTIFY: 1708 case XPT_CONT_TARGET_IO: 1709 { 1710 struct ahc_tmode_tstate *tstate; 1711 struct ahc_tmode_lstate *lstate; 1712 struct ccb_hdr_slist *list; 1713 cam_status status; 1714 1715 status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate, 1716 &lstate, TRUE); 1717 1718 if (status != CAM_REQ_CMP) { 1719 ccb->ccb_h.status = status; 1720 break; 1721 } 1722 1723 if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) 1724 list = &lstate->accept_tios; 1725 else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) 1726 list = &lstate->immed_notifies; 1727 else 1728 list = NULL; 1729 1730 if (list != NULL) { 1731 struct ccb_hdr 
*curelm; 1732 int found; 1733 1734 curelm = SLIST_FIRST(list); 1735 found = 0; 1736 if (curelm == &abort_ccb->ccb_h) { 1737 found = 1; 1738 SLIST_REMOVE_HEAD(list, sim_links.sle); 1739 } else { 1740 while(curelm != NULL) { 1741 struct ccb_hdr *nextelm; 1742 1743 nextelm = 1744 SLIST_NEXT(curelm, sim_links.sle); 1745 1746 if (nextelm == &abort_ccb->ccb_h) { 1747 found = 1; 1748 SLIST_NEXT(curelm, 1749 sim_links.sle) = 1750 SLIST_NEXT(nextelm, 1751 sim_links.sle); 1752 break; 1753 } 1754 curelm = nextelm; 1755 } 1756 } 1757 1758 if (found) { 1759 abort_ccb->ccb_h.status = CAM_REQ_ABORTED; 1760 xpt_done(abort_ccb); 1761 ccb->ccb_h.status = CAM_REQ_CMP; 1762 } else { 1763 xpt_print_path(abort_ccb->ccb_h.path); 1764 printf("Not found\n"); 1765 ccb->ccb_h.status = CAM_PATH_INVALID; 1766 } 1767 break; 1768 } 1769 /* FALLTHROUGH */ 1770 } 1771 case XPT_SCSI_IO: 1772 /* XXX Fully implement the hard ones */ 1773 ccb->ccb_h.status = CAM_UA_ABORT; 1774 break; 1775 default: 1776 ccb->ccb_h.status = CAM_REQ_INVALID; 1777 break; 1778 } 1779 xpt_done(ccb); 1780 } 1781 1782 void 1783 ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, 1784 u_int lun, ac_code code, void *opt_arg) 1785 { 1786 struct ccb_trans_settings cts; 1787 struct cam_path *path; 1788 void *arg; 1789 int error; 1790 1791 arg = NULL; 1792 error = ahc_create_path(ahc, channel, target, lun, &path); 1793 1794 if (error != CAM_REQ_CMP) 1795 return; 1796 1797 switch (code) { 1798 case AC_TRANSFER_NEG: 1799 { 1800 #ifdef AHC_NEW_TRAN_SETTINGS 1801 struct ccb_trans_settings_scsi *scsi; 1802 1803 cts.type = CTS_TYPE_CURRENT_SETTINGS; 1804 scsi = &cts.proto_specific.scsi; 1805 #else 1806 cts.flags = CCB_TRANS_CURRENT_SETTINGS; 1807 #endif 1808 cts.ccb_h.path = path; 1809 cts.ccb_h.target_id = target; 1810 cts.ccb_h.target_lun = lun; 1811 ahc_get_tran_settings(ahc, channel == 'A' ? 
ahc->our_id 1812 : ahc->our_id_b, 1813 channel, &cts); 1814 arg = &cts; 1815 #ifdef AHC_NEW_TRAN_SETTINGS 1816 scsi->valid &= ~CTS_SCSI_VALID_TQ; 1817 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 1818 #else 1819 cts.valid &= ~CCB_TRANS_TQ_VALID; 1820 cts.flags &= ~CCB_TRANS_TAG_ENB; 1821 #endif 1822 if (opt_arg == NULL) 1823 break; 1824 if (*((ahc_queue_alg *)opt_arg) == AHC_QUEUE_TAGGED) 1825 #ifdef AHC_NEW_TRAN_SETTINGS 1826 scsi->flags |= ~CTS_SCSI_FLAGS_TAG_ENB; 1827 scsi->valid |= CTS_SCSI_VALID_TQ; 1828 #else 1829 cts.flags |= CCB_TRANS_TAG_ENB; 1830 cts.valid |= CCB_TRANS_TQ_VALID; 1831 #endif 1832 break; 1833 } 1834 case AC_SENT_BDR: 1835 case AC_BUS_RESET: 1836 break; 1837 default: 1838 panic("ahc_send_async: Unexpected async event"); 1839 } 1840 xpt_async(code, path, arg); 1841 xpt_free_path(path); 1842 } 1843 1844 void 1845 ahc_platform_set_tags(struct ahc_softc *ahc, 1846 struct ahc_devinfo *devinfo, int enable) 1847 { 1848 } 1849 1850 int 1851 ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg) 1852 { 1853 ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF, 1854 M_NOWAIT | M_ZERO); 1855 if (ahc->platform_data == NULL) 1856 return (ENOMEM); 1857 return (0); 1858 } 1859 1860 void 1861 ahc_platform_free(struct ahc_softc *ahc) 1862 { 1863 struct ahc_platform_data *pdata; 1864 1865 pdata = ahc->platform_data; 1866 if (pdata != NULL) { 1867 if (pdata->regs != NULL) 1868 bus_release_resource(ahc->dev_softc, 1869 pdata->regs_res_type, 1870 pdata->regs_res_id, 1871 pdata->regs); 1872 1873 if (pdata->irq != NULL) 1874 bus_release_resource(ahc->dev_softc, 1875 pdata->irq_res_type, 1876 0, pdata->irq); 1877 1878 if (pdata->sim_b != NULL) { 1879 xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL); 1880 xpt_free_path(pdata->path_b); 1881 xpt_bus_deregister(cam_sim_path(pdata->sim_b)); 1882 cam_sim_free(pdata->sim_b, /*free_devq*/TRUE); 1883 } 1884 if (pdata->sim != NULL) { 1885 xpt_async(AC_LOST_DEVICE, pdata->path, NULL); 1886 
xpt_free_path(pdata->path); 1887 xpt_bus_deregister(cam_sim_path(pdata->sim)); 1888 cam_sim_free(pdata->sim, /*free_devq*/TRUE); 1889 } 1890 if (pdata->eh != NULL) 1891 EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh); 1892 free(ahc->platform_data, M_DEVBUF); 1893 } 1894 } 1895 1896 int 1897 ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc) 1898 { 1899 /* We don't sort softcs under FreeBSD so report equal always */ 1900 return (0); 1901 } 1902 1903 int 1904 ahc_detach(device_t dev) 1905 { 1906 struct ahc_softc *ahc; 1907 u_long l; 1908 u_long s; 1909 1910 ahc_list_lock(&l); 1911 device_printf(dev, "detaching device\n"); 1912 ahc = device_get_softc(dev); 1913 ahc = ahc_find_softc(ahc); 1914 if (ahc == NULL) { 1915 device_printf(dev, "aic7xxx already detached\n"); 1916 ahc_list_unlock(&l); 1917 return (ENOENT); 1918 } 1919 ahc_lock(ahc, &s); 1920 ahc_intr_enable(ahc, FALSE); 1921 bus_teardown_intr(dev, ahc->platform_data->irq, ahc->platform_data->ih); 1922 ahc_unlock(ahc, &s); 1923 ahc_free(ahc); 1924 ahc_list_unlock(&l); 1925 return (0); 1926 } 1927 1928 #if UNUSED 1929 static void 1930 ahc_dump_targcmd(struct target_cmd *cmd) 1931 { 1932 uint8_t *byte; 1933 uint8_t *last_byte; 1934 int i; 1935 1936 byte = &cmd->initiator_channel; 1937 /* Debugging info for received commands */ 1938 last_byte = &cmd[1].initiator_channel; 1939 1940 i = 0; 1941 while (byte < last_byte) { 1942 if (i == 0) 1943 printf("\t"); 1944 printf("%#x", *byte++); 1945 i++; 1946 if (i == 8) { 1947 printf("\n"); 1948 i = 0; 1949 } else { 1950 printf(", "); 1951 } 1952 } 1953 } 1954 #endif 1955 1956 static int 1957 ahc_modevent(module_t mod, int type, void *data) 1958 { 1959 /* XXX Deal with busy status on unload. */ 1960 return 0; 1961 } 1962 1963 static moduledata_t ahc_mod = { 1964 "ahc", 1965 ahc_modevent, 1966 NULL 1967 }; 1968 1969 DECLARE_MODULE(ahc, ahc_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE); 1970 MODULE_DEPEND(ahc, cam, 1, 1, 1); 1971 MODULE_VERSION(ahc, 1); 1972