/*
 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id$
 *
 * $FreeBSD$
 */

#include <dev/aic7xxx/aic7xxx_osm.h>
#include <dev/aic7xxx/aic7xxx_inline.h>

#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif

#define ccb_scb_ptr spriv_ptr0

#if UNUSED
static void	ahc_dump_targcmd(struct target_cmd *cmd);
#endif
static int	ahc_modevent(module_t mod, int type, void *data);
static void	ahc_action(struct cam_sim *sim, union ccb *ccb);
static void	ahc_get_tran_settings(struct ahc_softc *ahc,
				      int our_id, char channel,
				      struct ccb_trans_settings *cts);
static void	ahc_async(void *callback_arg, uint32_t code,
			  struct cam_path *path, void *arg);
static void	ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	ahc_poll(struct cam_sim *sim);
static void	ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
			       struct ccb_scsiio *csio, struct scb *scb);
static void	ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim,
			      union ccb *ccb);
static int	ahc_create_path(struct ahc_softc *ahc,
				char channel, u_int target, u_int lun,
				struct cam_path **path);

static void	ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb);

static int
ahc_create_path(struct ahc_softc *ahc, char channel, u_int target,
		u_int lun, struct cam_path **path)
{
	path_id_t path_id;

	if (channel == 'B')
		path_id = cam_sim_path(ahc->platform_data->sim_b);
	else
		path_id = cam_sim_path(ahc->platform_data->sim);

	return (xpt_create_path(path, /*periph*/NULL,
				path_id, target, lun));
}

int
ahc_map_int(struct ahc_softc *ahc)
{
	int error;

	/* Hook up our interrupt handler */
	error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq,
			       INTR_TYPE_CAM, ahc_platform_intr, ahc,
			       &ahc->platform_data->ih);

	if (error != 0)
		device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n",
			      error);
	return (error);
}
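
/*
 * Note that on TWIN channel controllers both SIMs share one cam_devq,
 * which is why a failed second bus registration frees its SIM without
 * the devq; the AHC_PRIMARY_CHANNEL flag merely swaps which channel
 * registers as the first bus.
 */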

/*
 * Attach all the sub-devices we can find
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	char	ahc_info[256];
	struct	ccb_setasync csa;
	struct	cam_devq *devq;
	int	bus_id;
	int	bus_id2;
	struct	cam_sim *sim;
	struct	cam_sim *sim2;
	struct	cam_path *path;
	struct	cam_path *path2;
	long	s;
	int	count;

	count = 0;
	sim = NULL;
	sim2 = NULL;

	ahc_controller_info(ahc, ahc_info);
	printf("%s\n", ahc_info);
	ahc_lock(ahc, &s);
	/*
	 * Attach secondary channel first if the user has
	 * declared it the primary channel.
	 */
	if ((ahc->features & AHC_TWIN) != 0
	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
		bus_id = 1;
		bus_id2 = 0;
	} else {
		bus_id = 0;
		bus_id2 = 1;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(AHC_MAX_QUEUE);
	if (devq == NULL)
		goto fail;

	/*
	 * Construct our first channel SIM entry
	 */
	sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc,
			    device_get_unit(ahc->dev_softc),
			    1, AHC_MAX_QUEUE, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		goto fail;
	}

	if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) {
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	if (xpt_create_path(&path, /*periph*/NULL,
			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = ahc_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	count++;

	if (ahc->features & AHC_TWIN) {
		sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc",
				    ahc, device_get_unit(ahc->dev_softc), 1,
				    AHC_MAX_QUEUE, devq);

		if (sim2 == NULL) {
			printf("ahc_attach: Unable to attach second "
			       "bus due to resource shortage");
			goto fail;
		}

		if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) {
			printf("ahc_attach: Unable to attach second "
			       "bus due to resource shortage");
			/*
			 * We do not want to destroy the device queue
			 * because the first bus is using it.
			 */
			cam_sim_free(sim2, /*free_devq*/FALSE);
			goto fail;
		}

		if (xpt_create_path(&path2, /*periph*/NULL,
				    cam_sim_path(sim2),
				    CAM_TARGET_WILDCARD,
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_bus_deregister(cam_sim_path(sim2));
			cam_sim_free(sim2, /*free_devq*/FALSE);
			sim2 = NULL;
			goto fail;
		}
		xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = ahc_async;
		csa.callback_arg = sim2;
		xpt_action((union ccb *)&csa);
		count++;
	}

fail:
	if ((ahc->features & AHC_TWIN) != 0
	 && (ahc->flags & AHC_PRIMARY_CHANNEL) != 0) {
		ahc->platform_data->sim_b = sim;
		ahc->platform_data->path_b = path;
		ahc->platform_data->sim = sim2;
		ahc->platform_data->path = path2;
	} else {
		ahc->platform_data->sim = sim;
		ahc->platform_data->path = path;
		ahc->platform_data->sim_b = sim2;
		ahc->platform_data->path_b = path2;
	}
	ahc_unlock(ahc, &s);

	if (count != 0)
		/* We have to wait until after any system dumps... */
		ahc->platform_data->eh =
		    EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown,
					  ahc, SHUTDOWN_PRI_DEFAULT);

	return (count);
}
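
/*
 * Note that ahc_attach() returns the number of buses it registered
 * (0, 1, or 2) rather than a standard errno value.
 */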

/*
 * Catch an interrupt from the adapter
 */
void
ahc_platform_intr(void *arg)
{
	struct	ahc_softc *ahc;

	ahc = (struct ahc_softc *)arg;
	ahc_intr(ahc);
}

/*
 * We have an scb which has been processed by the
 * adapter; now we look to see how the operation
 * went.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahc_done - scb %d\n", scb->hscb->tag));

	ccb = scb->io_ctx;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}
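
	/*
	 * Cancel the watchdog armed in ahc_execute_scb(); this command
	 * has completed, so its timeout no longer applies.
	 */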
	untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
	}

	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		struct cam_path *ccb_path;

		/*
		 * If we have finally disconnected, clean up our
		 * pending device state.
		 * XXX - There may be error states that cause us
		 *       to remain connected.
		 */
		ccb_path = ccb->ccb_h.path;
		if (ahc->pending_device != NULL
		 && xpt_path_comp(ahc->pending_device->path, ccb_path) == 0) {

			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				ahc->pending_device = NULL;
			} else {
				if (bootverbose) {
					xpt_print_path(ccb->ccb_h.path);
					printf("Still connected\n");
				}
				ahc_freeze_ccb(ccb);
			}
		}

		if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahc_free_scb(ahc, scb);
		xpt_done(ccb);
		return;
	}

	/*
	 * If the recovery SCB completes, we are no longer
	 * in timeout recovery.
	 */
	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct	scb *list_scb;

		/*
		 * We were able to complete the command successfully,
		 * so reinstate the timeouts for all other pending
		 * commands.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			union ccb *ccb;
			uint64_t time;

			ccb = list_scb->io_ctx;
			if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
				continue;

			time = ccb->ccb_h.timeout;
			time *= hz;
			time /= 1000;
			ccb->ccb_h.timeout_ch =
			    timeout(ahc_timeout, list_scb, time);
		}

		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
		 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED)
			ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		ahc_print_path(ahc, scb);
		printf("no longer in timeout, status = %x\n",
		       ccb->ccb_h.status);
	}

	/* Don't clobber any existing error state */
	if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the client's csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahc_get_sense_buf(ahc, scb),
		       (ahc_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahc_free_scb(ahc, scb);
	xpt_done(ccb);
}
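
/*
 * ahc_action() is the SIM's entry point for CCBs from CAM.  It
 * dispatches on the function code; immediate requests are completed
 * here, while SCSI I/O is funneled through ahc_setup_data() and
 * ahc_execute_scb().
 */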
static void
ahc_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	ahc_softc *ahc;
	struct	ahc_tmode_lstate *lstate;
	u_int	target_id;
	u_int	our_id;
	long	s;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n"));

	ahc = (struct ahc_softc *)cam_sim_softc(sim);

	target_id = ccb->ccb_h.target_id;
	our_id = SIM_SCSI_ID(ahc, sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	{
		struct	   ahc_tmode_tstate *tstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				/* Response from the black hole device */
				tstate = NULL;
				lstate = ahc->black_hole;
			} else {
				ccb->ccb_h.status = status;
				xpt_done(ccb);
				break;
			}
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {

			ahc_lock(ahc, &s);
			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
					  sim_links.sle);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0)
				ahc_run_tqinfifo(ahc, /*paused*/FALSE);
			ahc_unlock(ahc, &s);
			break;
		}

		/*
		 * The target_id represents the target we attempt to
		 * select.  In target mode, this is the initiator of
		 * the original command.
		 */
		our_id = target_id;
		target_id = ccb->csio.init_id;
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		struct	scb *scb;
		struct	hardware_scb *hscb;

		if ((ahc->flags & AHC_INITIATORROLE) == 0
		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}

		/*
		 * get an scb to use.
		 */
		ahc_lock(ahc, &s);
		if ((scb = ahc_get_scb(ahc)) == NULL) {

			xpt_freeze_simq(sim, /*count*/1);
			ahc->flags |= AHC_RESOURCE_SHORTAGE;
			ahc_unlock(ahc, &s);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}
		ahc_unlock(ahc, &s);

		hscb = scb->hscb;

		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("start scb(%p)\n", scb));
		scb->io_ctx = ccb;
		/*
		 * So we can find the SCB when an abort is requested
		 */
		ccb->ccb_h.ccb_scb_ptr = scb;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id);
		hscb->lun = ccb->ccb_h.target_lun;
		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			ahc_execute_scb(scb, NULL, 0, 0);
		} else {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				struct target_data *tdata;

				tdata = &hscb->shared_data.tdata;
				if (ahc->pending_device == lstate)
					scb->flags |= SCB_TARGET_IMMEDIATE;
				hscb->control |= TARGET_SCB;
				tdata->target_phases = IDENTIFY_SEEN;
				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
					tdata->target_phases |= SPHASE_PENDING;
					tdata->scsi_status =
					    ccb->csio.scsi_status;
				}
				if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
					tdata->target_phases |= NO_DISCONNECT;

				tdata->initiator_tag = ccb->csio.tag_id;
			}
			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
				hscb->control |= ccb->csio.tag_action;

			ahc_setup_data(ahc, sim, &ccb->csio, scb);
		}
		break;
	}
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	{
		struct	   ahc_tmode_tstate *tstate;
		struct	   ahc_tmode_lstate *lstate;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
				  sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ahc_send_lstate_events(ahc, lstate);
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
		ahc_handle_en_lun(ahc, sim, ccb);
		xpt_done(ccb);
		break;
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		ahc_abort_ccb(ahc, sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
#ifdef AHC_NEW_TRAN_SETTINGS
		struct	ahc_devinfo devinfo;
		struct	ccb_trans_settings *cts;
		struct	ccb_trans_settings_scsi *scsi;
		struct	ccb_trans_settings_spi *spi;
		struct	ahc_initiator_tinfo *tinfo;
		struct	ahc_tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	update_type;

		cts = &ccb->cts;
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
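
		/*
		 * Transfer negotiation state is kept in three tiers:
		 * 'curr' is what is active on the bus now, 'goal' is
		 * what we will negotiate toward, and 'user' holds the
		 * user-imposed limits.  CURRENT_SETTINGS requests update
		 * the goal; USER_SETTINGS requests update the limits.
		 */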
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
			tinfo->curr.protocol_version =
			    cts->protocol_version;
			tinfo->curr.transport_version =
			    cts->transport_version;
			tinfo->goal.protocol_version =
			    cts->protocol_version;
			tinfo->goal.transport_version =
			    cts->transport_version;
		} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
			tinfo->user.protocol_version =
			    cts->protocol_version;
			tinfo->user.transport_version =
			    cts->transport_version;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
			if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
			ahc_validate_width(ahc, /*tinfo limit*/NULL,
					   &spi->bus_width, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, spi->bus_width,
				      update_type, /*paused*/FALSE);
		}

		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->ppr_options = tinfo->user.ppr_options;
			else
				spi->ppr_options = tinfo->goal.ppr_options;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_offset = tinfo->user.offset;
			else
				spi->sync_offset = tinfo->goal.offset;
		}

		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
			if (update_type == AHC_TRANS_USER)
				spi->sync_period = tinfo->user.period;
			else
				spi->sync_period = tinfo->goal.period;
		}
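
		/*
		 * Clamp the requested rate to the chip's capability:
		 * Ultra2 parts can do DT, Ultra parts top out at Ultra,
		 * and everything else at Fast SCSI.
		 */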
		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
				spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &spi->sync_period,
						     &spi->ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
					    syncrate, &spi->sync_offset,
					    spi->bus_width, ROLE_UNKNOWN);

			/* We use a period of 0 to represent async */
			if (spi->sync_offset == 0) {
				spi->sync_period = 0;
				spi->ppr_options = 0;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 spi->sync_period, spi->sync_offset,
					 spi->ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#else
		struct	  ahc_devinfo devinfo;
		struct	  ccb_trans_settings *cts;
		struct	  ahc_initiator_tinfo *tinfo;
		struct	  ahc_tmode_tstate *tstate;
		uint16_t *discenable;
		uint16_t *tagenable;
		u_int	  update_type;
		long	  s;

		cts = &ccb->cts;
		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    cts->ccb_h.target_id,
				    cts->ccb_h.target_lun,
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		update_type = 0;
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			update_type |= AHC_TRANS_GOAL;
			discenable = &tstate->discenable;
			tagenable = &tstate->tagenable;
		} else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			update_type |= AHC_TRANS_USER;
			discenable = &ahc->user_discenable;
			tagenable = &ahc->user_tagenable;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		ahc_lock(ahc, &s);

		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
				*discenable |= devinfo.target_mask;
			else
				*discenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
				*tagenable |= devinfo.target_mask;
			else
				*tagenable &= ~devinfo.target_mask;
		}

		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
			ahc_validate_width(ahc, /*tinfo limit*/NULL,
					   &cts->bus_width, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, cts->bus_width,
				      update_type, /*paused*/FALSE);
		}

		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_offset = tinfo->user.offset;
			else
				cts->sync_offset = tinfo->goal.offset;
		}

		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) {
			if (update_type == AHC_TRANS_USER)
				cts->sync_period = tinfo->user.period;
			else
				cts->sync_period = tinfo->goal.period;
		}

		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
			struct ahc_syncrate *syncrate;
			u_int ppr_options;
			u_int maxsync;

			if ((ahc->features & AHC_ULTRA2) != 0)
				maxsync = AHC_SYNCRATE_DT;
			else if ((ahc->features & AHC_ULTRA) != 0)
				maxsync = AHC_SYNCRATE_ULTRA;
			else
				maxsync = AHC_SYNCRATE_FAST;

			ppr_options = 0;
			if (cts->sync_period <= 9
			 && cts->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
				ppr_options = MSG_EXT_PPR_DT_REQ;

			syncrate = ahc_find_syncrate(ahc, &cts->sync_period,
						     &ppr_options,
						     maxsync);
			ahc_validate_offset(ahc, /*tinfo limit*/NULL,
					    syncrate, &cts->sync_offset,
					    MSG_EXT_WDTR_BUS_8_BIT,
					    ROLE_UNKNOWN);

			/* We use a period of 0 to represent async */
			if (cts->sync_offset == 0) {
				cts->sync_period = 0;
				ppr_options = 0;
			}

			if (ppr_options == MSG_EXT_PPR_DT_REQ
			 && tinfo->user.transport_version >= 3) {
				tinfo->goal.transport_version =
				    tinfo->user.transport_version;
				tinfo->curr.transport_version =
				    tinfo->user.transport_version;
			}

			ahc_set_syncrate(ahc, &devinfo, syncrate,
					 cts->sync_period, cts->sync_offset,
					 ppr_options, update_type,
					 /*paused*/FALSE);
		}
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
#endif
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{

		ahc_lock(ahc, &s);
		ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim),
				      SIM_CHANNEL(ahc, sim), &ccb->cts);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		break;
	}
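
	/*
	 * Fabricate a disk geometry for the BIOS: the standard 64 head,
	 * 32 sector translation, or 255/63 when the volume is larger
	 * than 1GB and extended translation is enabled for this channel.
	 */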
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		uint32_t  size_mb;
		uint32_t  secs_per_cylinder;
		int	  extended;

		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = SIM_IS_SCSIBUS_B(ahc, sim)
			 ? ahc->flags & AHC_EXTENDED_TRANS_B
			 : ahc->flags & AHC_EXTENDED_TRANS_A;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int  found;

		ahc_lock(ahc, &s);
		found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim),
					  /*initiate reset*/TRUE);
		ahc_unlock(ahc, &s);
		if (bootverbose) {
			xpt_print_path(SIM_PATH(ahc, sim));
			printf("SCSI bus reset delivered. "
			       "%d SCBs aborted.\n", found);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((ahc->features & AHC_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		if ((ahc->features & AHC_TARGETMODE) != 0) {
			cpi->target_sprt = PIT_PROCESSOR
					 | PIT_DISCONNECT
					 | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7;
		cpi->max_lun = AHC_NUM_LUNS - 1;
		if (SIM_IS_SCSIBUS_B(ahc, sim)) {
			cpi->initiator_id = ahc->our_id_b;
			if ((ahc->flags & AHC_RESET_BUS_B) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		} else {
			cpi->initiator_id = ahc->our_id;
			if ((ahc->flags & AHC_RESET_BUS_A) == 0)
				cpi->hba_misc |= PIM_NOBUSRESET;
		}
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
#ifdef AHC_NEW_TRAN_SETTINGS
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST;
		if ((ahc->features & AHC_DT) != 0) {
			cpi->transport_version = 3;
			cpi->xport_specific.spi.ppr_options =
			    SID_SPI_CLOCK_DT_ST;
		}
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		break;
	}
}
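
/*
 * Fill in a transfer settings CCB from either the current (active)
 * or the user negotiation state for the addressed target.
 */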
static void
ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
#ifdef AHC_NEW_TRAN_SETTINGS
	struct	ahc_devinfo devinfo;
	struct	ccb_trans_settings_scsi *scsi;
	struct	ccb_trans_settings_spi *spi;
	struct	ahc_initiator_tinfo *targ_info;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->curr;
	else
		tinfo = &targ_info->user;

	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_PPR_OPTIONS;

	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		spi->valid |= CTS_SPI_VALID_DISC;
	} else {
		scsi->valid = 0;
	}

	cts->ccb_h.status = CAM_REQ_CMP;
#else
	struct	ahc_devinfo devinfo;
	struct	ahc_initiator_tinfo *targ_info;
	struct	ahc_tmode_tstate *tstate;
	struct	ahc_transinfo *tinfo;

	ahc_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahc_fetch_transinfo(ahc, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		tinfo = &targ_info->curr;
	else
		tinfo = &targ_info->user;

	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
	if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) {
		if ((ahc->user_discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((ahc->user_tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			cts->flags |= CCB_TRANS_TAG_ENB;
	}
	cts->sync_period = tinfo->period;
	cts->sync_offset = tinfo->offset;
	cts->bus_width = tinfo->width;

	cts->valid = CCB_TRANS_SYNC_RATE_VALID
		   | CCB_TRANS_SYNC_OFFSET_VALID
		   | CCB_TRANS_BUS_WIDTH_VALID;

	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD)
		cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID;

	cts->ccb_h.status = CAM_REQ_CMP;
#endif
}

static void
ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct ahc_softc *ahc;
	struct cam_sim *sim;

	sim = (struct cam_sim *)callback_arg;
	ahc = (struct ahc_softc *)cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
	{
		struct	ahc_devinfo devinfo;
		long	s;

		ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim),
				    xpt_path_target_id(path),
				    xpt_path_lun_id(path),
				    SIM_CHANNEL(ahc, sim),
				    ROLE_UNKNOWN);

		/*
		 * Revert to async/narrow transfers
		 * for the next device.
		 */
		ahc_lock(ahc, &s);
		ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
		ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
				 /*period*/0, /*offset*/0, /*ppr_options*/0,
				 AHC_TRANS_GOAL|AHC_TRANS_CUR,
				 /*paused*/FALSE);
		ahc_unlock(ahc, &s);
		break;
	}
	default:
		break;
	}
}
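
/*
 * Callback for bus_dmamap_load().  Packs the returned DMA segments
 * into the SCB's hardware SG list and hands the SCB to the controller.
 * The upper bits of each segment address are folded into the high byte
 * of the SG length field, which appears to be how addresses wider than
 * 32 bits are passed to the sequencer.
 */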
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct	scb *scb;
	union	ccb *ccb;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;
	u_int	mask;
	long	s;

	scb = (struct scb *)arg;
	ccb = scb->io_ctx;
	ahc = scb->ahc_softc;

	if (error != 0) {
		if (error == EFBIG)
			ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG);
		else
			ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_lock(ahc, &s);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}
	if (nsegments != 0) {
		struct	  ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		bus_dmasync_op_t op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

			sg->addr = ahc_htole32(dm_segs->ds_addr);
			len = dm_segs->ds_len
			    | ((dm_segs->ds_addr >> 8) & 0x7F000000);
			sg->len = ahc_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = ahc_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;

			/*
			 * If the transfer is of an odd length and in the
			 * "in" direction (scsi->HostBus), then it may
			 * trigger a bug in the 'WideODD' feature of
			 * non-Ultra2 chips.  Force the total data-length
			 * to be even by adding an extra, 1 byte, SG,
			 * element.  We do this even if we are not currently
			 * negotiated wide as negotiation could occur before
			 * this command is executed.
			 */
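			/*
			 * For example, a 513 byte odd-length "in" transfer
			 * picks up a one byte pad SG element pointing at
			 * the driver's dma_bug_buf scratch buffer.
			 */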
			if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
			 && (ccb->csio.dxfer_len & 0x1) != 0
			 && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {

				nsegments++;
				if (nsegments > AHC_NSEG) {

					ahc_set_transaction_status(scb,
					    CAM_REQ_TOO_BIG);
					bus_dmamap_unload(ahc->buffer_dmat,
							  scb->dmamap);
					ahc_lock(ahc, &s);
					ahc_free_scb(ahc, scb);
					ahc_unlock(ahc, &s);
					xpt_done(ccb);
					return;
				}
				sg->addr = ahc_htole32(ahc->dma_bug_buf);
				sg->len = ahc_htole32(1);
				sg++;
			}
		}
		sg--;
		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	ahc_lock(ahc, &s);

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
		ahc_free_scb(ahc, scb);
		ahc_unlock(ahc, &s);
		xpt_done(ccb);
		return;
	}

	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;
	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->goal.width != 0
	  || tinfo->goal.period != 0
	  || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		uint64_t time;

		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
			ccb->ccb_h.timeout = 5 * 1000;

		time = ccb->ccb_h.timeout;
		time *= hz;
		time /= 1000;
		ccb->ccb_h.timeout_ch =
		    timeout(ahc_timeout, (caddr_t)scb, time);
	}

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	 && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		if (TAILQ_FIRST(untagged_q) != scb) {
			ahc_unlock(ahc, &s);
			return;
		}
	}
	scb->flags |= SCB_ACTIVE;
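
	/*
	 * Immediate target-mode SCBs continue an already active
	 * connection, so rather than going through the QINFIFO we pause
	 * the sequencer, point it at this SCB, and have it resume the
	 * message loop.
	 */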
	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	ahc_unlock(ahc, &s);
}

static void
ahc_poll(struct cam_sim *sim)
{
	ahc_intr(cam_sim_softc(sim));
}

static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	csio->resid = 0;
	csio->sense_resid = 0;
	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			if (hscb->cdb_len > sizeof(hscb->cdb32)
			 || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
				u_long s;

				ahc_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahc_lock(ahc, &s);
				ahc_free_scb(ahc, scb);
				ahc_unlock(ahc, &s);
				xpt_done((union ccb *)csio);
				return;
			}
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			if (hscb->cdb_len > 12) {
				memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
				scb->flags |= SCB_CDB32_PTR;
			} else {
				memcpy(hscb->shared_data.cdb,
				       csio->cdb_io.cdb_bytes,
				       hscb->cdb_len);
			}
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				s = splsoftvm();
				error = bus_dmamap_load(ahc->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahc_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
					panic("ahc_setup_data - Transfer size "
					      "larger than device max");

				seg.ds_addr = (bus_addr_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahc_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahc_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahc_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		ahc_execute_scb(scb, NULL, 0, 0);
	}
}
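
/*
 * Timeout recovery escalates in stages: first a bus device reset
 * message with a fresh two second timeout and, if the recovery SCB
 * itself times out, a full bus reset.  While recovery is in progress
 * the SIM queue is frozen and the timeouts of all other pending
 * commands are suspended.
 */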
static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb)
{

	if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
		struct scb *list_scb;

		scb->flags |= SCB_RECOVERY_SCB;

		/*
		 * Take all queued, but not sent SCBs out of the equation.
		 * Also ensure that no new CCBs are queued to us while we
		 * try to fix this problem.
		 */
		if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
			xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
			scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
		}

		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them.  We will reschedule
		 * them after we've successfully fixed this problem.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			union ccb *ccb;

			ccb = list_scb->io_ctx;
			untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
		}
	}
}

void
ahc_timeout(void *arg)
{
	struct	scb *scb;
	struct	ahc_softc *ahc;
	long	s;
	int	found;
	u_int	last_phase;
	int	target;
	int	lun;
	int	i;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->ahc_softc;

	ahc_lock(ahc, &s);

	ahc_pause_and_flushwork(ahc);

	/*
	 * With the sequencer paused and pending completions flushed,
	 * the card state examined below should be stable.
	 */
	if ((scb->flags & SCB_ACTIVE) == 0) {
		/* Previous timeout took care of me already */
		printf("%s: Timedout SCB already complete. "
		       "Interrupts may not be functioning.\n", ahc_name(ahc));
		ahc_unpause(ahc);
		ahc_unlock(ahc, &s);
		return;
	}

	target = SCB_GET_TARGET(ahc, scb);
	channel = SCB_GET_CHANNEL(ahc, scb);
	lun = SCB_GET_LUN(scb);

	ahc_print_path(ahc, scb);
	printf("SCB 0x%x - timed out\n", scb->hscb->tag);
	ahc_dump_card_state(ahc);
	last_phase = ahc_inb(ahc, LASTPHASE);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len & AHC_SG_LEN_MASK);
		}
	}
	if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
		/*
		 * Been down this road before.
		 * Do a full bus reset.
		 */
bus_reset:
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
		printf("%s: Issued Channel %c Bus Reset. "
		       "%d SCBs aborted\n", ahc_name(ahc), channel, found);
	} else {
		/*
		 * If we are a target, transition to bus free and report
		 * the timeout.
		 *
		 * The target/initiator that is holding up the bus may not
		 * be the same as the one that triggered this timeout
		 * (different commands have different timeout lengths).
		 * If the bus is idle and we are acting as the initiator
		 * for this request, queue a BDR message to the timed out
		 * target.  Otherwise, if the timed out transaction is
		 * active:
		 *   Initiator transaction:
		 *	Stuff the message buffer with a BDR message and assert
		 *	ATN in the hopes that the target will let go of the bus
		 *	and go to the mesgout phase.  If this fails, we'll
		 *	get another timeout 2 seconds later which will attempt
		 *	a bus reset.
		 *
		 *   Target transaction:
		 *	Transition to BUS FREE and report the error.
		 *	It's good to be the target!
		 */
		u_int active_scb_index;
		u_int saved_scbptr;

		/*
		 * Save SCBPTR so it can be restored before we unpause;
		 * the register accesses below must not disturb the
		 * sequencer's notion of the active SCB.
		 */
		saved_scbptr = ahc_inb(ahc, SCBPTR);
		active_scb_index = ahc_inb(ahc, SCB_TAG);

		if (last_phase != P_BUSFREE
		 && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) != 0
		 && (active_scb_index < ahc->scb_data->numscbs)) {
			struct	scb *active_scb;

			/*
			 * If the active SCB is not us, assume that
			 * the active SCB has a longer timeout than
			 * the timedout SCB, and wait for the active
			 * SCB to timeout.
			 */
			active_scb = ahc_lookup_scb(ahc, active_scb_index);
			if (active_scb != scb) {
				struct	 ccb_hdr *ccbh;
				uint64_t newtimeout;

				ahc_print_path(ahc, scb);
				printf("Other SCB Timeout%s",
				       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
				       ? " again\n" : "\n");
				scb->flags |= SCB_OTHERTCL_TIMEOUT;
				newtimeout =
				    MAX(active_scb->io_ctx->ccb_h.timeout,
					scb->io_ctx->ccb_h.timeout);
				newtimeout *= hz;
				newtimeout /= 1000;
				ccbh = &scb->io_ctx->ccb_h;
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, scb, newtimeout);
				ahc_unpause(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			/* It's us */
			if ((scb->hscb->control & TARGET_SCB) != 0) {

				/*
				 * Send back any queued up transactions
				 * and properly record the error condition.
				 */
				ahc_abort_scbs(ahc, SCB_GET_TARGET(ahc, scb),
					       SCB_GET_CHANNEL(ahc, scb),
					       SCB_GET_LUN(scb),
					       scb->hscb->tag,
					       ROLE_TARGET,
					       CAM_CMD_TIMEOUT);

				/* Will clear us from the bus */
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			ahc_set_recoveryscb(ahc, active_scb);
			ahc_outb(ahc, MSG_OUT, HOST_MSG);
			ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
			ahc_print_path(ahc, active_scb);
			printf("BDR message in message buffer\n");
			active_scb->flags |= SCB_DEVICE_RESET;
			active_scb->io_ctx->ccb_h.timeout_ch =
			    timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
			ahc_unpause(ahc);
		} else {
			int	 disconnected;

			/* XXX Shouldn't panic.  Just punt instead? */
			if ((scb->hscb->control & TARGET_SCB) != 0)
				panic("Timed-out target SCB but bus idle");

			if (last_phase != P_BUSFREE
			 && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
				/* XXX What happened to the SCB? */
				/* Hung target selection.  Goto busfree */
				printf("%s: Hung target selection\n",
				       ahc_name(ahc));
				ahc_restart(ahc);
				ahc_unlock(ahc, &s);
				return;
			}

			if (ahc_search_qinfifo(ahc, target, channel, lun,
					       scb->hscb->tag, ROLE_INITIATOR,
					       /*status*/0, SEARCH_COUNT) > 0) {
				disconnected = FALSE;
			} else {
				disconnected = TRUE;
			}

			if (disconnected) {

				ahc_set_recoveryscb(ahc, scb);
				/*
				 * Actually re-queue this SCB in an attempt
				 * to select the device before it reconnects.
				 * In either case (selection or reselection),
				 * we will now issue a target reset to the
				 * timed-out device.
				 *
				 * Set the MK_MESSAGE control bit indicating
				 * that we desire to send a message.  We
				 * also set the disconnected flag since
				 * in the paging case there is no guarantee
				 * that our SCB control byte matches the
				 * version on the card.  We don't want the
				 * sequencer to abort the command thinking
				 * an unsolicited reselection occurred.
				 */
				scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
				scb->flags |= SCB_DEVICE_RESET;

				/*
				 * Remove any cached copy of this SCB in the
				 * disconnected list in preparation for the
				 * queuing of our abort SCB.  We use the
				 * same element in the SCB, SCB_NEXT, for
				 * both the qinfifo and the disconnected list.
				 */
				ahc_search_disc_list(ahc, target, channel,
						     lun, scb->hscb->tag,
						     /*stop_on_first*/TRUE,
						     /*remove*/TRUE,
						     /*save_state*/FALSE);

				/*
				 * In the non-paging case, the sequencer will
				 * never re-reference the in-core SCB.
				 * To make sure we are notified during
				 * reselection, set the MK_MESSAGE flag in
				 * the card's copy of the SCB.
				 */
				if ((ahc->flags & AHC_PAGESCBS) == 0) {
					ahc_outb(ahc, SCBPTR, scb->hscb->tag);
					ahc_outb(ahc, SCB_CONTROL,
						 ahc_inb(ahc, SCB_CONTROL)
						| MK_MESSAGE);
				}

				/*
				 * Clear out any entries in the QINFIFO first
				 * so we are the next SCB for this target
				 * to run.
				 */
				ahc_search_qinfifo(ahc,
						   SCB_GET_TARGET(ahc, scb),
						   channel, SCB_GET_LUN(scb),
						   SCB_LIST_NULL,
						   ROLE_INITIATOR,
						   CAM_REQUEUE_REQ,
						   SEARCH_COMPLETE);
				ahc_print_path(ahc, scb);
				printf("Queuing a BDR SCB\n");
				ahc_qinfifo_requeue_tail(ahc, scb);
				ahc_outb(ahc, SCBPTR, saved_scbptr);
				scb->io_ctx->ccb_h.timeout_ch =
				    timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
				ahc_unpause(ahc);
			} else {
				/* Go "immediately" to the bus reset */
				/* This shouldn't happen */
				ahc_set_recoveryscb(ahc, scb);
				ahc_print_path(ahc, scb);
				printf("SCB %d: Immediate reset.  "
				       "Flags = 0x%x\n", scb->hscb->tag,
				       scb->flags);
				goto bus_reset;
			}
		}
	}
	ahc_unlock(ahc, &s);
}

/*
 * Abort a CCB.  Only CCBs still queued within the driver (accepted
 * target I/Os and immediate notifies) can be aborted here; aborting
 * an active XPT_SCSI_IO is not yet implemented.
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct ahc_tmode_tstate *tstate;
		struct ahc_tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while(curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				xpt_print_path(abort_ccb->ccb_h.path);
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
		u_int lun, ac_code code, void *opt_arg)
{
	struct	ccb_trans_settings cts;
	struct cam_path *path;
	void *arg;
	int error;

	arg = NULL;
	error = ahc_create_path(ahc, channel, target, lun, &path);

	if (error != CAM_REQ_CMP)
		return;
	/*
	 * For transfer negotiation events we hand CAM a snapshot of the
	 * current settings; tag queuing state is reported only when the
	 * caller supplied an explicit queueing algorithm in opt_arg.
	 */
	switch (code) {
	case AC_TRANSFER_NEG:
	{
#ifdef AHC_NEW_TRAN_SETTINGS
		struct	ccb_trans_settings_scsi *scsi;

		cts.type = CTS_TYPE_CURRENT_SETTINGS;
		scsi = &cts.proto_specific.scsi;
#else
		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
#endif
		cts.ccb_h.path = path;
		cts.ccb_h.target_id = target;
		cts.ccb_h.target_lun = lun;
		ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
							  : ahc->our_id_b,
				      channel, &cts);
		arg = &cts;
#ifdef AHC_NEW_TRAN_SETTINGS
		scsi->valid &= ~CTS_SCSI_VALID_TQ;
		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
#else
		cts.valid &= ~CCB_TRANS_TQ_VALID;
		cts.flags &= ~CCB_TRANS_TAG_ENB;
#endif
		if (opt_arg == NULL)
			break;
		if (*((ahc_queue_alg *)opt_arg) == AHC_QUEUE_TAGGED) {
#ifdef AHC_NEW_TRAN_SETTINGS
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
			scsi->valid |= CTS_SCSI_VALID_TQ;
#else
			cts.flags |= CCB_TRANS_TAG_ENB;
			cts.valid |= CCB_TRANS_TQ_VALID;
#endif
		}
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
		break;
	default:
		panic("ahc_send_async: Unexpected async event");
	}
	xpt_async(code, path, arg);
	xpt_free_path(path);
}

void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int enable)
{
}

int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
	ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
				    M_NOWAIT | M_ZERO);
	if (ahc->platform_data == NULL)
		return (ENOMEM);
	return (0);
}

void
ahc_platform_free(struct ahc_softc *ahc)
{
	struct ahc_platform_data *pdata;

	pdata = ahc->platform_data;
	if (pdata != NULL) {
		if (pdata->regs != NULL)
			bus_release_resource(ahc->dev_softc,
					     pdata->regs_res_type,
					     pdata->regs_res_id,
					     pdata->regs);

		if (pdata->irq != NULL)
			bus_release_resource(ahc->dev_softc,
					     pdata->irq_res_type,
					     0, pdata->irq);

		if (pdata->sim_b != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
			xpt_free_path(pdata->path_b);
			xpt_bus_deregister(cam_sim_path(pdata->sim_b));
			cam_sim_free(pdata->sim_b, /*free_devq*/TRUE);
		}
		if (pdata->sim != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
			xpt_free_path(pdata->path);
			xpt_bus_deregister(cam_sim_path(pdata->sim));
			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
		}
		if (pdata->eh != NULL)
			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
		free(ahc->platform_data, M_DEVBUF);
	}
}

int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	/* We don't sort softcs under FreeBSD so report equal always */
	return (0);
}

int
ahc_detach(device_t dev)
{
	struct ahc_softc *ahc;
	u_long s;

	device_printf(dev, "detaching device\n");
	ahc = device_get_softc(dev);
	ahc_lock(ahc, &s);
	bus_teardown_intr(dev, ahc->platform_data->irq, ahc->platform_data->ih);
	ahc_unlock(ahc, &s);
	ahc_free(ahc);
	return (0);
}

#if UNUSED
static void
ahc_dump_targcmd(struct target_cmd *cmd)
{
	uint8_t *byte;
	uint8_t *last_byte;
	int i;

	byte = &cmd->initiator_channel;
	/* Debugging info for received commands */
	last_byte = &cmd[1].initiator_channel;

	i = 0;
	while (byte < last_byte) {
		if (i == 0)
			printf("\t");
		printf("%#x", *byte++);
		i++;
		if (i == 8) {
			printf("\n");
			i = 0;
		} else {
			printf(", ");
		}
	}
}
#endif

static int
ahc_modevent(module_t mod, int type, void *data)
{
	/* XXX Deal with busy status on unload. */
	return 0;
}

static moduledata_t ahc_mod = {
	"ahc",
	ahc_modevent,
	NULL
};

DECLARE_MODULE(ahc, ahc_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
MODULE_DEPEND(ahc, cam, 1, 1, 1);
MODULE_VERSION(ahc, 1);