/*-
 * Bus independent FreeBSD shim for the aic79xx based Adaptec SCSI controllers
 *
 * Copyright (c) 1994-2002, 2004 Justin T. Gibbs.
 * Copyright (c) 2001-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#35 $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/aic7xxx/aic79xx_osm.h>
#include <dev/aic7xxx/aic79xx_inline.h>

#include <sys/kthread.h>

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifndef AHD_TMODE_ENABLE
#define AHD_TMODE_ENABLE 0
#endif

#include <dev/aic7xxx/aic_osm_lib.c>

#define ccb_scb_ptr spriv_ptr0

#if 0
static void	ahd_dump_targcmd(struct target_cmd *cmd);
#endif
static int	ahd_modevent(module_t mod, int type, void *data);
static void	ahd_action(struct cam_sim *sim, union ccb *ccb);
static void	ahd_set_tran_settings(struct ahd_softc *ahd,
				      int our_id, char channel,
				      struct ccb_trans_settings *cts);
static void	ahd_get_tran_settings(struct ahd_softc *ahd,
				      int our_id, char channel,
				      struct ccb_trans_settings *cts);
static void	ahd_async(void *callback_arg, uint32_t code,
			  struct cam_path *path, void *arg);
static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	ahd_poll(struct cam_sim *sim);
static void	ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
			       struct ccb_scsiio *csio, struct scb *scb);
static void	ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim,
			      union ccb *ccb);
static int	ahd_create_path(struct ahd_softc *ahd,
				char channel, u_int target, u_int lun,
				struct cam_path **path);

static const char *ahd_sysctl_node_elements[] = {
	"root",
	"summary",
	"debug"
};

#ifndef NO_SYSCTL_DESCR
static const char *ahd_sysctl_node_descriptions[] = {
	"root error collection for aic79xx controllers",
	"summary collection for aic79xx controllers",
	"debug collection for aic79xx controllers"
};
#endif

static const char *ahd_sysctl_errors_elements[] = {
	"Cerrors",
	"Uerrors",
	"Ferrors"
};

#ifndef NO_SYSCTL_DESCR
static const char *ahd_sysctl_errors_descriptions[] = {
	"Correctable errors",
	"Uncorrectable errors",
	"Fatal errors"
};
#endif

static int
ahd_set_debugcounters(SYSCTL_HANDLER_ARGS)
{
	struct ahd_softc *sc;
	int error, tmpv;

	tmpv = 0;
	sc = arg1;
	error = sysctl_handle_int(oidp, &tmpv, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (tmpv < 0 || tmpv >= AHD_ERRORS_NUMBER)
		return (EINVAL);
	sc->summerr[arg2] = tmpv;
	return (0);
}

static int
ahd_clear_allcounters(SYSCTL_HANDLER_ARGS)
{
	struct ahd_softc *sc;
	int error, tmpv;

	tmpv = 0;
	sc = arg1;
	error = sysctl_handle_int(oidp, &tmpv, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (tmpv != 0)
		bzero(sc->summerr, sizeof(sc->summerr));
	return (0);
}

static int
ahd_create_path(struct ahd_softc *ahd, char channel, u_int target,
		u_int lun, struct cam_path **path)
{
	path_id_t path_id;

	path_id = cam_sim_path(ahd->platform_data->sim);
	return (xpt_create_path(path, /*periph*/NULL,
				path_id, target, lun));
}

void
ahd_sysctl(struct ahd_softc *ahd)
{
	u_int i;

	for (i = 0; i < AHD_SYSCTL_NUMBER; i++)
		sysctl_ctx_init(&ahd->sysctl_ctx[i]);

	ahd->sysctl_tree[AHD_SYSCTL_ROOT] =
	    SYSCTL_ADD_NODE(&ahd->sysctl_ctx[AHD_SYSCTL_ROOT],
			    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
			    device_get_nameunit(ahd->dev_softc), CTLFLAG_RD, 0,
			    ahd_sysctl_node_descriptions[AHD_SYSCTL_ROOT]);
	SYSCTL_ADD_PROC(&ahd->sysctl_ctx[AHD_SYSCTL_ROOT],
			SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_ROOT]),
			OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW, ahd,
			0, ahd_clear_allcounters, "IU",
			"Clear all counters");

	for (i = AHD_SYSCTL_SUMMARY; i < AHD_SYSCTL_NUMBER; i++)
		ahd->sysctl_tree[i] =
		    SYSCTL_ADD_NODE(&ahd->sysctl_ctx[i],
				    SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_ROOT]),
				    OID_AUTO, ahd_sysctl_node_elements[i],
				    CTLFLAG_RD, 0,
				    ahd_sysctl_node_descriptions[i]);

	for (i = AHD_ERRORS_CORRECTABLE; i < AHD_ERRORS_NUMBER; i++) {
		SYSCTL_ADD_UINT(&ahd->sysctl_ctx[AHD_SYSCTL_SUMMARY],
				SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_SUMMARY]),
				OID_AUTO, ahd_sysctl_errors_elements[i],
				CTLFLAG_RD, &ahd->summerr[i], i,
				ahd_sysctl_errors_descriptions[i]);
		SYSCTL_ADD_PROC(&ahd->sysctl_ctx[AHD_SYSCTL_DEBUG],
				SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_DEBUG]),
				OID_AUTO, ahd_sysctl_errors_elements[i],
				CTLFLAG_RW | CTLTYPE_UINT, ahd, i,
				ahd_set_debugcounters, "IU",
				ahd_sysctl_errors_descriptions[i]);
	}
}

int
ahd_map_int(struct ahd_softc *ahd)
{
	int error;

	/* Hook up our interrupt handler */
	error = bus_setup_intr(ahd->dev_softc, ahd->platform_data->irq,
			       INTR_TYPE_CAM|INTR_MPSAFE, NULL,
			       ahd_platform_intr, ahd, &ahd->platform_data->ih);
	if (error != 0)
		device_printf(ahd->dev_softc, "bus_setup_intr() failed: %d\n",
			      error);
	return (error);
}

/*
 * Attach all the sub-devices we can find
 */
int
ahd_attach(struct ahd_softc *ahd)
{
	char ahd_info[256];
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;
	int count;

	count = 0;
	devq = NULL;
	sim = NULL;

	/*
	 * Create a thread to perform all recovery.
	 */
	if (ahd_spawn_recovery_thread(ahd) != 0)
		goto fail;

	ahd_controller_info(ahd, ahd_info);
	printf("%s\n", ahd_info);
	ahd_lock(ahd);

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(AHD_MAX_QUEUE);
	if (devq == NULL)
		goto fail;

	/*
	 * Construct our SIM entry
	 */
	sim = cam_sim_alloc(ahd_action, ahd_poll, "ahd", ahd,
			    device_get_unit(ahd->dev_softc),
			    &ahd->platform_data->mtx, 1, /*XXX*/256, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		goto fail;
	}

	if (xpt_bus_register(sim, ahd->dev_softc, /*bus_id*/0) != CAM_SUCCESS) {
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	if (xpt_create_path(&path, /*periph*/NULL,
			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = ahd_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	count++;

fail:
	ahd->platform_data->sim = sim;
	ahd->platform_data->path = path;
	ahd_unlock(ahd);
	if (count != 0) {
		/* We have to wait until after any system dumps... */
		ahd->platform_data->eh =
		    EVENTHANDLER_REGISTER(shutdown_final, ahd_shutdown,
					  ahd, SHUTDOWN_PRI_DEFAULT);
		ahd_intr_enable(ahd, TRUE);
	}

	return (count);
}

/*
 * Catch an interrupt from the adapter
 */
void
ahd_platform_intr(void *arg)
{
	struct ahd_softc *ahd;

	ahd = (struct ahd_softc *)arg;
	ahd_lock(ahd);
	ahd_intr(ahd);
	ahd_unlock(ahd);
}

/*
 * We have an scb which has been processed by the
 * adapter; now we look to see how the operation
 * went.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahd_done - scb %d\n", SCB_GET_TAG(scb)));

	ccb = scb->io_ctx;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_TIMEDOUT) != 0)
		LIST_REMOVE(scb, timedout_links);

	callout_stop(&scb->io_timer);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
	}

#ifdef AHD_TARGET_MODE
	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		struct cam_path *ccb_path;

		/*
		 * If we have finally disconnected, clean up our
		 * pending device state.
		 * XXX - There may be error states in which we
		 *       will remain connected.
		 */
		ccb_path = ccb->ccb_h.path;
		if (ahd->pending_device != NULL
		 && xpt_path_comp(ahd->pending_device->path, ccb_path) == 0) {

			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				ahd->pending_device = NULL;
			} else {
				xpt_print_path(ccb->ccb_h.path);
				printf("Still disconnected\n");
				ahd_freeze_ccb(ccb);
			}
		}

		if (aic_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahd_free_scb(ahd, scb);
		xpt_done(ccb);
		return;
	}
#endif

	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		ahd->scb_data.recovery_scbs--;

		if (aic_get_transaction_status(scb) == CAM_BDR_SENT
		 || aic_get_transaction_status(scb) == CAM_REQ_ABORTED)
			aic_set_transaction_status(scb, CAM_CMD_TIMEOUT);

		if (ahd->scb_data.recovery_scbs == 0) {
			/*
			 * All recovery actions have completed successfully,
			 * so reinstate the timeouts for all other pending
			 * commands.
			 */
			LIST_FOREACH(list_scb,
				     &ahd->pending_scbs, pending_links) {

				aic_scb_timer_reset(list_scb,
						    aic_get_timeout(scb));
			}

			ahd_print_path(ahd, scb);
			printf("no longer in timeout, status = %x\n",
			       ccb->ccb_h.status);
		}
	}

	/* Don't clobber any existing error state */
	if (aic_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the client's csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahd_get_sense_buf(ahd, scb),
		       /* XXX What size do we want to use??? */
		       sizeof(ccb->csio.sense_data)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
		struct scsi_status_iu_header *siu;
		u_int sense_len;

		/*
		 * Copy only the sense data into the provided buffer.
		 */
		siu = (struct scsi_status_iu_header *)scb->sense_data;
		sense_len = MIN(scsi_4btoul(siu->sense_length),
				sizeof(ccb->csio.sense_data));
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahd_get_sense_buf(ahd, scb) + SIU_SENSE_OFFSET(siu),
		       sense_len);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
			uint8_t *sense_data = (uint8_t *)&ccb->csio.sense_data;
			u_int i;

			printf("Copied %d bytes of sense data offset %d:",
			       sense_len, SIU_SENSE_OFFSET(siu));
			for (i = 0; i < sense_len; i++)
				printf(" 0x%x", *sense_data++);
			printf("\n");
		}
#endif
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahd_free_scb(ahd, scb);
	xpt_done(ccb);
}
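
/*
 * CAM SIM action entry point.  Dispatch on the CCB function code,
 * starting I/O for requests that must reach the controller and
 * completing the remaining CCBs immediately.
 */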

static void
ahd_action(struct cam_sim *sim, union ccb *ccb)
{
	struct ahd_softc *ahd;
#ifdef AHD_TARGET_MODE
	struct ahd_tmode_lstate *lstate;
#endif
	u_int target_id;
	u_int our_id;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahd_action\n"));

	ahd = (struct ahd_softc *)cam_sim_softc(sim);

	target_id = ccb->ccb_h.target_id;
	our_id = SIM_SCSI_ID(ahd, sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
#ifdef AHD_TARGET_MODE
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	{
		struct ahd_tmode_tstate *tstate;
		cam_status status;

		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				/* Response from the black hole device */
				tstate = NULL;
				lstate = ahd->black_hole;
			} else {
				ccb->ccb_h.status = status;
				xpt_done(ccb);
				break;
			}
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {

			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
					  sim_links.sle);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			if ((ahd->flags & AHD_TQINFIFO_BLOCKED) != 0)
				ahd_run_tqinfifo(ahd, /*paused*/FALSE);
			break;
		}

		/*
		 * The target_id represents the target we attempt to
		 * select.  In target mode, this is the initiator of
		 * the original command.
		 */
		our_id = target_id;
		target_id = ccb->csio.init_id;
		/* FALLTHROUGH */
	}
#endif
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		struct scb *scb;
		struct hardware_scb *hscb;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;
		u_int col_idx;

		if ((ahd->flags & AHD_INITIATORROLE) == 0
		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}

		/*
		 * get an scb to use.
		 */
		tinfo = ahd_fetch_transinfo(ahd, 'A', our_id,
					    target_id, &tstate);
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
		 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0
		 || ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			col_idx = AHD_NEVER_COL_IDX;
		} else {
			col_idx = AHD_BUILD_COL_IDX(target_id,
						    ccb->ccb_h.target_lun);
		}
		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {

			xpt_freeze_simq(sim, /*count*/1);
			ahd->flags |= AHD_RESOURCE_SHORTAGE;
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		hscb = scb->hscb;

		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("start scb(%p)\n", scb));
		scb->io_ctx = ccb;
		/*
		 * So we can find the SCB when an abort is requested
		 */
		ccb->ccb_h.ccb_scb_ptr = scb;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
		hscb->lun = ccb->ccb_h.target_lun;
		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
			ahd_execute_scb(scb, NULL, 0, 0);
		} else {
#ifdef AHD_TARGET_MODE
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				struct target_data *tdata;

				tdata = &hscb->shared_data.tdata;
				if (ahd->pending_device == lstate)
					scb->flags |= SCB_TARGET_IMMEDIATE;
				hscb->control |= TARGET_SCB;
				tdata->target_phases = 0;
				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
					tdata->target_phases |= SPHASE_PENDING;
					tdata->scsi_status =
					    ccb->csio.scsi_status;
				}
				if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
					tdata->target_phases |= NO_DISCONNECT;

				tdata->initiator_tag =
				    ahd_htole16(ccb->csio.tag_id);
			}
#endif
			hscb->task_management = 0;
			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
				hscb->control |= ccb->csio.tag_action;

			ahd_setup_data(ahd, sim, &ccb->csio, scb);
		}
		break;
	}
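	/*
	 * The following immediate-notify and LUN-enable cases service
	 * host target mode and are only compiled in when
	 * AHD_TARGET_MODE is defined.
	 */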
#ifdef AHD_TARGET_MODE
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	{
		struct ahd_tmode_tstate *tstate;
		struct ahd_tmode_lstate *lstate;
		cam_status status;

		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
				  sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ahd_send_lstate_events(ahd, lstate);
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
		ahd_handle_en_lun(ahd, sim, ccb);
		xpt_done(ccb);
		break;
#endif
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		ahd_abort_ccb(ahd, sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		ahd_set_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
				      SIM_CHANNEL(ahd, sim), &ccb->cts);
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		ahd_get_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
				      SIM_CHANNEL(ahd, sim), &ccb->cts);
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		aic_calc_geometry(&ccb->ccg, ahd->flags & AHD_EXTENDED_TRANS_A);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int found;

		found = ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
					  /*initiate reset*/TRUE);
		if (bootverbose) {
			xpt_print_path(SIM_PATH(ahd, sim));
			printf("SCSI bus reset delivered. "
			       "%d SCBs aborted.\n", found);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((ahd->features & AHD_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		if ((ahd->features & AHD_TARGETMODE) != 0) {
			cpi->target_sprt = PIT_PROCESSOR
					 | PIT_DISCONNECT
					 | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (ahd->features & AHD_WIDE) ? 15 : 7;
		cpi->max_lun = AHD_NUM_LUNS_NONPKT - 1;
		cpi->initiator_id = ahd->our_id;
		if ((ahd->flags & AHD_RESET_BUS_A) == 0) {
			cpi->hba_misc |= PIM_NOBUSRESET;
		}
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 4;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST
						    | SID_SPI_IUS
						    | SID_SPI_QAS;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		break;
	}
}
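
/*
 * Handle XPT_SET_TRAN_SETTINGS.  Validate the width, sync rate, offset,
 * and PPR options requested by CAM and apply them to the target's goal
 * or user transfer settings, along with the disconnection and tag
 * queuing enables.
 */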

static void
ahd_set_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
	struct ahd_devinfo devinfo;
	struct ccb_trans_settings_scsi *scsi;
	struct ccb_trans_settings_spi *spi;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	uint16_t *discenable;
	uint16_t *tagenable;
	u_int update_type;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    SIM_CHANNEL(ahd, sim),
			    ROLE_UNKNOWN);
	tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
				    devinfo.our_scsiid,
				    devinfo.target, &tstate);
	update_type = 0;
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
		update_type |= AHD_TRANS_GOAL;
		discenable = &tstate->discenable;
		tagenable = &tstate->tagenable;
		tinfo->curr.protocol_version = cts->protocol_version;
		tinfo->curr.transport_version = cts->transport_version;
		tinfo->goal.protocol_version = cts->protocol_version;
		tinfo->goal.transport_version = cts->transport_version;
	} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
		update_type |= AHD_TRANS_USER;
		discenable = &ahd->user_discenable;
		tagenable = &ahd->user_tagenable;
		tinfo->user.protocol_version = cts->protocol_version;
		tinfo->user.transport_version = cts->transport_version;
	} else {
		cts->ccb_h.status = CAM_REQ_INVALID;
		return;
	}

	if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
			*discenable |= devinfo.target_mask;
		else
			*discenable &= ~devinfo.target_mask;
	}

	if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
		if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
			*tagenable |= devinfo.target_mask;
		else
			*tagenable &= ~devinfo.target_mask;
	}

	if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
		ahd_validate_width(ahd, /*tinfo limit*/NULL,
				   &spi->bus_width, ROLE_UNKNOWN);
		ahd_set_width(ahd, &devinfo, spi->bus_width,
			      update_type, /*paused*/FALSE);
	}

	if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
		if (update_type == AHD_TRANS_USER)
			spi->ppr_options = tinfo->user.ppr_options;
		else
			spi->ppr_options = tinfo->goal.ppr_options;
	}

	if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
		if (update_type == AHD_TRANS_USER)
			spi->sync_offset = tinfo->user.offset;
		else
			spi->sync_offset = tinfo->goal.offset;
	}

	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
		if (update_type == AHD_TRANS_USER)
			spi->sync_period = tinfo->user.period;
		else
			spi->sync_period = tinfo->goal.period;
	}

	if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
	 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
		u_int maxsync;

		maxsync = AHD_SYNCRATE_MAX;

		if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((*discenable & devinfo.target_mask) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		ahd_find_syncrate(ahd, &spi->sync_period,
				  &spi->ppr_options, maxsync);
		ahd_validate_offset(ahd, /*tinfo limit*/NULL,
				    spi->sync_period, &spi->sync_offset,
				    spi->bus_width, ROLE_UNKNOWN);

		/* We use a period of 0 to represent async */
		if (spi->sync_offset == 0) {
			spi->sync_period = 0;
			spi->ppr_options = 0;
		}

		ahd_set_syncrate(ahd, &devinfo, spi->sync_period,
				 spi->sync_offset, spi->ppr_options,
				 update_type, /*paused*/FALSE);
	}
	cts->ccb_h.status = CAM_REQ_CMP;
}
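
/*
 * Handle XPT_GET_TRAN_SETTINGS.  Report the target's current or user
 * transfer settings (sync rate, offset, width, PPR options, and the
 * disconnection/tag queuing enables) back to CAM.
 */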

static void
ahd_get_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
	struct ahd_devinfo devinfo;
	struct ccb_trans_settings_scsi *scsi;
	struct ccb_trans_settings_spi *spi;
	struct ahd_initiator_tinfo *targ_info;
	struct ahd_tmode_tstate *tstate;
	struct ahd_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahd_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->curr;
	else
		tinfo = &targ_info->user;

	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahd->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahd->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_PPR_OPTIONS;

	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		spi->valid |= CTS_SPI_VALID_DISC;
	} else {
		scsi->valid = 0;
	}

	cts->ccb_h.status = CAM_REQ_CMP;
}
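
/*
 * Asynchronous event callback registered in ahd_attach().  On
 * AC_LOST_DEVICE, revert the departed device to asynchronous, narrow
 * transfers so that the next device at that ID renegotiates from a
 * clean slate.
 */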

static void
ahd_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct ahd_softc *ahd;
	struct cam_sim *sim;

	sim = (struct cam_sim *)callback_arg;
	ahd = (struct ahd_softc *)cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
	{
		struct ahd_devinfo devinfo;

		ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
				    xpt_path_target_id(path),
				    xpt_path_lun_id(path),
				    SIM_CHANNEL(ahd, sim),
				    ROLE_UNKNOWN);

		/*
		 * Revert to async/narrow transfers
		 * for the next device.
		 */
		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_GOAL|AHD_TRANS_CUR, /*paused*/FALSE);
		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
				 /*ppr_options*/0, AHD_TRANS_GOAL|AHD_TRANS_CUR,
				 /*paused*/FALSE);
		break;
	}
	default:
		break;
	}
}
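
/*
 * Completion callback for bus_dmamap_load(), also invoked directly for
 * transfers that need no mapping.  Translate the segment list into the
 * controller's S/G format, apply disconnect, packetized, and
 * negotiation settings to the hardware SCB, and queue the SCB to the
 * controller.
 */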

static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct scb *scb;
	union ccb *ccb;
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int mask;

	scb = (struct scb *)arg;
	ccb = scb->io_ctx;
	ahd = scb->ahd_softc;

	if (error != 0) {
		if (error == EFBIG)
			aic_set_transaction_status(scb, CAM_REQ_TOO_BIG);
		else
			aic_set_transaction_status(scb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
		ahd_free_scb(ahd, scb);
		xpt_done(ccb);
		return;
	}
	scb->sg_count = 0;
	if (nsegments != 0) {
		void *sg;
		bus_dmasync_op_t op;
		u_int i;

		/* Copy the segments into our SG list */
		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
					  dm_segs->ds_len,
					  /*last*/i == 1);
			dm_segs++;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;
		}
	}

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (aic_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahd->buffer_dmat,
					  scb->dmamap);
		ahd_free_scb(ahd, scb);
		xpt_done(ccb);
		return;
	}

	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahd, scb);

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
		scb->flags |= SCB_PACKETIZED;
		if (scb->hscb->task_management != 0)
			scb->hscb->control &= ~MK_MESSAGE;
	}

	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->goal.width != 0
	  || tinfo->goal.period != 0
	  || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	aic_scb_timer_start(scb);

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
		ahd_pause(ahd);
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
		ahd_unpause(ahd);
	} else {
		ahd_queue_scb(ahd, scb);
	}
}

static void
ahd_poll(struct cam_sim *sim)
{
	ahd_intr(cam_sim_softc(sim));
}
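
/*
 * Prepare the data phase of a transaction: copy or reference the CDB in
 * the hardware SCB and hand the CCB's data buffer(s) to bus_dma, which
 * invokes ahd_execute_scb() once the mapping is available.
 */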

static void
ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	csio->resid = 0;
	csio->sense_resid = 0;
	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			if (hscb->cdb_len > MAX_CDB_LEN
			 && (ccb_h->flags & CAM_CDB_PHYS) == 0) {

				/*
				 * Should CAM start to support CDB sizes
				 * greater than 16 bytes, we could use
				 * the sense buffer to store the CDB.
				 */
				aic_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahd_free_scb(ahd, scb);
				xpt_done((union ccb *)csio);
				return;
			}
			if ((ccb_h->flags & CAM_CDB_PHYS) != 0) {
				hscb->shared_data.idata.cdb_from_host.cdbptr =
				    aic_htole64((uintptr_t)csio->cdb_io.cdb_ptr);
				hscb->shared_data.idata.cdb_from_host.cdblen =
				    csio->cdb_len;
				hscb->cdb_len |= SCB_CDB_LEN_PTR;
			} else {
				memcpy(hscb->shared_data.idata.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			if (hscb->cdb_len > MAX_CDB_LEN) {

				aic_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahd_free_scb(ahd, scb);
				xpt_done((union ccb *)csio);
				return;
			}
			memcpy(hscb->shared_data.idata.cdb,
			       csio->cdb_io.cdb_bytes, hscb->cdb_len);
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				s = splsoftvm();
				error = bus_dmamap_load(ahd->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahd_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHD_MAXTRANSFER_SIZE)
					panic("ahd_setup_data - Transfer size "
					      "larger than controller maximum");

				seg.ds_addr =
				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahd_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahd_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahd_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahd_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		ahd_execute_scb(scb, NULL, 0, 0);
	}
}
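
/*
 * Attempt to abort the CCB referenced by an XPT_ABORT request.  Only
 * CCBs still queued on a target mode list can be aborted here; aborting
 * an active initiator I/O is not yet implemented and returns
 * CAM_UA_ABORT.
 */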

static void
ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
#ifdef AHD_TARGET_MODE
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct ahd_tmode_tstate *tstate;
		struct ahd_tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahd_find_tmode_devs(ahd, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while (curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				xpt_print_path(abort_ccb->ccb_h.path);
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
#endif
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

void
ahd_send_async(struct ahd_softc *ahd, char channel, u_int target,
	       u_int lun, ac_code code, void *opt_arg)
{
	struct ccb_trans_settings cts;
	struct cam_path *path;
	void *arg;
	int error;

	arg = NULL;
	error = ahd_create_path(ahd, channel, target, lun, &path);

	if (error != CAM_REQ_CMP)
		return;

	switch (code) {
	case AC_TRANSFER_NEG:
	{
		struct ccb_trans_settings_scsi *scsi;

		cts.type = CTS_TYPE_CURRENT_SETTINGS;
		scsi = &cts.proto_specific.scsi;
		cts.ccb_h.path = path;
		cts.ccb_h.target_id = target;
		cts.ccb_h.target_lun = lun;
		ahd_get_tran_settings(ahd, ahd->our_id, channel, &cts);
		arg = &cts;
		scsi->valid &= ~CTS_SCSI_VALID_TQ;
		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
		if (opt_arg == NULL)
			break;
		if (*((ahd_queue_alg *)opt_arg) == AHD_QUEUE_TAGGED)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		scsi->valid |= CTS_SCSI_VALID_TQ;
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
		break;
	default:
		panic("ahd_send_async: Unexpected async event");
	}
	xpt_async(code, path, arg);
	xpt_free_path(path);
}

void
ahd_platform_set_tags(struct ahd_softc *ahd,
		      struct ahd_devinfo *devinfo, int enable)
{
}

int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
	ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
				    M_NOWAIT | M_ZERO);
	if (ahd->platform_data == NULL)
		return (ENOMEM);
	return (0);
}

void
ahd_platform_free(struct ahd_softc *ahd)
{
	struct ahd_platform_data *pdata;

	pdata = ahd->platform_data;
	if (pdata != NULL) {
		if (pdata->regs[0] != NULL)
			bus_release_resource(ahd->dev_softc,
					     pdata->regs_res_type[0],
					     pdata->regs_res_id[0],
					     pdata->regs[0]);

		if (pdata->regs[1] != NULL)
			bus_release_resource(ahd->dev_softc,
					     pdata->regs_res_type[1],
					     pdata->regs_res_id[1],
					     pdata->regs[1]);

		if (pdata->irq != NULL)
			bus_release_resource(ahd->dev_softc,
					     pdata->irq_res_type,
					     0, pdata->irq);

		if (pdata->sim != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
			xpt_free_path(pdata->path);
			xpt_bus_deregister(cam_sim_path(pdata->sim));
			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
		}
		if (pdata->eh != NULL)
			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
		free(ahd->platform_data, M_DEVBUF);
	}
}

int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{
	/* We don't sort softcs under FreeBSD so report equal always */
	return (0);
}

int
ahd_detach(device_t dev)
{
	struct ahd_softc *ahd;

	device_printf(dev, "detaching device\n");
	ahd = device_get_softc(dev);
	ahd_lock(ahd);
	TAILQ_REMOVE(&ahd_tailq, ahd, links);
	ahd_intr_enable(ahd, FALSE);
	bus_teardown_intr(dev, ahd->platform_data->irq, ahd->platform_data->ih);
	ahd_unlock(ahd);
	ahd_free(ahd);
	return (0);
}

#if 0
static void
ahd_dump_targcmd(struct target_cmd *cmd)
{
	uint8_t *byte;
	uint8_t *last_byte;
	int i;

	byte = &cmd->initiator_channel;
	/* Debugging info for received commands */
	last_byte = &cmd[1].initiator_channel;

	i = 0;
	while (byte < last_byte) {
		if (i == 0)
			printf("\t");
		printf("%#x", *byte++);
		i++;
		if (i == 8) {
			printf("\n");
			i = 0;
		} else {
			printf(", ");
		}
	}
}
#endif

static int
ahd_modevent(module_t mod, int type, void *data)
{
	/* XXX Deal with busy status on unload. */
	/* XXX Deal with unknown events */
	return 0;
}

static moduledata_t ahd_mod = {
	"ahd",
	ahd_modevent,
	NULL
};

/********************************** DDB Hooks *********************************/
#ifdef DDB
static struct ahd_softc *ahd_ddb_softc;
static int ahd_ddb_paused;
static int ahd_ddb_paused_on_entry;

DB_COMMAND(ahd_sunit, ahd_ddb_sunit)
{
	struct ahd_softc *list_ahd;

	ahd_ddb_softc = NULL;
	TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
		if (list_ahd->unit == addr)
			ahd_ddb_softc = list_ahd;
	}
	if (ahd_ddb_softc == NULL)
		db_error("No matching softc found!\n");
}

DB_COMMAND(ahd_pause, ahd_ddb_pause)
{
	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}
	if (ahd_ddb_paused == 0) {
		ahd_ddb_paused++;
		if (ahd_is_paused(ahd_ddb_softc)) {
			ahd_ddb_paused_on_entry++;
			return;
		}
		ahd_pause(ahd_ddb_softc);
	}
}

DB_COMMAND(ahd_unpause, ahd_ddb_unpause)
{
	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}
	if (ahd_ddb_paused != 0) {
		ahd_ddb_paused = 0;
		if (ahd_ddb_paused_on_entry)
			return;
		ahd_unpause(ahd_ddb_softc);
	} else if (ahd_ddb_paused_on_entry != 0) {
		/* Two unpauses to clear a paused on entry. */
		ahd_ddb_paused_on_entry = 0;
		ahd_unpause(ahd_ddb_softc);
	}
}
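
/*
 * Register access from the debugger: "ahd_in" reads and "ahd_out"
 * writes controller registers at the given address, honoring a size
 * modifier (b/w/l for ahd_in, b/h/l for ahd_out).
 */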

DB_COMMAND(ahd_in, ahd_ddb_in)
{
	int c;
	int size;

	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}
	if (have_addr == 0)
		return;

	size = 1;
	while ((c = *modif++) != '\0') {
		switch (c) {
		case 'b':
			size = 1;
			break;
		case 'w':
			size = 2;
			break;
		case 'l':
			size = 4;
			break;
		}
	}

	if (count <= 0)
		count = 1;
	while (--count >= 0) {
		db_printf("%04lx (M)%x: \t", (u_long)addr,
			  ahd_inb(ahd_ddb_softc, MODE_PTR));
		switch (size) {
		case 1:
			db_printf("%02x\n", ahd_inb(ahd_ddb_softc, addr));
			break;
		case 2:
			db_printf("%04x\n", ahd_inw(ahd_ddb_softc, addr));
			break;
		case 4:
			db_printf("%08x\n", ahd_inl(ahd_ddb_softc, addr));
			break;
		}
	}
}

DB_FUNC(ahd_out, ahd_ddb_out, db_cmd_table, CS_MORE, NULL)
{
	db_expr_t old_value;
	db_expr_t new_value;
	int size;

	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}

	switch (modif[0]) {
	case '\0':
	case 'b':
		size = 1;
		break;
	case 'h':
		size = 2;
		break;
	case 'l':
		size = 4;
		break;
	default:
		db_error("Unknown size\n");
		return;
	}

	while (db_expression(&new_value)) {
		switch (size) {
		default:
		case 1:
			old_value = ahd_inb(ahd_ddb_softc, addr);
			ahd_outb(ahd_ddb_softc, addr, new_value);
			break;
		case 2:
			old_value = ahd_inw(ahd_ddb_softc, addr);
			ahd_outw(ahd_ddb_softc, addr, new_value);
			break;
		case 4:
			old_value = ahd_inl(ahd_ddb_softc, addr);
			ahd_outl(ahd_ddb_softc, addr, new_value);
			break;
		}
		db_printf("%04lx (M)%x: \t0x%lx\t=\t0x%lx",
			  (u_long)addr, ahd_inb(ahd_ddb_softc, MODE_PTR),
			  (u_long)old_value, (u_long)new_value);
		addr += size;
	}
	db_skip_to_eol();
}

DB_COMMAND(ahd_dump, ahd_ddb_dump)
{
	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}
	ahd_dump_card_state(ahd_ddb_softc);
}
#endif

DECLARE_MODULE(ahd, ahd_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
MODULE_DEPEND(ahd, cam, 1, 1, 1);
MODULE_VERSION(ahd, 1);